swarm, cmd/swarm: Merge branch 'master' into multiple-ens-endpoints

commit 6a9730edaa

.mailmap | 14
@@ -65,7 +65,8 @@ Enrique Fynn <enriquefynn@gmail.com>
 
 Vincent G <caktux@gmail.com>
 
-RJ Catalano <rj@erisindustries.com>
+RJ Catalano <catalanor0220@gmail.com>
+RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com>
 
 Nchinda Nchinda <nchinda2@gmail.com>
 
@@ -109,3 +110,14 @@ Frank Wang <eternnoir@gmail.com>
 Gary Rong <garyrong0905@gmail.com>
 
 Guillaume Nicolas <guin56@gmail.com>
+
+Sorin Neacsu <sorin.neacsu@gmail.com>
+Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com>
+
+Valentin Wüstholz <wuestholz@gmail.com>
+Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com>
+
+Armin Braun <me@obrown.io>
+
+Ernesto del Toro <ernesto.deltoro@gmail.com>
+Ernesto del Toro <ernesto.deltoro@gmail.com> <ernestodeltoro@users.noreply.github.com>
@@ -185,6 +185,8 @@ matrix:
 - xctool -version
 - xcrun simctl list
 
+# Workaround for https://github.com/golang/go/issues/23749
+- export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc'
 - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds
 
 # This builder does the Azure archive purges to avoid accumulating junk
AUTHORS | 92
@@ -1,85 +1,173 @@
 # This is the official list of go-ethereum authors for copyright purposes.
 
+Afri Schoedon <5chdn@users.noreply.github.com>
+Agustin Armellini Fischer <armellini13@gmail.com>
+Airead <fgh1987168@gmail.com>
+Alan Chen <alanchchen@users.noreply.github.com>
+Alejandro Isaza <alejandro.isaza@gmail.com>
 Ales Katona <ales@coinbase.com>
 Alex Leverington <alex@ethdev.com>
+Alex Wu <wuyiding@gmail.com>
 Alexandre Van de Sande <alex.vandesande@ethdev.com>
+Ali Hajimirza <Ali92hm@users.noreply.github.com>
+Anton Evangelatov <anton.evangelatov@gmail.com>
+Arba Sasmoyo <arba.sasmoyo@gmail.com>
+Armani Ferrante <armaniferrante@berkeley.edu>
+Armin Braun <me@obrown.io>
 Aron Fischer <github@aron.guru>
 Bas van Kervel <bas@ethdev.com>
 Benjamin Brent <benjamin@benjaminbrent.com>
+Benoit Verkindt <benoit.verkindt@gmail.com>
+Bo <bohende@gmail.com>
+Bo Ye <boy.e.computer.1982@outlook.com>
+Bob Glickstein <bobg@users.noreply.github.com>
 Brian Schroeder <bts@gmail.com>
 Casey Detrio <cdetrio@gmail.com>
+Chase Wright <mysticryuujin@gmail.com>
 Christoph Jentzsch <jentzsch.software@gmail.com>
 Daniel A. Nagy <nagy.da@gmail.com>
+Daniel Sloof <goapsychadelic@gmail.com>
+Darrel Herbst <dherbst@gmail.com>
+Dave Appleton <calistralabs@gmail.com>
 Diego Siqueira <DiSiqueira@users.noreply.github.com>
+Dmitry Shulyak <yashulyak@gmail.com>
+Egon Elbre <egonelbre@gmail.com>
+Elias Naur <elias.naur@gmail.com>
 Elliot Shepherd <elliot@identitii.com>
 Enrique Fynn <enriquefynn@gmail.com>
+Ernesto del Toro <ernesto.deltoro@gmail.com>
 Ethan Buchman <ethan@coinculture.info>
+Eugene Valeyev <evgen.povt@gmail.com>
+Evangelos Pappas <epappas@evalonlabs.com>
+Evgeny Danilenko <6655321@bk.ru>
 Fabian Vogelsteller <fabian@frozeman.de>
+Fabio Barone <fabio.barone.co@gmail.com>
 Fabio Berger <fabioberger1991@gmail.com>
+FaceHo <facehoshi@gmail.com>
 Felix Lange <fjl@twurst.com>
+Fiisio <liangcszzu@163.com>
 Frank Wang <eternnoir@gmail.com>
+Furkan KAMACI <furkankamaci@gmail.com>
 Gary Rong <garyrong0905@gmail.com>
+George Ornbo <george@shapeshed.com>
 Gregg Dourgarian <greggd@tempworks.com>
+Guillaume Ballet <gballet@gmail.com>
 Guillaume Nicolas <guin56@gmail.com>
 Gustav Simonsson <gustav.simonsson@gmail.com>
 Hao Bryan Cheng <haobcheng@gmail.com>
 Henning Diedrich <hd@eonblast.com>
 Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
+Ivan Daniluk <ivan.daniluk@gmail.com>
 Jae Kwon <jkwon.work@gmail.com>
 Jamie Pitts <james.pitts@gmail.com>
+Janoš Guljaš <janos@users.noreply.github.com>
 Jason Carver <jacarver@linkedin.com>
+Jay Guo <guojiannan1101@gmail.com>
 Jeff R. Allen <jra@nella.org>
 Jeffrey Wilcke <jeffrey@ethereum.org>
 Jens Agerberg <github@agerberg.me>
+Jia Chenhui <jiachenhui1989@gmail.com>
+Jim McDonald <Jim@mcdee.net>
+Joel Burget <joelburget@gmail.com>
 Jonathan Brown <jbrown@bluedroplet.com>
 Joseph Chow <ethereum@outlook.com>
 Justin Clark-Casey <justincc@justincc.org>
 Justin Drake <drakefjustin@gmail.com>
 Kenji Siu <kenji@isuntv.com>
 Kobi Gurkan <kobigurk@gmail.com>
+Konrad Feldmeier <konrad@brainbot.com>
+Kurkó Mihály <kurkomisi@users.noreply.github.com>
+Kyuntae Ethan Kim <ethan.kyuntae.kim@gmail.com>
 Lefteris Karapetsas <lefteris@refu.co>
 Leif Jurvetson <leijurv@gmail.com>
+Leo Shklovskii <leo@thermopylae.net>
 Lewis Marshall <lewis@lmars.net>
+Lio李欧 <lionello@users.noreply.github.com>
 Louis Holbrook <dev@holbrook.no>
 Luca Zeug <luclu@users.noreply.github.com>
+Magicking <s@6120.eu>
 Maran Hidskes <maran.hidskes@gmail.com>
 Marek Kotewicz <marek.kotewicz@gmail.com>
+Mark <markya0616@gmail.com>
 Martin Holst Swende <martin@swende.se>
 Matthew Di Ferrante <mattdf@users.noreply.github.com>
 Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
+Maximilian Meister <mmeister@suse.de>
 Micah Zoltu <micah@zoltu.net>
+Michael Ruminer <michael.ruminer+github@gmail.com>
+Miguel Mota <miguelmota2@gmail.com>
+Miya Chen <miyatlchen@gmail.com>
 Nchinda Nchinda <nchinda2@gmail.com>
 Nick Dodson <silentcicero@outlook.com>
 Nick Johnson <arachnid@notdot.net>
+Nicolas Guillaume <gunicolas@sqli.com>
+Noman <noman@noman.land>
+Oli Bye <olibye@users.noreply.github.com>
+Paul Litvak <litvakpol@012.net.il>
 Paulo L F Casaretto <pcasaretto@gmail.com>
+Paweł Bylica <chfast@gmail.com>
 Peter Pratscher <pratscher@gmail.com>
+Petr Mikusek <petr@mikusek.info>
 Péter Szilágyi <peterke@gmail.com>
-RJ Catalano <rj@erisindustries.com>
+RJ Catalano <catalanor0220@gmail.com>
 Ramesh Nair <ram@hiddentao.com>
 Ricardo Catalinas Jiménez <r@untroubled.be>
+Ricardo Domingos <ricardohsd@gmail.com>
+Richard Hart <richardhart92@gmail.com>
+Rob <robert@rojotek.com>
+Robert Zaremba <robert.zaremba@scale-it.pl>
+Russ Cox <rsc@golang.org>
 Rémy Roy <remyroy@remyroy.com>
+S. Matthew English <s-matthew-english@users.noreply.github.com>
 Shintaro Kaneko <kaneshin0120@gmail.com>
+Sorin Neacsu <sorin.neacsu@gmail.com>
 Stein Dekker <dekker.stein@gmail.com>
+Steve Waldman <swaldman@mchange.com>
 Steven Roose <stevenroose@gmail.com>
 Taylor Gerring <taylor.gerring@gmail.com>
 Thomas Bocek <tom@tomp2p.net>
+Ti Zhou <tizhou1986@gmail.com>
 Tosh Camille <tochecamille@gmail.com>
-Valentin Wüstholz <wuestholz@users.noreply.github.com>
+Valentin Wüstholz <wuestholz@gmail.com>
 Victor Farazdagi <simple.square@gmail.com>
 Victor Tran <vu.tran54@gmail.com>
 Viktor Trón <viktor.tron@gmail.com>
 Ville Sundell <github@solarius.fi>
 Vincent G <caktux@gmail.com>
 Vitalik Buterin <v@buterin.com>
+Vitaly V <vvelikodny@gmail.com>
 Vivek Anand <vivekanand1101@users.noreply.github.com>
 Vlad Gluhovsky <gluk256@users.noreply.github.com>
 Yohann Léon <sybiload@gmail.com>
 Yoichi Hirai <i@yoichihirai.com>
+Yondon Fu <yondon.fu@gmail.com>
+Zach <zach.ramsay@gmail.com>
 Zahoor Mohamed <zahoor@zahoor.in>
+Zoe Nolan <github@zoenolan.org>
 Zsolt Felföldi <zsfelfoldi@gmail.com>
+am2rican5 <am2rican5@gmail.com>
+ayeowch <ayeowch@gmail.com>
+b00ris <b00ris@mail.ru>
+bailantaotao <Edwin@maicoin.com>
+baizhenxuan <nkbai@163.com>
+bloonfield <bloonfield@163.com>
+changhong <changhong.yu@shanbay.com>
+evgk <evgeniy.kamyshev@gmail.com>
+ferhat elmas <elmas.ferhat@gmail.com>
 holisticode <holistic.computing@gmail.com>
+jtakalai <juuso.takalainen@streamr.com>
 ken10100147 <sunhongping@kanjian.com>
 ligi <ligi@ligi.de>
+mark.lin <mark@maicoin.com>
+necaremus <necaremus@gmail.com>
+njupt-moon <1015041018@njupt.edu.cn>
+nkbai <nkbai@163.com>
+rhaps107 <dod-source@yandex.ru>
+slumber1122 <slumber1122@gmail.com>
+sunxiaojun2014 <sunxiaojun-xy@360.cn>
+terasum <terasum@163.com>
+tsarpaul <Litvakpol@012.net.il>
 xiekeyang <xiekeyang@users.noreply.github.com>
+yoza <yoza.is12s@gmail.com>
 ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
 Максим Чусовлянов <mchusovlianov@gmail.com>
README.md | 30
@@ -5,6 +5,7 @@ Official golang implementation of the Ethereum protocol.
 [![API Reference](
 https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
 )](https://godoc.org/github.com/ethereum/go-ethereum)
+[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
 [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethereum/go-ethereum?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
 
 Automated builds are available for stable releases and the unstable master branch.
@@ -56,16 +57,14 @@ the user doesn't care about years-old historical data, so we can fast-sync quick
 state of the network. To do so:
 
 ```
-$ geth --fast --cache=512 console
+$ geth console
 ```
 
 This command will:
 
-* Start geth in fast sync mode (`--fast`), causing it to download more data in exchange for avoiding
-  processing the entire history of the Ethereum network, which is very CPU intensive.
-* Bump the memory allowance of the database to 512MB (`--cache=512`), which can help significantly in
-  sync times especially for HDD users. This flag is optional and you can set it as high or as low as
-  you'd like, though we'd recommend the 512MB - 2GB range.
+* Start geth in fast sync mode (default, can be changed with the `--syncmode` flag), causing it to
+  download more data in exchange for avoiding processing the entire history of the Ethereum network,
+  which is very CPU intensive.
 * Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
   (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
   as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
@@ -80,12 +79,11 @@ entire system. In other words, instead of attaching to the main network, you wan
 network with your node, which is fully equivalent to the main network, but with play-Ether only.
 
 ```
-$ geth --testnet --fast --cache=512 console
+$ geth --testnet console
 ```
 
-The `--fast`, `--cache` flags and `console` subcommand have the exact same meaning as above and they
-are equally useful on the testnet too. Please see above for their explanations if you've skipped to
-here.
+The `console` subcommand have the exact same meaning as above and they are equally useful on the
+testnet too. Please see above for their explanations if you've skipped to here.
 
 Specifying the `--testnet` flag however will reconfigure your Geth instance a bit:
 
@@ -102,6 +100,14 @@ over between the main network and test network, you should make sure to always u
 for play-money and real-money. Unless you manually move accounts, Geth will by default correctly
 separate the two networks and will not make any accounts available between them.*
 
+### Full node on the Rinkeby test network
+
+The above test network is a cross client one based on the ethash proof-of-work consensus algorithm. As such, it has certain extra overhead and is more susceptible to reorganization attacks due to the network's low difficulty / security. Go Ethereum also supports connecting to a proof-of-authority based test network called [*Rinkeby*](https://www.rinkeby.io) (operated by members of the community). This network is lighter, more secure, but is only supported by go-ethereum.
+
+```
+$ geth --rinkeby console
+```
+
 ### Configuration
 
 As an alternative to passing the numerous flags to the `geth` binary, you can also pass a configuration file via:
@@ -125,10 +131,10 @@ One of the quickest ways to get Ethereum up and running on your machine is by us
 ```
 docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
            -p 8545:8545 -p 30303:30303 \
-           ethereum/client-go --fast --cache=512
+           ethereum/client-go
 ```
 
-This will start geth in fast sync mode with a DB memory allowance of 512MB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image.
+This will start geth in fast-sync mode with a DB memory allowance of 1GB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image.
 
 Do not forget `--rpcaddr 0.0.0.0`, if you want to access RPC from other containers and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not accessible from the outside.
 
@@ -136,11 +136,11 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
 
 // MethodById looks up a method by the 4-byte id
 // returns nil if none found
-func (abi *ABI) MethodById(sigdata []byte) *Method {
+func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
 	for _, method := range abi.Methods {
 		if bytes.Equal(method.Id(), sigdata[:4]) {
-			return &method
+			return &method, nil
 		}
 	}
-	return nil
+	return nil, fmt.Errorf("no method with id: %#x", sigdata[:4])
 }
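With this change an unknown selector is reported as an error instead of a silent nil. A small, self-contained sketch of the new call pattern (not part of the commit; the ABI JSON below is invented purely for illustration):

```
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Hypothetical single-method ABI, used only to demonstrate the lookup.
	def := `[{"type":"function","name":"transfer","inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	selector := parsed.Methods["transfer"].Id() // 4-byte function selector
	method, err := parsed.MethodById(selector)
	if err != nil {
		// With the new signature, a missing method surfaces here
		// instead of coming back as an untyped nil.
		panic(err)
	}
	fmt.Println(method.Name) // transfer
}
```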
@@ -689,7 +689,11 @@ func TestABI_MethodById(t *testing.T) {
 	}
 	for name, m := range abi.Methods {
 		a := fmt.Sprintf("%v", m)
-		b := fmt.Sprintf("%v", abi.MethodById(m.Id()))
+		m2, err := abi.MethodById(m.Id())
+		if err != nil {
+			t.Fatalf("Failed to look up ABI method: %v", err)
+		}
+		b := fmt.Sprintf("%v", m2)
 		if a != b {
 			t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
 		}
@@ -67,6 +67,17 @@ func (arguments Arguments) LengthNonIndexed() int {
 	return out
 }
 
+// NonIndexed returns the arguments with indexed arguments filtered out
+func (arguments Arguments) NonIndexed() Arguments {
+	var ret []Argument
+	for _, arg := range arguments {
+		if !arg.Indexed {
+			ret = append(ret, arg)
+		}
+	}
+	return ret
+}
+
 // isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]
 func (arguments Arguments) isTuple() bool {
 	return len(arguments) > 1
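To see what the new helper filters, here is a standalone sketch (not part of the commit; the event definition is made up for illustration). Only non-indexed inputs live in a log's data section, and that is the slice the unpacking code below now iterates over:

```
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// One indexed and one non-indexed input.
	def := `[{"type":"event","name":"Transfer","inputs":[
		{"name":"from","type":"address","indexed":true},
		{"name":"value","type":"uint256","indexed":false}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// NonIndexed keeps only the arguments stored in the data section.
	fmt.Println(len(parsed.Events["Transfer"].Inputs.NonIndexed())) // 1
}
```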
@@ -74,21 +85,25 @@ func (arguments Arguments) isTuple() bool {
 
 // Unpack performs the operation hexdata -> Go format
 func (arguments Arguments) Unpack(v interface{}, data []byte) error {
-	if arguments.isTuple() {
-		return arguments.unpackTuple(v, data)
-	}
-	return arguments.unpackAtomic(v, data)
-}
-
-func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
 	// make sure the passed value is arguments pointer
-	valueOf := reflect.ValueOf(v)
-	if reflect.Ptr != valueOf.Kind() {
+	if reflect.Ptr != reflect.ValueOf(v).Kind() {
 		return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
 	}
+	marshalledValues, err := arguments.UnpackValues(data)
+	if err != nil {
+		return err
+	}
+	if arguments.isTuple() {
+		return arguments.unpackTuple(v, marshalledValues)
+	}
+	return arguments.unpackAtomic(v, marshalledValues)
+}
+
+func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
 
 	var (
-		value = valueOf.Elem()
+		value = reflect.ValueOf(v).Elem()
 		typ   = value.Type()
 		kind  = value.Kind()
 	)
@@ -110,30 +125,9 @@ func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
 			exists[field] = true
 		}
 	}
-	// `i` counts the nonindexed arguments.
-	// `j` counts the number of complex types.
-	// both `i` and `j` are used to to correctly compute `data` offset.
-
-	i, j := -1, 0
-	for _, arg := range arguments {
-
-		if arg.Indexed {
-			// can't read, continue
-			continue
-		}
-		i++
-		marshalledValue, err := toGoType((i+j)*32, arg.Type, output)
-		if err != nil {
-			return err
-		}
-
-		if arg.Type.T == ArrayTy {
-			// combined index ('i' + 'j') need to be adjusted only by size of array, thus
-			// we need to decrement 'j' because 'i' was incremented
-			j += arg.Type.Size - 1
-		}
-
-		reflectValue := reflect.ValueOf(marshalledValue)
-
+	for i, arg := range arguments.NonIndexed() {
+
+		reflectValue := reflect.ValueOf(marshalledValues[i])
+
 		switch kind {
 		case reflect.Struct:
@@ -166,34 +160,52 @@ func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
 }
 
 // unpackAtomic unpacks ( hexdata -> go ) a single value
-func (arguments Arguments) unpackAtomic(v interface{}, output []byte) error {
-	// make sure the passed value is arguments pointer
-	valueOf := reflect.ValueOf(v)
-	if reflect.Ptr != valueOf.Kind() {
-		return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
+func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
+	if len(marshalledValues) != 1 {
+		return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
 	}
-	arg := arguments[0]
-	if arg.Indexed {
-		return fmt.Errorf("abi: attempting to unpack indexed variable into element.")
-	}
-
-	value := valueOf.Elem()
-
-	marshalledValue, err := toGoType(0, arg.Type, output)
-	if err != nil {
-		return err
-	}
-	return set(value, reflect.ValueOf(marshalledValue), arg)
+	elem := reflect.ValueOf(v).Elem()
+	reflectValue := reflect.ValueOf(marshalledValues[0])
+	return set(elem, reflectValue, arguments.NonIndexed()[0])
 }
 
-// Unpack performs the operation Go format -> Hexdata
+// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
+// without supplying a struct to unpack into. Instead, this method returns a list containing the
+// values. An atomic argument will be a list with one element.
+func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
+	retval := make([]interface{}, 0, arguments.LengthNonIndexed())
+	virtualArgs := 0
+	for index, arg := range arguments.NonIndexed() {
+		marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
+		if arg.Type.T == ArrayTy {
+			// If we have a static array, like [3]uint256, these are coded as
+			// just like uint256,uint256,uint256.
+			// This means that we need to add two 'virtual' arguments when
+			// we count the index from now on
+			virtualArgs += arg.Type.Size - 1
+		}
+		if err != nil {
+			return nil, err
+		}
+		retval = append(retval, marshalledValue)
+	}
+	return retval, nil
+}
+
+// PackValues performs the operation Go format -> Hexdata
+// It is the semantic opposite of UnpackValues
+func (arguments Arguments) PackValues(args []interface{}) ([]byte, error) {
+	return arguments.Pack(args...)
+}
+
+// Pack performs the operation Go format -> Hexdata
 func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
 	// Make sure arguments match up and pack them
 	abiArgs := arguments
 	if len(args) != len(abiArgs) {
 		return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs))
 	}
 
 	// variable input is the output appended at the end of packed
 	// output. This is used for strings and bytes types input.
 	var variableInput []byte
|
|||||||
inputOffset += 32
|
inputOffset += 32
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var ret []byte
|
var ret []byte
|
||||||
for i, a := range args {
|
for i, a := range args {
|
||||||
input := abiArgs[i]
|
input := abiArgs[i]
|
||||||
@ -216,7 +227,6 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// check for a slice type (string, bytes, slice)
|
// check for a slice type (string, bytes, slice)
|
||||||
if input.Type.requiresLengthPrefix() {
|
if input.Type.requiresLengthPrefix() {
|
||||||
// calculate the offset
|
// calculate the offset
|
||||||
|
@@ -1,4 +1,4 @@
-// Copyright 2015 The go-ethereum Authors
+// Copyright 2017 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
@@ -1,4 +1,4 @@
-// Copyright 2015 The go-ethereum Authors
+// Copyright 2017 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
|
|||||||
|
|
||||||
// iteratively unpack elements
|
// iteratively unpack elements
|
||||||
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
|
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
|
||||||
|
if size < 0 {
|
||||||
|
return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size)
|
||||||
|
}
|
||||||
if start+32*size > len(output) {
|
if start+32*size > len(output) {
|
||||||
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
|
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
|
||||||
}
|
}
|
||||||
@ -181,16 +184,32 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
|
|||||||
|
|
||||||
// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type.
|
// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type.
|
||||||
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
|
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
|
||||||
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
|
bigOffsetEnd := big.NewInt(0).SetBytes(output[index : index+32])
|
||||||
if offset+32 > len(output) {
|
bigOffsetEnd.Add(bigOffsetEnd, common.Big32)
|
||||||
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
|
outputLength := big.NewInt(int64(len(output)))
|
||||||
}
|
|
||||||
length = int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
|
|
||||||
if offset+32+length > len(output) {
|
|
||||||
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+length)
|
|
||||||
}
|
|
||||||
start = offset + 32
|
|
||||||
|
|
||||||
//fmt.Printf("LENGTH PREFIX INFO: \nsize: %v\noffset: %v\nstart: %v\n", length, offset, start)
|
if bigOffsetEnd.Cmp(outputLength) > 0 {
|
||||||
|
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", bigOffsetEnd, outputLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
if bigOffsetEnd.BitLen() > 63 {
|
||||||
|
return 0, 0, fmt.Errorf("abi offset larger than int64: %v", bigOffsetEnd)
|
||||||
|
}
|
||||||
|
|
||||||
|
offsetEnd := int(bigOffsetEnd.Uint64())
|
||||||
|
lengthBig := big.NewInt(0).SetBytes(output[offsetEnd-32 : offsetEnd])
|
||||||
|
|
||||||
|
totalSize := big.NewInt(0)
|
||||||
|
totalSize.Add(totalSize, bigOffsetEnd)
|
||||||
|
totalSize.Add(totalSize, lengthBig)
|
||||||
|
if totalSize.BitLen() > 63 {
|
||||||
|
return 0, 0, fmt.Errorf("abi length larger than int64: %v", totalSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
if totalSize.Cmp(outputLength) > 0 {
|
||||||
|
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %v require %v", outputLength, totalSize)
|
||||||
|
}
|
||||||
|
start = int(bigOffsetEnd.Uint64())
|
||||||
|
length = int(lengthBig.Uint64())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
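The point of switching from a fixed-width binary.BigEndian.Uint64 read to math/big here is that a hostile 32-byte offset can overflow any 64-bit conversion before the bounds check runs. A minimal standalone sketch of the same defensive pattern (the helper name and values are mine, not the library's):

```
package main

import (
	"fmt"
	"math/big"
)

// checkedOffset converts an untrusted 32-byte big-endian word to an int,
// mirroring the checks added to lengthPrefixPointsTo: reject anything that
// does not fit in 63 bits or points past the end of the buffer.
func checkedOffset(word []byte, bufLen int) (int, error) {
	v := new(big.Int).SetBytes(word)
	if v.BitLen() > 63 {
		return 0, fmt.Errorf("offset larger than int64: %v", v)
	}
	if v.Cmp(big.NewInt(int64(bufLen))) > 0 {
		return 0, fmt.Errorf("offset %v beyond buffer (len=%d)", v, bufLen)
	}
	return int(v.Uint64()), nil
}

func main() {
	malicious := make([]byte, 32)
	malicious[0] = 0xff // would wrap around if truncated to 64 bits first
	if _, err := checkedOffset(malicious, 128); err != nil {
		fmt.Println("rejected:", err)
	}
}
```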
@@ -1,4 +1,4 @@
-// Copyright 2015 The go-ethereum Authors
+// Copyright 2017 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
|
|||||||
{
|
{
|
||||||
def: `[{"type": "bytes32"}]`,
|
def: `[{"type": "bytes32"}]`,
|
||||||
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
enc: "0100000000000000000000000000000000000000000000000000000000000000",
|
||||||
want: common.HexToHash("0100000000000000000000000000000000000000000000000000000000000000"),
|
want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
def: `[{"type": "function"}]`,
|
def: `[{"type": "function"}]`,
|
||||||
@ -683,3 +683,73 @@ func TestUnmarshal(t *testing.T) {
|
|||||||
t.Fatal("expected error:", err)
|
t.Fatal("expected error:", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestOOMMaliciousInput(t *testing.T) {
|
||||||
|
oomTests := []unpackTest{
|
||||||
|
{
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000003" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Length larger than 64 bits
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
|
||||||
|
"00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Offset very large (over 64 bits)
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Offset very large (below 64 bits)
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000007ffffffffff00020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Offset negative (as 64 bit)
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "000000000000000000000000000000000000000000000000f000000000000020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
|
||||||
|
{ // Negative length
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
|
||||||
|
"000000000000000000000000000000000000000000000000f000000000000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
{ // Very large length
|
||||||
|
def: `[{"type": "uint8[]"}]`,
|
||||||
|
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
|
||||||
|
"0000000000000000000000000000000000000000000000007fffffffff000002" + // num elems
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
|
||||||
|
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for i, test := range oomTests {
|
||||||
|
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
|
||||||
|
abi, err := JSON(strings.NewReader(def))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("invalid ABI definition %s: %v", def, err)
|
||||||
|
}
|
||||||
|
encb, err := hex.DecodeString(test.enc)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("invalid hex: %s" + test.enc)
|
||||||
|
}
|
||||||
|
_, err = abi.Methods["method"].Outputs.UnpackValues(encb)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("Expected error on malicious input, test %d", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@@ -121,7 +121,8 @@ var (
 	// Note: vivid is unsupported because there is no golang-1.6 package for it.
 	// Note: wily is unsupported because it was officially deprecated on lanchpad.
 	// Note: yakkety is unsupported because it was officially deprecated on lanchpad.
-	debDistros = []string{"trusty", "xenial", "zesty", "artful"}
+	// Note: zesty is unsupported because it was officially deprecated on lanchpad.
+	debDistros = []string{"trusty", "xenial", "artful", "bionic"}
 )
 
 var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -55,10 +55,9 @@ var (
 	"crypto/sha3/",
 	"internal/jsre/deps",
 	"log/",
+	"common/bitutil/bitutil",
 	// don't license generated files
-	"contracts/chequebook/contract/",
-	"contracts/ens/contract/",
-	"contracts/release/contract.go",
+	"contracts/chequebook/contract/code.go",
 }
 
 // paths with this prefix are licensed as GPL. all other files are LGPL.
@@ -122,7 +122,12 @@ func main() {
 			utils.Fatalf("%v", err)
 		}
 	} else {
-		if _, err := discover.ListenUDP(nodeKey, conn, realaddr, nil, "", restrictList); err != nil {
+		cfg := discover.Config{
+			PrivateKey:   nodeKey,
+			AnnounceAddr: realaddr,
+			NetRestrict:  restrictList,
+		}
+		if _, err := discover.ListenUDP(conn, cfg); err != nil {
 			utils.Fatalf("%v", err)
 		}
 	}
@@ -1,3 +1,19 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
 package main
 
 import (
@@ -1,4 +1,4 @@
-// Copyright 2017 The go-ethereum Authors
+// Copyright 2018 The go-ethereum Authors
 // This file is part of go-ethereum.
 //
 // go-ethereum is free software: you can redistribute it and/or modify
@@ -1,4 +1,4 @@
-// Copyright 2017 The go-ethereum Authors
+// Copyright 2018 The go-ethereum Authors
 // This file is part of go-ethereum.
 //
 // go-ethereum is free software: you can redistribute it and/or modify
@@ -1,18 +1,18 @@
 // Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
+// This file is part of go-ethereum.
 //
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 //
-// The go-ethereum library is distributed in the hope that it will be useful,
+// go-ethereum is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
+// GNU General Public License for more details.
 //
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
 
 package main
 
@@ -686,8 +686,6 @@ func authTwitter(url string) (string, string, common.Address, error) {
 	if len(parts) < 4 || parts[len(parts)-2] != "status" {
 		return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
 	}
-	username := parts[len(parts)-3]
-
 	// Twitter's API isn't really friendly with direct links. Still, we don't
 	// want to do ask read permissions from users, so just load the public posts and
 	// scrape it for the Ethereum address and profile URL.
@@ -697,6 +695,13 @@ func authTwitter(url string) (string, string, common.Address, error) {
 	}
 	defer res.Body.Close()
 
+	// Resolve the username from the final redirect, no intermediate junk
+	parts = strings.Split(res.Request.URL.String(), "/")
+	if len(parts) < 4 || parts[len(parts)-2] != "status" {
+		return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
+	}
+	username := parts[len(parts)-3]
+
 	body, err := ioutil.ReadAll(res.Body)
 	if err != nil {
 		return "", "", common.Address{}, err
@@ -67,6 +67,9 @@ It expects the genesis file as argument.`,
 			utils.DataDirFlag,
 			utils.CacheFlag,
 			utils.LightModeFlag,
+			utils.GCModeFlag,
+			utils.CacheDatabaseFlag,
+			utils.CacheGCFlag,
 		},
 		Category: "BLOCKCHAIN COMMANDS",
 		Description: `
@@ -22,6 +22,7 @@ import (
 	"os/signal"
 	"path/filepath"
 	"strings"
+	"syscall"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/console"
@@ -207,7 +208,7 @@ func ephemeralConsole(ctx *cli.Context) error {
 	}
 	// Wait for pending callbacks, but stop for Ctrl-C.
 	abort := make(chan os.Signal, 1)
-	signal.Notify(abort, os.Interrupt)
+	signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM)
 
 	go func() {
 		<-abort
@@ -114,6 +114,7 @@ var (
 		utils.VMEnableDebugFlag,
 		utils.NetworkIdFlag,
 		utils.RPCCORSDomainFlag,
+		utils.RPCVirtualHostsFlag,
 		utils.EthStatsURLFlag,
 		utils.MetricsEnabledFlag,
 		utils.FakePoWFlag,
@@ -156,6 +156,7 @@ var AppHelpFlagGroups = []flagGroup{
 		utils.IPCDisabledFlag,
 		utils.IPCPathFlag,
 		utils.RPCCORSDomainFlag,
+		utils.RPCVirtualHostsFlag,
 		utils.JSpathFlag,
 		utils.ExecFlag,
 		utils.PreloadJSFlag,
@@ -1,3 +1,19 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
 // p2psim provides a command-line client for a simulation HTTP API.
 //
 // Here is an example of creating a 2 node network with the first node
@ -117,7 +117,7 @@ var dashboardContent = `
|
|||||||
<br/>
|
<br/>
|
||||||
<p>To run an archive node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
<p>To run an archive node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
||||||
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
||||||
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=1024 --syncmode=full{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFullFlat}}</pre>
|
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=1024 --syncmode=full{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFlat}}</pre>
|
||||||
</p>
|
</p>
|
||||||
<br/>
|
<br/>
|
||||||
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>
|
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>
|
||||||
@ -136,7 +136,7 @@ var dashboardContent = `
|
|||||||
<br/>
|
<br/>
|
||||||
<p>To run a full node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
<p>To run a full node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
||||||
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
||||||
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=512{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFullFlat}}</pre>
|
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=512{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFlat}}</pre>
|
||||||
</p>
|
</p>
|
||||||
<br/>
|
<br/>
|
||||||
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>
|
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>
|
||||||
@ -158,7 +158,7 @@ var dashboardContent = `
|
|||||||
<br/>
|
<br/>
|
||||||
<p>To run a light node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
<p>To run a light node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
||||||
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
||||||
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesLightFlat}}</pre>
|
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFlat}}</pre>
|
||||||
</p>
|
</p>
|
||||||
<br/>
|
<br/>
|
||||||
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>
|
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>
|
||||||
@ -177,7 +177,7 @@ var dashboardContent = `
|
|||||||
<br/>
|
<br/>
|
||||||
<p>To run an embedded node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
<p>To run an embedded node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
||||||
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
||||||
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=16 --ethash.cachesinmem=1 --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesLightFlat}}</pre>
|
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=16 --ethash.cachesinmem=1 --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesFlat}}</pre>
|
||||||
</p>
|
</p>
|
||||||
<br/>
|
<br/>
|
||||||
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>
|
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>
|
||||||
@@ -208,7 +208,7 @@ var dashboardContent = `
 <pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
 </p>
 <p>With your local chain initialized, you can start the Ethereum Wallet:
-<pre>ethereumwallet --rpc $HOME/.{{.Network}}/geth.ipc --node-networkid={{.NetworkID}} --node-datadir=$HOME/.{{.Network}}{{if .Ethstats}} --node-ethstats='{{.Ethstats}}'{{end}} --node-bootnodes={{.BootnodesFullFlat}}</pre>
+<pre>ethereumwallet --rpc $HOME/.{{.Network}}/geth.ipc --node-networkid={{.NetworkID}} --node-datadir=$HOME/.{{.Network}}{{if .Ethstats}} --node-ethstats='{{.Ethstats}}'{{end}} --node-bootnodes={{.BootnodesFlat}}</pre>
 <p>
 <br/>
 <p>You can download the Ethereum Wallet from <a href="https://github.com/ethereum/mist/releases" target="about:blank">https://github.com/ethereum/mist/releases</a>.</p>
@@ -229,7 +229,7 @@ var dashboardContent = `
 <pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
 </p>
 <p>With your local chain initialized, you can start Mist:
-<pre>mist --rpc $HOME/.{{.Network}}/geth.ipc --node-networkid={{.NetworkID}} --node-datadir=$HOME/.{{.Network}}{{if .Ethstats}} --node-ethstats='{{.Ethstats}}'{{end}} --node-bootnodes={{.BootnodesFullFlat}}</pre>
+<pre>mist --rpc $HOME/.{{.Network}}/geth.ipc --node-networkid={{.NetworkID}} --node-datadir=$HOME/.{{.Network}}{{if .Ethstats}} --node-ethstats='{{.Ethstats}}'{{end}} --node-bootnodes={{.BootnodesFlat}}</pre>
 <p>
 <br/>
 <p>You can download the Mist browser from <a href="https://github.com/ethereum/mist/releases" target="about:blank">https://github.com/ethereum/mist/releases</a>.</p>
@@ -261,7 +261,7 @@ var dashboardContent = `
 <p>Inside your Java code you can now import the geth archive and connect to Ethereum:
 <pre>import org.ethereum.geth.*;</pre>
 <pre>
-Enodes bootnodes = new Enodes();{{range .BootnodesLight}}
+Enodes bootnodes = new Enodes();{{range .Bootnodes}}
 bootnodes.append(new Enode("{{.}}"));{{end}}

 NodeConfig config = new NodeConfig();
@@ -294,7 +294,7 @@ node.start();
 <pre>
 var error: NSError?

-let bootnodes = GethNewEnodesEmpty(){{range .BootnodesLight}}
+let bootnodes = GethNewEnodesEmpty(){{range .Bootnodes}}
 bootnodes?.append(GethNewEnode("{{.}}", &error)){{end}}

 let config = GethNewNodeConfig()
@@ -595,16 +595,16 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
 statsLogin = ""
 }
 indexfile := new(bytes.Buffer)
-bootCpp := make([]string, len(conf.bootFull))
-for i, boot := range conf.bootFull {
+bootCpp := make([]string, len(conf.bootnodes))
+for i, boot := range conf.bootnodes {
 bootCpp[i] = "required:" + strings.TrimPrefix(boot, "enode://")
 }
-bootHarmony := make([]string, len(conf.bootFull))
-for i, boot := range conf.bootFull {
+bootHarmony := make([]string, len(conf.bootnodes))
+for i, boot := range conf.bootnodes {
 bootHarmony[i] = fmt.Sprintf("-Dpeer.active.%d.url=%s", i, boot)
 }
-bootPython := make([]string, len(conf.bootFull))
-for i, boot := range conf.bootFull {
+bootPython := make([]string, len(conf.bootnodes))
+for i, boot := range conf.bootnodes {
 bootPython[i] = "'" + boot + "'"
 }
 template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{
@@ -616,10 +616,8 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
 "WalletPage": config.wallet,
 "FaucetPage": config.faucet,
 "GethGenesis": network + ".json",
-"BootnodesFull": conf.bootFull,
-"BootnodesLight": conf.bootLight,
-"BootnodesFullFlat": strings.Join(conf.bootFull, ","),
-"BootnodesLightFlat": strings.Join(conf.bootLight, ","),
+"Bootnodes": conf.bootnodes,
+"BootnodesFlat": strings.Join(conf.bootnodes, ","),
 "Ethstats": statsLogin,
 "Ethash": conf.Genesis.Config.Ethash != nil,
 "CppGenesis": network + "-cpp.json",
@@ -651,7 +649,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
 harmonySpecJSON, _ := conf.Genesis.MarshalJSON()
 files[filepath.Join(workdir, network+"-harmony.json")] = harmonySpecJSON

-paritySpec, err := newParityChainSpec(network, conf.Genesis, conf.bootFull)
+paritySpec, err := newParityChainSpec(network, conf.Genesis, conf.bootnodes)
 if err != nil {
 return nil, err
 }
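Note: the patch collapses the separate full/light bootnode lists into one. As a small, self-contained Go sketch of the reformatting the dashboard now performs on that single list (the sample enode URL below is made up for illustration, everything else mirrors the hunk above):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A single bootnode list now feeds every client-specific format.
	bootnodes := []string{"enode://0123abcd@192.0.2.1:30303"} // illustrative value

	bootCpp := make([]string, len(bootnodes))     // cpp-ethereum wants "required:<id@host:port>"
	bootHarmony := make([]string, len(bootnodes)) // Harmony wants -Dpeer.active.N.url=<enode>
	bootPython := make([]string, len(bootnodes))  // pyethapp wants quoted strings
	for i, boot := range bootnodes {
		bootCpp[i] = "required:" + strings.TrimPrefix(boot, "enode://")
		bootHarmony[i] = fmt.Sprintf("-Dpeer.active.%d.url=%s", i, boot)
		bootPython[i] = "'" + boot + "'"
	}
	fmt.Println(bootCpp, bootHarmony, bootPython)
	fmt.Println("BootnodesFlat:", strings.Join(bootnodes, ","))
}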
@@ -93,7 +93,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
 "NetworkID": config.node.network,
 "Bootnodes": strings.Join(bootnodes, ","),
 "Ethstats": config.node.ethstats,
-"EthPort": config.node.portFull,
+"EthPort": config.node.port,
 "CaptchaToken": config.captchaToken,
 "CaptchaSecret": config.captchaSecret,
 "FaucetName": strings.Title(network),
@@ -110,7 +110,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
 "Datadir": config.node.datadir,
 "VHost": config.host,
 "ApiPort": config.port,
-"EthPort": config.node.portFull,
+"EthPort": config.node.port,
 "EthName": config.node.ethstats[:strings.Index(config.node.ethstats, ":")],
 "CaptchaToken": config.captchaToken,
 "CaptchaSecret": config.captchaSecret,
@@ -158,7 +158,7 @@ func (info *faucetInfos) Report() map[string]string {
 report := map[string]string{
 "Website address": info.host,
 "Website listener port": strconv.Itoa(info.port),
-"Ethereum listener port": strconv.Itoa(info.node.portFull),
+"Ethereum listener port": strconv.Itoa(info.node.port),
 "Funding amount (base tier)": fmt.Sprintf("%d Ethers", info.amount),
 "Funding cooldown (base tier)": fmt.Sprintf("%d mins", info.minutes),
 "Funding tiers": strconv.Itoa(info.tiers),
@@ -228,7 +228,7 @@ func checkFaucet(client *sshClient, network string) (*faucetInfos, error) {
 return &faucetInfos{
 node: &nodeInfos{
 datadir: infos.volumes["/root/.faucet"],
-portFull: infos.portmap[infos.envvars["ETH_PORT"]+"/tcp"],
+port: infos.portmap[infos.envvars["ETH_PORT"]+"/tcp"],
 ethstats: infos.envvars["ETH_NAME"],
 keyJSON: keyJSON,
 keyPass: keyPass,
@@ -42,7 +42,7 @@ ADD genesis.json /genesis.json
 RUN \
 echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
 echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
-echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
+echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh

 ENTRYPOINT ["/bin/sh", "geth.sh"]
 `
@@ -56,15 +56,13 @@ services:
 build: .
 image: {{.Network}}/{{.Type}}
 ports:
-- "{{.FullPort}}:{{.FullPort}}"
-- "{{.FullPort}}:{{.FullPort}}/udp"{{if .Light}}
-- "{{.LightPort}}:{{.LightPort}}/udp"{{end}}
+- "{{.Port}}:{{.Port}}"
+- "{{.Port}}:{{.Port}}/udp"
 volumes:
 - {{.Datadir}}:/root/.ethereum{{if .Ethashdir}}
 - {{.Ethashdir}}:/root/.ethash{{end}}
 environment:
-- FULL_PORT={{.FullPort}}/tcp
-- LIGHT_PORT={{.LightPort}}/udp
+- PORT={{.Port}}/tcp
 - TOTAL_PEERS={{.TotalPeers}}
 - LIGHT_PEERS={{.LightPeers}}
 - STATS_NAME={{.Ethstats}}
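Note: with the full/light split gone, a single Port value drives both the published ports and the PORT environment variable. A minimal, runnable Go sketch of how text/template expands such a value (the snippet is trimmed from the compose template above; the sample values are illustrative):

package main

import (
	"os"
	"text/template"
)

const composeSketch = `ports:
  - "{{.Port}}:{{.Port}}"
  - "{{.Port}}:{{.Port}}/udp"
environment:
  - PORT={{.Port}}/tcp
  - TOTAL_PEERS={{.TotalPeers}}
`

func main() {
	// Render the trimmed compose template with one listener port.
	tmpl := template.Must(template.New("compose").Parse(composeSketch))
	tmpl.Execute(os.Stdout, map[string]interface{}{
		"Port":       30303,
		"TotalPeers": 50,
	})
}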
@@ -82,12 +80,11 @@ services:
 // deployNode deploys a new Ethereum node container to a remote machine via SSH,
 // docker and docker-compose. If an instance with the specified network name
 // already exists there, it will be overwritten!
-func deployNode(client *sshClient, network string, bootv4, bootv5 []string, config *nodeInfos, nocache bool) ([]byte, error) {
+func deployNode(client *sshClient, network string, bootnodes []string, config *nodeInfos, nocache bool) ([]byte, error) {
 kind := "sealnode"
 if config.keyJSON == "" && config.etherbase == "" {
 kind = "bootnode"
-bootv4 = make([]string, 0)
-bootv5 = make([]string, 0)
+bootnodes = make([]string, 0)
 }
 // Generate the content to upload to the server
 workdir := fmt.Sprintf("%d", rand.Int63())
@@ -100,11 +97,10 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
 dockerfile := new(bytes.Buffer)
 template.Must(template.New("").Parse(nodeDockerfile)).Execute(dockerfile, map[string]interface{}{
 "NetworkID": config.network,
-"Port": config.portFull,
+"Port": config.port,
 "Peers": config.peersTotal,
 "LightFlag": lightFlag,
-"BootV4": strings.Join(bootv4, ","),
-"BootV5": strings.Join(bootv5, ","),
+"Bootnodes": strings.Join(bootnodes, ","),
 "Ethstats": config.ethstats,
 "Etherbase": config.etherbase,
 "GasTarget": uint64(1000000 * config.gasTarget),
@@ -119,10 +115,9 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
 "Datadir": config.datadir,
 "Ethashdir": config.ethashdir,
 "Network": network,
-"FullPort": config.portFull,
+"Port": config.port,
 "TotalPeers": config.peersTotal,
 "Light": config.peersLight > 0,
-"LightPort": config.portFull + 1,
 "LightPeers": config.peersLight,
 "Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
 "Etherbase": config.etherbase,
@@ -157,10 +152,8 @@ type nodeInfos struct {
 datadir string
 ethashdir string
 ethstats string
-portFull int
-portLight int
-enodeFull string
-enodeLight string
+port int
+enode string
 peersTotal int
 peersLight int
 etherbase string
@@ -175,15 +168,11 @@ type nodeInfos struct {
 func (info *nodeInfos) Report() map[string]string {
 report := map[string]string{
 "Data directory": info.datadir,
-"Listener port (full nodes)": strconv.Itoa(info.portFull),
+"Listener port": strconv.Itoa(info.port),
 "Peer count (all total)": strconv.Itoa(info.peersTotal),
 "Peer count (light nodes)": strconv.Itoa(info.peersLight),
 "Ethstats username": info.ethstats,
 }
-if info.peersLight > 0 {
-// Light server enabled
-report["Listener port (light nodes)"] = strconv.Itoa(info.portLight)
-}
 if info.gasTarget > 0 {
 // Miner or signer node
 report["Gas limit (baseline target)"] = fmt.Sprintf("%0.3f MGas", info.gasTarget)
@@ -250,7 +239,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
 keyPass = string(bytes.TrimSpace(out))
 }
 // Run a sanity check to see if the devp2p is reachable
-port := infos.portmap[infos.envvars["FULL_PORT"]]
+port := infos.portmap[infos.envvars["PORT"]]
 if err = checkPort(client.server, port); err != nil {
 log.Warn(fmt.Sprintf("%s devp2p port seems unreachable", strings.Title(kind)), "server", client.server, "port", port, "err", err)
 }
@@ -259,8 +248,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
 genesis: genesis,
 datadir: infos.volumes["/root/.ethereum"],
 ethashdir: infos.volumes["/root/.ethash"],
-portFull: infos.portmap[infos.envvars["FULL_PORT"]],
-portLight: infos.portmap[infos.envvars["LIGHT_PORT"]],
+port: port,
 peersTotal: totalPeers,
 peersLight: lightPeers,
 ethstats: infos.envvars["STATS_NAME"],
@@ -270,9 +258,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
 gasTarget: gasTarget,
 gasPrice: gasPrice,
 }
-stats.enodeFull = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.portFull)
-if stats.portLight != 0 {
-stats.enodeLight = fmt.Sprintf("enode://%s@%s:%d?discport=%d", id, client.address, stats.portFull, stats.portLight)
-}
+stats.enode = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.port)
 return stats, nil
 }
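Note: checkNode now reports a single enode URL instead of a full/light pair. A tiny runnable Go sketch of the enode construction kept in the hunk above (the id, address and port values are illustrative, not taken from the patch):

package main

import "fmt"

func main() {
	// Illustrative values only; checkNode fills these from the remote container.
	id := "0123abcd"       // devp2p node ID (hex)
	address := "192.0.2.1" // server address
	port := 30303          // single listener port after the full/light split was dropped

	enode := fmt.Sprintf("enode://%s@%s:%d", id, address, port)
	fmt.Println(enode)
}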
@@ -40,8 +40,7 @@ import (
 // between sessions.
 type config struct {
 path string // File containing the configuration values
-bootFull []string // Bootnodes to always connect to by full nodes
-bootLight []string // Bootnodes to always connect to by light nodes
+bootnodes []string // Bootnodes to always connect to by all nodes
 ethstats string // Ethstats settings to cache for node deploys

 Genesis *core.Genesis `json:"genesis,omitempty"` // Genesis block to cache for node deploys
@@ -55,7 +55,7 @@ func (w *wizard) deployExplorer() {
 }
 existed := err == nil

-chainspec, err := newParityChainSpec(w.network, w.conf.Genesis, w.conf.bootFull)
+chainspec, err := newParityChainSpec(w.network, w.conf.Genesis, w.conf.bootnodes)
 if err != nil {
 log.Error("Failed to create chain spec for explorer", "err", err)
 return
@@ -38,7 +38,7 @@ func (w *wizard) deployFaucet() {
 infos, err := checkFaucet(client, w.network)
 if err != nil {
 infos = &faucetInfos{
-node: &nodeInfos{portFull: 30303, peersTotal: 25},
+node: &nodeInfos{port: 30303, peersTotal: 25},
 port: 80,
 host: client.server,
 amount: 1,
@@ -113,8 +113,8 @@ func (w *wizard) deployFaucet() {
 }
 // Figure out which port to listen on
 fmt.Println()
-fmt.Printf("Which TCP/UDP port should the light client listen on? (default = %d)\n", infos.node.portFull)
-infos.node.portFull = w.readDefaultInt(infos.node.portFull)
+fmt.Printf("Which TCP/UDP port should the light client listen on? (default = %d)\n", infos.node.port)
+infos.node.port = w.readDefaultInt(infos.node.port)

 // Set a proper name to report on the stats page
 fmt.Println()
@@ -168,7 +168,7 @@ func (w *wizard) deployFaucet() {
 fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
 nocache = w.readDefaultString("n") != "n"
 }
-if out, err := deployFaucet(client, w.network, w.conf.bootLight, infos, nocache); err != nil {
+if out, err := deployFaucet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
 log.Error("Failed to deploy faucet container", "err", err)
 if len(out) > 0 {
 fmt.Printf("%s\n", out)
@@ -59,15 +59,16 @@ func (w *wizard) run() {
 fmt.Println()

 // Make sure we have a good network name to work with fmt.Println()
+// Docker accepts hyphens in image names, but doesn't like it for container names
 if w.network == "" {
-fmt.Println("Please specify a network name to administer (no spaces, please)")
+fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
 for {
 w.network = w.readString()
-if !strings.Contains(w.network, " ") {
+if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
 fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
 break
 }
-log.Error("I also like to live dangerously, still no spaces")
+log.Error("I also like to live dangerously, still no spaces or hyphens")
 }
 }
 log.Info("Administering Ethereum network", "name", w.network)
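Note: the wizard now rejects hyphens as well as spaces, since Docker tolerates hyphens in image names but not in the container names puppeth derives from the network name. A minimal runnable Go sketch of the same check (the function name validNetworkName is ours, not puppeth's):

package main

import (
	"fmt"
	"strings"
)

// validNetworkName mirrors the condition added above: reject both spaces and hyphens.
func validNetworkName(name string) bool {
	return !strings.Contains(name, " ") && !strings.Contains(name, "-")
}

func main() {
	for _, name := range []string{"testnet", "my net", "my-net"} {
		fmt.Println(name, validNetworkName(name))
	}
}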
@@ -37,8 +37,7 @@ func (w *wizard) networkStats() {
 }
 // Clear out some previous configs to refill from current scan
 w.conf.ethstats = ""
-w.conf.bootFull = w.conf.bootFull[:0]
-w.conf.bootLight = w.conf.bootLight[:0]
+w.conf.bootnodes = w.conf.bootnodes[:0]

 // Iterate over all the specified hosts and check their status
 var pend sync.WaitGroup
@@ -76,8 +75,7 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
 var (
 genesis string
 ethstats string
-bootFull []string
-bootLight []string
+bootnodes []string
 )
 // Ensure a valid SSH connection to the remote server
 logger := log.New("server", server)
@@ -123,10 +121,7 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
 stat.services["bootnode"] = infos.Report()

 genesis = string(infos.genesis)
-bootFull = append(bootFull, infos.enodeFull)
-if infos.enodeLight != "" {
-bootLight = append(bootLight, infos.enodeLight)
-}
+bootnodes = append(bootnodes, infos.enode)
 }
 logger.Debug("Checking for sealnode availability")
 if infos, err := checkNode(client, w.network, false); err != nil {
@@ -184,8 +179,7 @@ func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *s
 if ethstats != "" {
 w.conf.ethstats = ethstats
 }
-w.conf.bootFull = append(w.conf.bootFull, bootFull...)
-w.conf.bootLight = append(w.conf.bootLight, bootLight...)
+w.conf.bootnodes = append(w.conf.bootnodes, bootnodes...)

 return stat
 }
@@ -48,9 +48,9 @@ func (w *wizard) deployNode(boot bool) {
 infos, err := checkNode(client, w.network, boot)
 if err != nil {
 if boot {
-infos = &nodeInfos{portFull: 30303, peersTotal: 512, peersLight: 256}
+infos = &nodeInfos{port: 30303, peersTotal: 512, peersLight: 256}
 } else {
-infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
+infos = &nodeInfos{port: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
 }
 }
 existed := err == nil
@@ -79,8 +79,8 @@ func (w *wizard) deployNode(boot bool) {
 }
 // Figure out which port to listen on
 fmt.Println()
-fmt.Printf("Which TCP/UDP port to listen on? (default = %d)\n", infos.portFull)
-infos.portFull = w.readDefaultInt(infos.portFull)
+fmt.Printf("Which TCP/UDP port to listen on? (default = %d)\n", infos.port)
+infos.port = w.readDefaultInt(infos.port)

 // Figure out how many peers to allow (different based on node type)
 fmt.Println()
@@ -163,7 +163,7 @@ func (w *wizard) deployNode(boot bool) {
 fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
 nocache = w.readDefaultString("n") != "n"
 }
-if out, err := deployNode(client, w.network, w.conf.bootFull, w.conf.bootLight, infos, nocache); err != nil {
+if out, err := deployNode(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
 log.Error("Failed to deploy Ethereum node container", "err", err)
 if len(out) > 0 {
 fmt.Printf("%s\n", out)
@@ -98,7 +98,7 @@ func (w *wizard) deployWallet() {
 fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n")
 nocache = w.readDefaultString("n") != "n"
 }
-if out, err := deployWallet(client, w.network, w.conf.bootFull, infos, nocache); err != nil {
+if out, err := deployWallet(client, w.network, w.conf.bootnodes, infos, nocache); err != nil {
 log.Error("Failed to deploy wallet container", "err", err)
 if len(out) > 0 {
 fmt.Printf("%s\n", out)
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2017 The go-ethereum Authors
 // This file is part of go-ethereum.
 //
 // go-ethereum is free software: you can redistribute it and/or modify
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2017 The go-ethereum Authors
 // This file is part of go-ethereum.
 //
 // go-ethereum is free software: you can redistribute it and/or modify
@@ -25,6 +25,7 @@ import (
 "os/signal"
 "runtime"
 "strings"
+"syscall"

 "github.com/ethereum/go-ethereum/core"
 "github.com/ethereum/go-ethereum/core/types"
@@ -64,7 +65,7 @@ func StartNode(stack *node.Node) {
 }
 go func() {
 sigc := make(chan os.Signal, 1)
-signal.Notify(sigc, os.Interrupt)
+signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
 defer signal.Stop(sigc)
 <-sigc
 log.Info("Got interrupt, shutting down...")
@@ -85,7 +86,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
 // If a signal is received, the import will stop at the next batch.
 interrupt := make(chan os.Signal, 1)
 stop := make(chan struct{})
-signal.Notify(interrupt, os.Interrupt)
+signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
 defer signal.Stop(interrupt)
 defer close(interrupt)
 go func() {
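Note: registering syscall.SIGINT and syscall.SIGTERM (rather than only os.Interrupt) also catches termination requests from service managers and plain kill. A minimal, self-contained Go sketch of the same shutdown pattern using only the standard library:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Buffered channel so a signal arriving before the receive is not lost.
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(sigc)

	fmt.Println("waiting for SIGINT or SIGTERM...")
	<-sigc
	fmt.Println("got signal, shutting down...")
}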
@@ -397,6 +397,11 @@ var (
 Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
 Value: "",
 }
+RPCVirtualHostsFlag = cli.StringFlag{
+Name: "rpcvhosts",
+Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
+Value: "localhost",
+}
 RPCApiFlag = cli.StringFlag{
 Name: "rpcapi",
 Usage: "API's offered over the HTTP-RPC interface",
@@ -690,6 +695,8 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
 if ctx.GlobalIsSet(RPCApiFlag.Name) {
 cfg.HTTPModules = splitAndTrim(ctx.GlobalString(RPCApiFlag.Name))
 }
+
+cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(RPCVirtualHostsFlag.Name))
 }

 // setWS creates the WebSocket RPC listener interface string from the set
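Note: the new --rpcvhosts value is a comma separated list that gets split and whitespace-trimmed before it reaches the node config. The sketch below re-implements that parsing step for illustration only; the real splitAndTrim helper lives unexported in cmd/utils/flags.go and its exact behavior there is the authoritative one.

package main

import (
	"fmt"
	"strings"
)

// splitAndTrim splits a comma separated list and trims surrounding whitespace
// from every element (illustrative re-implementation).
func splitAndTrim(input string) []string {
	result := strings.Split(input, ",")
	for i, r := range result {
		result[i] = strings.TrimSpace(r)
	}
	return result
}

func main() {
	// --rpcvhosts defaults to "localhost"; "*" acts as a wildcard.
	fmt.Println(splitAndTrim("localhost, node.example.org ,*"))
}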
@@ -43,7 +43,7 @@ import (
 "github.com/ethereum/go-ethereum/p2p/discover"
 "github.com/ethereum/go-ethereum/p2p/nat"
 "github.com/ethereum/go-ethereum/whisper/mailserver"
-whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
+whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
 "golang.org/x/crypto/pbkdf2"
 )

@@ -66,8 +66,10 @@ var (
 asymKey *ecdsa.PrivateKey
 nodeid *ecdsa.PrivateKey
 topic whisper.TopicType

 asymKeyID string
-filterID string
+asymFilterID string
+symFilterID string
 symPass string
 msPassword string
 )
@@ -263,7 +265,7 @@ func initialize() {
 Config: p2p.Config{
 PrivateKey: nodeid,
 MaxPeers: maxPeers,
-Name: common.MakeName("wnode", "5.0"),
+Name: common.MakeName("wnode", "6.0"),
 Protocols: shh.Protocols(),
 ListenAddr: *argIP,
 NAT: nat.Any(),
@@ -363,13 +365,22 @@ func configureNode() {
 }
 }

-filter := whisper.Filter{
+symFilter := whisper.Filter{
 KeySym: symKey,
+Topics: [][]byte{topic[:]},
+AllowP2P: p2pAccept,
+}
+symFilterID, err = shh.Subscribe(&symFilter)
+if err != nil {
+utils.Fatalf("Failed to install filter: %s", err)
+}
+
+asymFilter := whisper.Filter{
 KeyAsym: asymKey,
 Topics: [][]byte{topic[:]},
 AllowP2P: p2pAccept,
 }
-filterID, err = shh.Subscribe(&filter)
+asymFilterID, err = shh.Subscribe(&asymFilter)
 if err != nil {
 utils.Fatalf("Failed to install filter: %s", err)
 }
@@ -522,9 +533,14 @@ func sendMsg(payload []byte) common.Hash {
 }

 func messageLoop() {
-f := shh.GetFilter(filterID)
-if f == nil {
-utils.Fatalf("filter is not installed")
+sf := shh.GetFilter(symFilterID)
+if sf == nil {
+utils.Fatalf("symmetric filter is not installed")
+}
+
+af := shh.GetFilter(asymFilterID)
+if af == nil {
+utils.Fatalf("asymmetric filter is not installed")
 }

 ticker := time.NewTicker(time.Millisecond * 50)
@@ -532,7 +548,16 @@ func messageLoop() {
 for {
 select {
 case <-ticker.C:
-messages := f.Retrieve()
+messages := sf.Retrieve()
+for _, msg := range messages {
+if *fileExMode || len(msg.Payload) > 2048 {
+writeMessageToFile(*argSaveDir, msg)
+} else {
+printMessageInfo(msg)
+}
+}
+
+messages = af.Retrieve()
 for _, msg := range messages {
 if *fileExMode || len(msg.Payload) > 2048 {
 writeMessageToFile(*argSaveDir, msg)
@@ -631,7 +656,7 @@ func requestExpiredMessagesLoop() {
 params.PoW = *argServerPoW
 params.Payload = data
 params.KeySym = key
-params.Src = nodeid
+params.Src = asymKey
 params.WorkTime = 5

 msg, err := whisper.NewSentMessage(&params)
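Note: wnode now keeps one symmetric and one asymmetric filter and drains both on every tick. The following Go sketch isolates that dual-subscription pattern using only the Whisper v6 calls visible in the hunks above (Filter, Subscribe, GetFilter, Retrieve); wiring up a live Whisper node, keys and topic is out of scope here and left to the caller, so treat it as a compilable illustration rather than a drop-in replacement for wnode.

package sketch

import (
	"crypto/ecdsa"
	"fmt"
	"time"

	whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
)

// subscribeBoth installs a symmetric and an asymmetric filter for the same topic
// and returns both filter IDs.
func subscribeBoth(shh *whisper.Whisper, symKey []byte, asymKey *ecdsa.PrivateKey, topic whisper.TopicType) (string, string, error) {
	symID, err := shh.Subscribe(&whisper.Filter{
		KeySym:   symKey,
		Topics:   [][]byte{topic[:]},
		AllowP2P: true,
	})
	if err != nil {
		return "", "", err
	}
	asymID, err := shh.Subscribe(&whisper.Filter{
		KeyAsym:  asymKey,
		Topics:   [][]byte{topic[:]},
		AllowP2P: true,
	})
	return symID, asymID, err
}

// poll drains both filters on a fixed tick, mirroring messageLoop above.
func poll(shh *whisper.Whisper, symID, asymID string) {
	ticker := time.NewTicker(50 * time.Millisecond)
	for range ticker.C {
		for _, msg := range shh.GetFilter(symID).Retrieve() {
			fmt.Printf("sym message: %d bytes\n", len(msg.Payload))
		}
		for _, msg := range shh.GetFilter(asymID).Retrieve() {
			fmt.Printf("asym message: %d bytes\n", len(msg.Payload))
		}
	}
}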
@@ -1,18 +1,18 @@
 // Copyright 2016 The go-ethereum Authors
-// This file is part of go-ethereum.
+// This file is part of the go-ethereum library.
 //
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 //
-// go-ethereum is distributed in the hope that it will be useful,
+// The go-ethereum library is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
+// GNU Lesser General Public License for more details.
 //
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

 // +build freebsd

@@ -1,18 +1,18 @@
 // Copyright 2016 The go-ethereum Authors
-// This file is part of go-ethereum.
+// This file is part of the go-ethereum library.
 //
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 //
-// go-ethereum is distributed in the hope that it will be useful,
+// The go-ethereum library is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
+// GNU Lesser General Public License for more details.
 //
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

 package fdlimit

@@ -1,18 +1,18 @@
 // Copyright 2016 The go-ethereum Authors
-// This file is part of go-ethereum.
+// This file is part of the go-ethereum library.
 //
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 //
-// go-ethereum is distributed in the hope that it will be useful,
+// The go-ethereum library is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
+// GNU Lesser General Public License for more details.
 //
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

 // +build linux darwin netbsd openbsd solaris

@@ -1,18 +1,18 @@
-// Copyright 2016 The go-ethereum Authors
-// This file is part of go-ethereum.
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
 //
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version.
 //
-// go-ethereum is distributed in the hope that it will be useful,
+// The go-ethereum library is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
+// GNU Lesser General Public License for more details.
 //
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

 package fdlimit
@@ -26,6 +26,7 @@ import (
 "regexp"
 "sort"
 "strings"
+"syscall"

 "github.com/ethereum/go-ethereum/internal/jsre"
 "github.com/ethereum/go-ethereum/internal/web3ext"
@@ -332,7 +333,7 @@ func (c *Console) Interactive() {
 }()
 // Monitor Ctrl-C too in case the input is empty and we need to bail
 abort := make(chan os.Signal, 1)
-signal.Notify(abort, os.Interrupt)
+signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM)

 // Start sending prompts to the user and reading back inputs
 for {
@@ -2,7 +2,7 @@ FROM alpine:3.7

 RUN \
 apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
-git clone --depth 1 --branch release/1.7 https://github.com/ethereum/go-ethereum && \
+git clone --depth 1 --branch release/1.8 https://github.com/ethereum/go-ethereum && \
 (cd go-ethereum && make geth) && \
 cp go-ethereum/build/bin/geth /geth && \
 apk del go git make gcc musl-dev linux-headers && \
@@ -5,7 +5,7 @@ ENV PATH=/usr/lib/go-1.9/bin:$PATH
 RUN \
 apt-get update && apt-get upgrade -q -y && \
 apt-get install -y --no-install-recommends golang-1.9 git make gcc libc-dev ca-certificates && \
-git clone --depth 1 --branch release/1.7 https://github.com/ethereum/go-ethereum && \
+git clone --depth 1 --branch release/1.8 https://github.com/ethereum/go-ethereum && \
 (cd go-ethereum && make geth) && \
 cp go-ethereum/build/bin/geth /geth && \
 apt-get remove -y golang-1.9 git make gcc libc-dev && apt autoremove -y && apt-get clean && \
@@ -926,13 +926,9 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
 if chosen < lastWrite+triesInMemory {
 switch {
 case size >= 2*limit:
-log.Error("Trie memory critical, forcing to disk", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
+log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
 case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit:
-log.Error("Trie timing critical, forcing to disk", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
-case size > limit:
-log.Warn("Trie memory at dangerous levels", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory)
-case bc.gcproc > bc.cacheConfig.TrieTimeLimit:
-log.Warn("Trie timing at dangerous levels", "time", bc.gcproc, "limit", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
+log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
 }
 }
 // If optimum or critical limits reached, write to disk
@@ -1070,8 +1066,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
 }
 switch {
 case err == ErrKnownBlock:
+// Block and state both already known. However if the current block is below
+// this number we did a rollback and we should reimport it nonetheless.
+if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
 stats.ignored++
 continue
+}
+
 case err == consensus.ErrFutureBlock:
 // Allow up to MaxFuture second in the future blocks. If this limit is exceeded
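Note: the ErrKnownBlock branch now only skips a block while the current head is at or above it; after a rollback the block is reimported. A small runnable Go sketch of just that condition (the helper name is ours and plain integers stand in for the real chain types):

package main

import "fmt"

// shouldIgnoreKnownBlock returns true when a known block can be skipped, i.e.
// the canonical head is already at or past it; false means a rollback happened
// and the block should be reimported.
func shouldIgnoreKnownBlock(currentHead, incoming uint64) bool {
	return currentHead >= incoming
}

func main() {
	fmt.Println(shouldIgnoreKnownBlock(100, 90)) // true: already part of the canonical past
	fmt.Println(shouldIgnoreKnownBlock(80, 90))  // false: head rolled back, reimport block 90
}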
@@ -215,6 +215,9 @@ func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bo

 // Pay intrinsic gas
 gas, err := IntrinsicGas(st.data, contractCreation, homestead)
+if err != nil {
+return nil, 0, false, err
+}
 if err = st.useGas(gas); err != nil {
 return nil, 0, false, err
 }
@@ -1,3 +1,19 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
 package vm

 import (
@@ -1,3 +1,19 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
 package vm

 import (
@@ -20,9 +20,7 @@ import (
 "fmt"
 "sync/atomic"

-"github.com/ethereum/go-ethereum/common"
 "github.com/ethereum/go-ethereum/common/math"
-"github.com/ethereum/go-ethereum/crypto"
 "github.com/ethereum/go-ethereum/params"
 )

@@ -123,11 +121,6 @@ func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err er
 return nil, nil
 }

-codehash := contract.CodeHash // codehash is used when doing jump dest caching
-if codehash == (common.Hash{}) {
-codehash = crypto.Keccak256Hash(contract.Code)
-}
-
 var (
 op OpCode // current opcode
 mem = NewMemory() // bound memory
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2017 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
@@ -173,8 +173,8 @@ type LightChain interface {
 type BlockChain interface {
 LightChain

-// HasBlockAndState verifies block and associated states' presence in the local chain.
-HasBlockAndState(common.Hash, uint64) bool
+// HasBlock verifies a block's presence in the local chain.
+HasBlock(common.Hash, uint64) bool

 // GetBlockByHash retrieves a block from the local chain.
 GetBlockByHash(common.Hash) *types.Block
@@ -266,7 +266,6 @@ func (d *Downloader) Synchronising() bool {
 // RegisterPeer injects a new download peer into the set of block source to be
 // used for fetching hashes and blocks from.
 func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
-
 logger := log.New("peer", id)
 logger.Trace("Registering sync peer")
 if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
@@ -583,7 +582,6 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 // Figure out the valid ancestor range to prevent rewrite attacks
 floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()

-p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
 if d.mode == FullSync {
 ceil = d.blockchain.CurrentBlock().NumberU64()
 } else if d.mode == FastSync {
@@ -592,6 +590,8 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 if ceil >= MaxForkAncestry {
 floor = int64(ceil - MaxForkAncestry)
 }
+p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
+
 // Request the topmost blocks to short circuit binary ancestor lookup
 head := ceil
 if head > height {
@@ -647,7 +647,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 continue
 }
 // Otherwise check if we already know the header or not
-if (d.mode == FullSync && d.blockchain.HasBlockAndState(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) {
+if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) {
 number, hash = headers[i].Number.Uint64(), headers[i].Hash()

 // If every header is known, even future ones, the peer straight out lied about its head
@@ -712,7 +712,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 arrived = true

 // Modify the search interval based on the response
-if (d.mode == FullSync && !d.blockchain.HasBlockAndState(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) {
+if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) {
 end = check
 break
 }
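Note: the ancestor lookup now asks only whether the block itself is present in full sync, and whether the header is present otherwise. The runnable Go sketch below restates that mode-dependent test with interfaces trimmed to the two methods actually used; the names knownLocally and stub, and the string hashes, are ours for illustration and not part of the downloader.

package main

import "fmt"

type chain interface{ HasBlock(hash string, number uint64) bool }
type lightChain interface{ HasHeader(hash string, number uint64) bool }

// knownLocally mirrors the check above: full sync needs the block, every other
// mode only needs the header.
func knownLocally(fullSync bool, bc chain, lc lightChain, hash string, number uint64) bool {
	if fullSync {
		return bc.HasBlock(hash, number)
	}
	return lc.HasHeader(hash, number)
}

type stub struct{ blocks, headers map[string]bool }

func (s stub) HasBlock(h string, _ uint64) bool  { return s.blocks[h] }
func (s stub) HasHeader(h string, _ uint64) bool { return s.headers[h] }

func main() {
	s := stub{blocks: map[string]bool{"0xaa": true}, headers: map[string]bool{"0xaa": true, "0xbb": true}}
	fmt.Println(knownLocally(true, s, s, "0xbb", 2))  // false: header known, block not imported
	fmt.Println(knownLocally(false, s, s, "0xbb", 2)) // true: light/fast sync only needs the header
}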
|
@ -221,14 +221,9 @@ func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
return dl.GetHeaderByHash(hash) != nil
}

- // HasBlockAndState checks if a block and associated state is present in the testers canonical chain.
- func (dl *downloadTester) HasBlockAndState(hash common.Hash, number uint64) bool {
- block := dl.GetBlockByHash(hash)
- if block == nil {
- return false
- }
- _, err := dl.stateDb.Get(block.Root().Bytes())
- return err == nil
+ // HasBlock checks if a block is present in the testers canonical chain.
+ func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
+ return dl.GetBlockByHash(hash) != nil
}

// GetHeader retrieves a header from the testers canonical chain.
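For orientation on the renamed helper: checking for a block is a plain chain lookup, while the old HasBlockAndState additionally required the block's state root to be present in the state database, which is exactly what the removed test implementation did. A generic sketch of the two predicates, assuming stand-in accessors rather than the real tester types:

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// hasBlock: the block body is known to the chain.
func hasBlock(getBlock func(common.Hash) *types.Block, hash common.Hash) bool {
	return getBlock(hash) != nil
}

// hasBlockAndState: the block is known and its state root can also be found in
// the state database, mirroring the stricter check that was removed above.
func hasBlockAndState(getBlock func(common.Hash) *types.Block, stateDb ethdb.Database, hash common.Hash) bool {
	block := getBlock(hash)
	if block == nil {
		return false
	}
	_, err := stateDb.Get(block.Root().Bytes())
	return err == nil
}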
11 eth/sync.go
@ -189,18 +189,13 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
mode = downloader.FastSync
}
// Run the sync cycle, and disable fast sync if we've went past the pivot block
- err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode)
+ if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
+ return
+ }
if atomic.LoadUint32(&pm.fastSync) == 1 {
- // Disable fast sync if we indeed have something in our chain
- if pm.blockchain.CurrentBlock().NumberU64() > 0 {
log.Info("Fast sync complete, auto disabling")
atomic.StoreUint32(&pm.fastSync, 0)
}
- }
- if err != nil {
- return
- }
atomic.StoreUint32(&pm.acceptTxs, 1) // Mark initial sync done
if head := pm.blockchain.CurrentBlock(); head.NumberU64() > 0 {
// We've completed a sync cycle, notify all peers of new state. This path is
@ -1,4 +1,4 @@
- // Copyright 2016 The go-ethereum Authors
+ // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@ -1,4 +1,4 @@
- // Copyright 2016 The go-ethereum Authors
+ // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@ -94,7 +94,7 @@ func LocalEnv() Environment {
}
if env.Branch == "" {
if head != "HEAD" {
- env.Branch = strings.TrimLeft(head, "refs/heads/")
+ env.Branch = strings.TrimPrefix(head, "refs/heads/")
}
}
if info, err := os.Stat(".git/objects"); err == nil && info.IsDir() && env.Tag == "" {
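Why the TrimLeft call above was wrong: strings.TrimLeft interprets its second argument as a set of characters rather than a literal prefix, so any branch name starting with letters drawn from "refs/heads/" was silently truncated. A small standalone illustration (the branch name is only an example):

package main

import (
	"fmt"
	"strings"
)

func main() {
	head := "refs/heads/develop"

	// TrimLeft strips any leading run of characters from the cutset
	// "refs/heads/", so it also eats the 'd' and 'e' of "develop".
	fmt.Println(strings.TrimLeft(head, "refs/heads/")) // "velop"

	// TrimPrefix removes the exact prefix once, which is what LocalEnv wants.
	fmt.Println(strings.TrimPrefix(head, "refs/heads/")) // "develop"
}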
@ -1,18 +1,18 @@
- // Copyright 2016 The go-ethereum Authors
+ // Copyright 2017 The go-ethereum Authors
- // This file is part of go-ethereum.
+ // This file is part of the go-ethereum library.
//
- // go-ethereum is free software: you can redistribute it and/or modify
+ // The go-ethereum library is free software: you can redistribute it and/or modify
- // it under the terms of the GNU General Public License as published by
+ // it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
- // go-ethereum is distributed in the hope that it will be useful,
+ // The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- // GNU General Public License for more details.
+ // GNU Lesser General Public License for more details.
//
- // You should have received a copy of the GNU General Public License
+ // You should have received a copy of the GNU Lesser General Public License
- // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+ // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package cmdtest

@ -1,4 +1,4 @@
- // Copyright 2015 The go-ethereum Authors
+ // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@ -1135,6 +1135,18 @@ func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error {
if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) {
return errors.New(`Both "data" and "input" are set and not equal. Please use "input" to pass transaction call data.`)
}
+ if args.To == nil {
+ // Contract creation
+ var input []byte
+ if args.Data != nil {
+ input = *args.Data
+ } else if args.Input != nil {
+ input = *args.Input
+ }
+ if len(input) == 0 {
+ return errors.New(`contract creation without any data provided`)
+ }
+ }
return nil
}

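What the new block above enforces, in isolation: a missing "to" address marks the transaction as a contract creation, "data" takes precedence over "input" when picking the payload, and an empty payload is rejected before a transaction is ever assembled. A minimal sketch of that rule with stand-in types (not the real SendTxArgs):

package example

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
)

// contractCreationPayload mirrors the guard: nil destination means creation,
// data wins over input, and an empty creation payload is an error.
func contractCreationPayload(to *common.Address, data, input []byte) ([]byte, error) {
	if to != nil {
		return nil, nil // ordinary call or value transfer, nothing to validate here
	}
	payload := data
	if payload == nil {
		payload = input
	}
	if len(payload) == 0 {
		return nil, errors.New("contract creation without any data provided")
	}
	return payload, nil
}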
@ -36,24 +36,26 @@ const (
maxNodeCount = 20 // maximum number of fetcherTreeNode entries remembered for each peer
)

- // lightFetcher
+ // lightFetcher implements retrieval of newly announced headers. It also provides a peerHasBlock function for the
+ // ODR system to ensure that we only request data related to a certain block from peers who have already processed
+ // and announced that block.
type lightFetcher struct {
pm *ProtocolManager
odr *LesOdr
chain *light.LightChain

+ lock sync.Mutex // lock protects access to the fetcher's internal state variables except sent requests
maxConfirmedTd *big.Int
peers map[*peer]*fetcherPeerInfo
lastUpdateStats *updateStatsEntry

- lock sync.Mutex // qwerqwerqwe
- deliverChn chan fetchResponse
- reqMu sync.RWMutex
- requested map[uint64]fetchRequest
- timeoutChn chan uint64
- requestChn chan bool // true if initiated from outside
syncing bool
syncDone chan *peer

+ reqMu sync.RWMutex // reqMu protects access to sent header fetch requests
+ requested map[uint64]fetchRequest
+ deliverChn chan fetchResponse
+ timeoutChn chan uint64
+ requestChn chan bool // true if initiated from outside
}

// fetcherPeerInfo holds fetcher-specific information about each active peer
@ -425,6 +427,9 @@ func (f *lightFetcher) nextRequest() (*distReq, uint64) {
},
canSend: func(dp distPeer) bool {
p := dp.(*peer)
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
fp := f.peers[p]
return fp != nil && fp.nodeByHash[bestHash] != nil
},
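The canSend hunk above follows a general rule worth spelling out: the closure is handed off to run on another goroutine, so the shared peers map has to be read under the fetcher's mutex. A minimal sketch of that pattern with illustrative names (not the real LES types):

package fetcher

import "sync"

type fetcher struct {
	lock  sync.Mutex
	peers map[string]bool
}

// canSendTo is safe to call from any goroutine because the shared map is only
// touched while the mutex is held; without the lock the read would race with
// the loop that adds and removes peers.
func (f *fetcher) canSendTo(id string) bool {
	f.lock.Lock()
	defer f.lock.Unlock()
	return f.peers[id]
}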
@ -557,8 +562,13 @@ func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*typ
return true
}
// we ran out of recently delivered headers but have not reached a node known by this peer yet, continue matching
- td = f.chain.GetTd(header.ParentHash, header.Number.Uint64()-1)
- header = f.chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
+ hash, number := header.ParentHash, header.Number.Uint64()-1
+ td = f.chain.GetTd(hash, number)
+ header = f.chain.GetHeader(hash, number)
+ if header == nil || td == nil {
+ log.Error("Missing parent of validated header", "hash", hash, "number", number)
+ return false
+ }
} else {
header = headers[i]
td = tds[i]
@ -642,13 +652,18 @@ func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
if td == nil {
return false
}
+ header := f.chain.GetHeader(n.hash, n.number)
+ // check the availability of both header and td because reads are not protected by chain db mutex
+ // Note: returning false is always safe here
+ if header == nil {
+ return false
+ }
+
fp := f.peers[p]
if fp == nil {
p.Log().Debug("Unknown peer to check known nodes")
return false
}
- header := f.chain.GetHeader(n.hash, n.number)
if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
p.Log().Debug("Inconsistent announcement")
go f.pm.removePeer(p.id)
@ -790,10 +790,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
break
}
}
- proofs := nodes.NodeList()
bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
- return p.SendProofsV2(req.ReqID, bv, proofs)
+ return p.SendProofsV2(req.ReqID, bv, nodes.NodeList())

case ProofsV1Msg:
if pm.odr == nil {
@ -856,15 +855,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if reject(uint64(reqCnt), MaxHelperTrieProofsFetch) {
return errResp(ErrRequestRejected, "")
}
+ trieDb := trie.NewDatabase(ethdb.NewTable(pm.chainDb, light.ChtTablePrefix))
for _, req := range req.Reqs {
if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
- sectionHead := core.GetCanonicalHash(pm.chainDb, req.ChtNum*light.ChtV1Frequency-1)
+ sectionHead := core.GetCanonicalHash(pm.chainDb, req.ChtNum*light.CHTFrequencyServer-1)
if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) {
- statedb, err := pm.blockchain.State()
- if err != nil {
- continue
- }
- trie, err := statedb.Database().OpenTrie(root)
+ trie, err := trie.New(root, trieDb)
if err != nil {
continue
}
@ -878,7 +874,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if bytes += proof.DataSize() + estHeaderRlpSize; bytes >= softResponseLimit {
break
}

}
}
}
@ -910,20 +905,16 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
lastIdx uint64
lastType uint
root common.Hash
- statedb *state.StateDB
- trie state.Trie
+ auxTrie *trie.Trie
)

nodes := light.NewNodeSet()

for _, req := range req.Reqs {
- if trie == nil || req.HelperTrieType != lastType || req.TrieIdx != lastIdx {
- statedb, trie, lastType, lastIdx = nil, nil, req.HelperTrieType, req.TrieIdx
+ if auxTrie == nil || req.Type != lastType || req.TrieIdx != lastIdx {
+ auxTrie, lastType, lastIdx = nil, req.Type, req.TrieIdx

- if root, _ = pm.getHelperTrie(req.HelperTrieType, req.TrieIdx); root != (common.Hash{}) {
- if statedb, _ = pm.blockchain.State(); statedb != nil {
- trie, _ = statedb.Database().OpenTrie(root)
- }
+ var prefix string
+ if root, prefix = pm.getHelperTrie(req.Type, req.TrieIdx); root != (common.Hash{}) {
+ auxTrie, _ = trie.New(root, trie.NewDatabase(ethdb.NewTable(pm.chainDb, prefix)))
}
}
if req.AuxReq == auxRoot {
@ -934,8 +925,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
auxData = append(auxData, data)
auxBytes += len(data)
} else {
- if trie != nil {
- trie.Prove(req.Key, req.FromLevel, nodes)
+ if auxTrie != nil {
+ auxTrie.Prove(req.Key, req.FromLevel, nodes)
}
if req.AuxReq != 0 {
data := pm.getHelperTrieAuxData(req)
@ -947,10 +938,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
break
}
}
- proofs := nodes.NodeList()
bv, rcost := p.fcClient.RequestProcessed(costs.baseCost + uint64(reqCnt)*costs.reqCost)
pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
- return p.SendHelperTrieProofs(req.ReqID, bv, HelperTrieResps{Proofs: proofs, AuxData: auxData})
+ return p.SendHelperTrieProofs(req.ReqID, bv, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData})

case HeaderProofsMsg:
if pm.odr == nil {
@ -1123,7 +1113,7 @@ func (pm *ProtocolManager) getAccount(statedb *state.StateDB, root, hash common.
func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
switch id {
case htCanonical:
- sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.ChtFrequency-1)
+ sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.CHTFrequencyClient-1)
return light.GetChtV2Root(pm.chainDb, idx, sectionHead), light.ChtTablePrefix
case htBloomBits:
sectionHead := core.GetCanonicalHash(pm.chainDb, (idx+1)*light.BloomTrieFrequency-1)
@ -1134,10 +1124,8 @@ func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, stri

// getHelperTrieAuxData returns requested auxiliary data for the given HelperTrie request
func (pm *ProtocolManager) getHelperTrieAuxData(req HelperTrieReq) []byte {
- if req.HelperTrieType == htCanonical && req.AuxReq == auxHeader {
- if len(req.Key) != 8 {
- return nil
- }
+ switch {
+ case req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8:
blockNum := binary.BigEndian.Uint64(req.Key)
hash := core.GetCanonicalHash(pm.chainDb, blockNum)
return core.GetHeaderRLP(pm.chainDb, hash, blockNum)
@ -17,7 +17,7 @@
package les

import (
- "bytes"
+ "encoding/binary"
"math/big"
"math/rand"
"testing"
@ -45,27 +45,8 @@ func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}
return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
}

- func testCheckProof(t *testing.T, exp *light.NodeSet, got light.NodeList) {
- if exp.KeyCount() > len(got) {
- t.Errorf("proof has fewer nodes than expected")
- return
- }
- if exp.KeyCount() < len(got) {
- t.Errorf("proof has more nodes than expected")
- return
- }
- for _, node := range got {
- n, _ := exp.Get(crypto.Keccak256(node))
- if !bytes.Equal(n, node) {
- t.Errorf("proof contents mismatch")
- return
- }
- }
- }

// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) }

func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }

func testGetBlockHeaders(t *testing.T, protocol int) {
@ -196,7 +177,6 @@ func testGetBlockHeaders(t *testing.T, protocol int) {

// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) }

func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }

func testGetBlockBodies(t *testing.T, protocol int) {
@ -274,7 +254,6 @@ func testGetBlockBodies(t *testing.T, protocol int) {

// Tests that the contract codes can be retrieved based on account addresses.
func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) }

func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }

func testGetCode(t *testing.T, protocol int) {
@ -309,7 +288,6 @@ func testGetCode(t *testing.T, protocol int) {

// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceiptLes1(t *testing.T) { testGetReceipt(t, 1) }

func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }

func testGetReceipt(t *testing.T, protocol int) {
@ -338,7 +316,6 @@ func testGetReceipt(t *testing.T, protocol int) {

// Tests that trie merkle proofs can be retrieved
func TestGetProofsLes1(t *testing.T) { testGetProofs(t, 1) }

func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }

func testGetProofs(t *testing.T, protocol int) {
@ -389,27 +366,126 @@ func testGetProofs(t *testing.T, protocol int) {
case 2:
cost := peer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
sendRequest(peer.app, GetProofsV2Msg, 42, cost, proofreqs)
- msg, err := peer.app.ReadMsg()
- if err != nil {
- t.Errorf("Message read error: %v", err)
- }
- var resp struct {
- ReqID, BV uint64
- Data light.NodeList
- }
- if err := msg.Decode(&resp); err != nil {
- t.Errorf("reply decode error: %v", err)
- }
- if msg.Code != ProofsV2Msg {
- t.Errorf("Message code mismatch")
- }
- if resp.ReqID != 42 {
- t.Errorf("ReqID mismatch")
- }
- if resp.BV != testBufLimit {
- t.Errorf("BV mismatch")
- }
- testCheckProof(t, proofsV2, resp.Data)
+ if err := expectResponse(peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
+ t.Errorf("proofs mismatch: %v", err)
+ }
}
}

+ // Tests that CHT proofs can be correctly retrieved.
+ func TestGetCHTProofsLes1(t *testing.T) { testGetCHTProofs(t, 1) }
+ func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
+
+ func testGetCHTProofs(t *testing.T, protocol int) {
+ // Figure out the client's CHT frequency
+ frequency := uint64(light.CHTFrequencyClient)
+ if protocol == 1 {
+ frequency = uint64(light.CHTFrequencyServer)
+ }
+ // Assemble the test environment
+ db, _ := ethdb.NewMemDatabase()
+ pm := newTestProtocolManagerMust(t, false, int(frequency)+light.HelperTrieProcessConfirmations, testChainGen, nil, nil, db)
+ bc := pm.blockchain.(*core.BlockChain)
+ peer, _ := newTestPeer(t, "peer", protocol, pm, true)
+ defer peer.close()
+
+ // Wait a while for the CHT indexer to process the new headers
+ time.Sleep(100 * time.Millisecond * time.Duration(frequency/light.CHTFrequencyServer)) // Chain indexer throttling
+ time.Sleep(250 * time.Millisecond) // CI tester slack
+
+ // Assemble the proofs from the different protocols
+ header := bc.GetHeaderByNumber(frequency)
+ rlp, _ := rlp.EncodeToBytes(header)
+
+ key := make([]byte, 8)
+ binary.BigEndian.PutUint64(key, frequency)
+
+ proofsV1 := []ChtResp{{
+ Header: header,
+ }}
+ proofsV2 := HelperTrieResps{
+ AuxData: [][]byte{rlp},
+ }
+ switch protocol {
+ case 1:
+ root := light.GetChtRoot(db, 0, bc.GetHeaderByNumber(frequency-1).Hash())
+ trie, _ := trie.New(root, trie.NewDatabase(ethdb.NewTable(db, light.ChtTablePrefix)))
+
+ var proof light.NodeList
+ trie.Prove(key, 0, &proof)
+ proofsV1[0].Proof = proof
+
+ case 2:
+ root := light.GetChtV2Root(db, 0, bc.GetHeaderByNumber(frequency-1).Hash())
+ trie, _ := trie.New(root, trie.NewDatabase(ethdb.NewTable(db, light.ChtTablePrefix)))
+ trie.Prove(key, 0, &proofsV2.Proofs)
+ }
+ // Assemble the requests for the different protocols
+ requestsV1 := []ChtReq{{
+ ChtNum: 1,
+ BlockNum: frequency,
+ }}
+ requestsV2 := []HelperTrieReq{{
+ Type: htCanonical,
+ TrieIdx: 0,
+ Key: key,
+ AuxReq: auxHeader,
+ }}
+ // Send the proof request and verify the response
+ switch protocol {
+ case 1:
+ cost := peer.GetRequestCost(GetHeaderProofsMsg, len(requestsV1))
+ sendRequest(peer.app, GetHeaderProofsMsg, 42, cost, requestsV1)
+ if err := expectResponse(peer.app, HeaderProofsMsg, 42, testBufLimit, proofsV1); err != nil {
+ t.Errorf("proofs mismatch: %v", err)
+ }
+ case 2:
+ cost := peer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
+ sendRequest(peer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
+ if err := expectResponse(peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
+ t.Errorf("proofs mismatch: %v", err)
+ }
+ }
+ }
+
+ // Tests that bloombits proofs can be correctly retrieved.
+ func TestGetBloombitsProofs(t *testing.T) {
+ // Assemble the test environment
+ db, _ := ethdb.NewMemDatabase()
+ pm := newTestProtocolManagerMust(t, false, light.BloomTrieFrequency+256, testChainGen, nil, nil, db)
+ bc := pm.blockchain.(*core.BlockChain)
+ peer, _ := newTestPeer(t, "peer", 2, pm, true)
+ defer peer.close()
+
+ // Wait a while for the bloombits indexer to process the new headers
+ time.Sleep(100 * time.Millisecond * time.Duration(light.BloomTrieFrequency/4096)) // Chain indexer throttling
+ time.Sleep(250 * time.Millisecond) // CI tester slack
+
+ // Request and verify each bit of the bloom bits proofs
+ for bit := 0; bit < 2048; bit++ {
+ // Assemble therequest and proofs for the bloombits
+ key := make([]byte, 10)
+
+ binary.BigEndian.PutUint16(key[:2], uint16(bit))
+ binary.BigEndian.PutUint64(key[2:], uint64(light.BloomTrieFrequency))
+
+ requests := []HelperTrieReq{{
+ Type: htBloomBits,
+ TrieIdx: 0,
+ Key: key,
+ }}
+ var proofs HelperTrieResps
+
+ root := light.GetBloomTrieRoot(db, 0, bc.GetHeaderByNumber(light.BloomTrieFrequency-1).Hash())
+ trie, _ := trie.New(root, trie.NewDatabase(ethdb.NewTable(db, light.BloomTrieTablePrefix)))
+ trie.Prove(key, 0, &proofs.Proofs)
+
+ // Send the proof request and verify the response
+ cost := peer.GetRequestCost(GetHelperTrieProofsMsg, len(requests))
+ sendRequest(peer.app, GetHelperTrieProofsMsg, 42, cost, requests)
+ if err := expectResponse(peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
+ t.Errorf("bit %d: proofs mismatch: %v", bit, err)
+ }
+ }
+ }
@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/les/flowcontrol"
@ -55,6 +56,9 @@ var (
testContractCodeDeployed = testContractCode[16:]
testContractDeployed = uint64(2)

+ testEventEmitterCode = common.Hex2Bytes("60606040523415600e57600080fd5b7f57050ab73f6b9ebdd9f76b8d4997793f48cf956e965ee070551b9ca0bb71584e60405160405180910390a160358060476000396000f3006060604052600080fd00a165627a7a723058203f727efcad8b5811f8cb1fc2620ce5e8c63570d697aef968172de296ea3994140029")
+ testEventEmitterAddr common.Address
+
testBufLimit = uint64(100)
)

@ -85,15 +89,19 @@ func testChainGen(i int, block *core.BlockGen) {
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
// acc1Addr creates a test contract.
- tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
+ // acc1Addr creates a test event.
nonce := block.TxNonce(acc1Addr)

+ tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey)
tx2, _ := types.SignTx(types.NewTransaction(nonce, acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)
- nonce++
- tx3, _ := types.SignTx(types.NewContractCreation(nonce, big.NewInt(0), 200000, big.NewInt(0), testContractCode), signer, acc1Key)
- testContractAddr = crypto.CreateAddress(acc1Addr, nonce)
+ tx3, _ := types.SignTx(types.NewContractCreation(nonce+1, big.NewInt(0), 200000, big.NewInt(0), testContractCode), signer, acc1Key)
+ testContractAddr = crypto.CreateAddress(acc1Addr, nonce+1)
+ tx4, _ := types.SignTx(types.NewContractCreation(nonce+2, big.NewInt(0), 200000, big.NewInt(0), testEventEmitterCode), signer, acc1Key)
+ testEventEmitterAddr = crypto.CreateAddress(acc1Addr, nonce+2)
block.AddTx(tx1)
block.AddTx(tx2)
block.AddTx(tx3)
+ block.AddTx(tx4)
case 2:
// Block 3 is empty but was mined by account #2.
block.SetCoinbase(acc2Addr)
@ -147,6 +155,16 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
chain, _ = light.NewLightChain(odr, gspec.Config, engine)
} else {
blockchain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})

+ chtIndexer := light.NewChtIndexer(db, false)
+ chtIndexer.Start(blockchain)
+
+ bbtIndexer := light.NewBloomTrieIndexer(db, false)
+
+ bloomIndexer := eth.NewBloomIndexer(db, params.BloomBitsBlocks)
+ bloomIndexer.AddChildIndexer(bbtIndexer)
+ bloomIndexer.Start(blockchain)
+
gchain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, generator)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
@ -321,7 +321,7 @@ const (
)

type HelperTrieReq struct {
- HelperTrieType uint
+ Type uint
TrieIdx uint64
Key []byte
FromLevel, AuxReq uint
@ -365,7 +365,7 @@ func (r *ChtRequest) CanSend(peer *peer) bool {
peer.lock.RLock()
defer peer.lock.RUnlock()

- return peer.headInfo.Number >= light.HelperTrieConfirmations && r.ChtNum <= (peer.headInfo.Number-light.HelperTrieConfirmations)/light.ChtFrequency
+ return peer.headInfo.Number >= light.HelperTrieConfirmations && r.ChtNum <= (peer.headInfo.Number-light.HelperTrieConfirmations)/light.CHTFrequencyClient
}

// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
@ -374,7 +374,7 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
var encNum [8]byte
binary.BigEndian.PutUint64(encNum[:], r.BlockNum)
req := HelperTrieReq{
- HelperTrieType: htCanonical,
+ Type: htCanonical,
TrieIdx: r.ChtNum,
Key: encNum[:],
AuxReq: auxHeader,
@ -493,12 +493,12 @@ func (r *BloomRequest) Request(reqID uint64, peer *peer) error {
reqs := make([]HelperTrieReq, len(r.SectionIdxList))

var encNumber [10]byte
- binary.BigEndian.PutUint16(encNumber[0:2], uint16(r.BitIdx))
+ binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))

for i, sectionIdx := range r.SectionIdxList {
- binary.BigEndian.PutUint64(encNumber[2:10], sectionIdx)
+ binary.BigEndian.PutUint64(encNumber[2:], sectionIdx)
reqs[i] = HelperTrieReq{
- HelperTrieType: htBloomBits,
+ Type: htBloomBits,
TrieIdx: r.BloomTrieNum,
Key: common.CopyBytes(encNumber[:]),
}
@ -525,10 +525,10 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {

// Verify the proofs
var encNumber [10]byte
- binary.BigEndian.PutUint16(encNumber[0:2], uint16(r.BitIdx))
+ binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))

for i, idx := range r.SectionIdxList {
- binary.BigEndian.PutUint64(encNumber[2:10], idx)
+ binary.BigEndian.PutUint64(encNumber[2:], idx)
value, err, _ := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads)
if err != nil {
return err
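For reference, the 10-byte BloomTrie key assembled in the BloomRequest hunks above is simply a big-endian 2-byte bloom bit index followed by a big-endian 8-byte section index; the slicing changes ([0:2] to [:2], [2:10] to [2:]) are purely cosmetic. A tiny standalone illustration with example values:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	key := make([]byte, 10)
	binary.BigEndian.PutUint16(key[:2], 7) // bloom bit index (0..2047)
	binary.BigEndian.PutUint64(key[2:], 3) // section index
	fmt.Printf("%x\n", key)                // 00070000000000000003
}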
@ -281,7 +281,6 @@ func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
default:
panic(nil)
}

}

// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
@ -291,12 +290,12 @@ func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq)
case lpv1:
reqsV1 := make([]ChtReq, len(reqs))
for i, req := range reqs {
- if req.HelperTrieType != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
+ if req.Type != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
return fmt.Errorf("Request invalid in LES/1 mode")
}
blockNum := binary.BigEndian.Uint64(req.Key)
// convert HelperTrie request to old CHT request
- reqsV1[i] = ChtReq{ChtNum: (req.TrieIdx + 1) * (light.ChtFrequency / light.ChtV1Frequency), BlockNum: blockNum, FromLevel: req.FromLevel}
+ reqsV1[i] = ChtReq{ChtNum: (req.TrieIdx + 1) * (light.CHTFrequencyClient / light.CHTFrequencyServer), BlockNum: blockNum, FromLevel: req.FromLevel}
}
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqsV1)
case lpv2:
@ -1,4 +1,4 @@
- // Copyright 2016 The go-ethereum Authors
+ // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@ -20,7 +20,6 @@ package les
import (
"crypto/ecdsa"
"encoding/binary"
- "fmt"
"math"
"sync"

@ -73,23 +72,22 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
logger := log.New()

chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility
- chtV2SectionCount := chtV1SectionCount / (light.ChtFrequency / light.ChtV1Frequency)
+ chtV2SectionCount := chtV1SectionCount / (light.CHTFrequencyClient / light.CHTFrequencyServer)
if chtV2SectionCount != 0 {
// convert to LES/2 section
chtLastSection := chtV2SectionCount - 1
// convert last LES/2 section index back to LES/1 index for chtIndexer.SectionHead
- chtLastSectionV1 := (chtLastSection+1)*(light.ChtFrequency/light.ChtV1Frequency) - 1
+ chtLastSectionV1 := (chtLastSection+1)*(light.CHTFrequencyClient/light.CHTFrequencyServer) - 1
chtSectionHead := srv.chtIndexer.SectionHead(chtLastSectionV1)
chtRoot := light.GetChtV2Root(pm.chainDb, chtLastSection, chtSectionHead)
- logger.Info("CHT", "section", chtLastSection, "sectionHead", fmt.Sprintf("%064x", chtSectionHead), "root", fmt.Sprintf("%064x", chtRoot))
+ logger.Info("Loaded CHT", "section", chtLastSection, "head", chtSectionHead, "root", chtRoot)
}

bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
if bloomTrieSectionCount != 0 {
bloomTrieLastSection := bloomTrieSectionCount - 1
bloomTrieSectionHead := srv.bloomTrieIndexer.SectionHead(bloomTrieLastSection)
bloomTrieRoot := light.GetBloomTrieRoot(pm.chainDb, bloomTrieLastSection, bloomTrieSectionHead)
- logger.Info("BloomTrie", "section", bloomTrieLastSection, "sectionHead", fmt.Sprintf("%064x", bloomTrieSectionHead), "root", fmt.Sprintf("%064x", bloomTrieRoot))
+ logger.Info("Loaded bloom trie", "section", bloomTrieLastSection, "head", bloomTrieSectionHead, "root", bloomTrieRoot)
}

srv.chtIndexer.Start(eth.BlockChain())
@ -111,6 +109,7 @@ func (s *LesServer) Protocols() []p2p.Protocol {
// Start starts the LES server
func (s *LesServer) Start(srvr *p2p.Server) {
s.protocolManager.Start(s.config.LightPeers)
+ if srvr.DiscV5 != nil {
for _, topic := range s.lesTopics {
topic := topic
go func() {
@ -121,6 +120,7 @@ func (s *LesServer) Start(srvr *p2p.Server) {
srvr.DiscV5.RegisterTopic(topic, s.quitSync)
}()
}
+ }
s.privateKey = srvr.PrivateKey
s.protocolManager.blockLoop()
}
@ -100,7 +100,6 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.
if cp, ok := trustedCheckpoints[bc.genesisBlock.Hash()]; ok {
bc.addTrustedCheckpoint(cp)
}

if err := bc.loadLastState(); err != nil {
return nil, err
}
@ -128,7 +127,7 @@ func (self *LightChain) addTrustedCheckpoint(cp trustedCheckpoint) {
if self.odr.BloomIndexer() != nil {
self.odr.BloomIndexer().AddKnownSectionHead(cp.sectionIdx, cp.sectionHead)
}
- log.Info("Added trusted checkpoint", "chain name", cp.name)
+ log.Info("Added trusted checkpoint", "chain", cp.name, "block", (cp.sectionIdx+1)*CHTFrequencyClient-1, "hash", cp.sectionHead)
}

func (self *LightChain) getProcInterrupt() bool {
@ -454,8 +453,8 @@ func (self *LightChain) SyncCht(ctx context.Context) bool {
}
headNum := self.CurrentHeader().Number.Uint64()
chtCount, _, _ := self.odr.ChtIndexer().Sections()
- if headNum+1 < chtCount*ChtFrequency {
- num := chtCount*ChtFrequency - 1
+ if headNum+1 < chtCount*CHTFrequencyClient {
+ num := chtCount*CHTFrequencyClient - 1
header, err := GetHeaderByNumber(ctx, self.odr, num)
if header != nil && err == nil {
self.mu.Lock()
@ -1,4 +1,4 @@
- // Copyright 2014 The go-ethereum Authors
+ // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@ -29,7 +29,9 @@ import (
// NodeSet stores a set of trie nodes. It implements trie.Database and can also
// act as a cache for another trie.Database.
type NodeSet struct {
- db map[string][]byte
+ nodes map[string][]byte
+ order []string
+
dataSize int
lock sync.RWMutex
}
@ -37,7 +39,7 @@ type NodeSet struct {
// NewNodeSet creates an empty node set
func NewNodeSet() *NodeSet {
return &NodeSet{
- db: make(map[string][]byte),
+ nodes: make(map[string][]byte),
}
}

@ -46,10 +48,15 @@ func (db *NodeSet) Put(key []byte, value []byte) error {
db.lock.Lock()
defer db.lock.Unlock()

- if _, ok := db.db[string(key)]; !ok {
- db.db[string(key)] = common.CopyBytes(value)
- db.dataSize += len(value)
+ if _, ok := db.nodes[string(key)]; ok {
+ return nil
}
+ keystr := string(key)
+
+ db.nodes[keystr] = common.CopyBytes(value)
+ db.order = append(db.order, keystr)
+ db.dataSize += len(value)
+
return nil
}

@ -58,7 +65,7 @@ func (db *NodeSet) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()

- if entry, ok := db.db[string(key)]; ok {
+ if entry, ok := db.nodes[string(key)]; ok {
return entry, nil
}
return nil, errors.New("not found")
@ -75,7 +82,7 @@ func (db *NodeSet) KeyCount() int {
db.lock.RLock()
defer db.lock.RUnlock()

- return len(db.db)
+ return len(db.nodes)
}

// DataSize returns the aggregated data size of nodes in the set
@ -92,8 +99,8 @@ func (db *NodeSet) NodeList() NodeList {
defer db.lock.RUnlock()

var values NodeList
- for _, value := range db.db {
- values = append(values, value)
+ for _, key := range db.order {
+ values = append(values, db.nodes[key])
}
return values
}
@ -103,7 +110,7 @@ func (db *NodeSet) Store(target ethdb.Putter) {
db.lock.RLock()
defer db.lock.RUnlock()

- for key, value := range db.db {
+ for key, value := range db.nodes {
target.Put([]byte(key), value)
}
}
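The reason NodeSet grows an order slice in the hunks above: Go map iteration order is unspecified, so a NodeList built by ranging over the map could emit proof nodes in a different order on every call, while ranging over the recorded insertion order keeps the output deterministic. A tiny self-contained illustration:

package main

import "fmt"

func main() {
	nodes := map[string][]byte{}
	var order []string
	for _, k := range []string{"a", "b", "c"} {
		nodes[k] = []byte(k) // values stand in for trie nodes
		order = append(order, k)
	}
	// Ranging over the map here would be nondeterministic; the order slice is not.
	for _, k := range order {
		fmt.Printf("%s ", nodes[k])
	}
	fmt.Println()
}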
@ -52,23 +52,20 @@ func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*typ
for chtCount > 0 && canonicalHash != sectionHead && canonicalHash != (common.Hash{}) {
chtCount--
if chtCount > 0 {
- sectionHeadNum = chtCount*ChtFrequency - 1
+ sectionHeadNum = chtCount*CHTFrequencyClient - 1
sectionHead = odr.ChtIndexer().SectionHead(chtCount - 1)
canonicalHash = core.GetCanonicalHash(db, sectionHeadNum)
}
}
}
- if number >= chtCount*ChtFrequency {
+ if number >= chtCount*CHTFrequencyClient {
return nil, ErrNoTrustedCht
}

r := &ChtRequest{ChtRoot: GetChtRoot(db, chtCount-1, sectionHead), ChtNum: chtCount - 1, BlockNum: number}
if err := odr.Retrieve(ctx, r); err != nil {
return nil, err
- } else {
- return r.Header, nil
}
+ return r.Header, nil
}

func GetCanonicalHash(ctx context.Context, odr OdrBackend, number uint64) (common.Hash, error) {
@ -1,4 +1,4 @@
- // Copyright 2016 The go-ethereum Authors
+ // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"math/big"
|
"math/big"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -35,8 +34,14 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ChtFrequency = 32768
|
// CHTFrequencyClient is the block frequency for creating CHTs on the client side.
|
||||||
ChtV1Frequency = 4096 // as long as we want to retain LES/1 compatibility, servers generate CHTs with the old, higher frequency
|
CHTFrequencyClient = 32768
|
||||||
|
|
||||||
|
// CHTFrequencyServer is the block frequency for creating CHTs on the server side.
|
||||||
|
// Eventually this can be merged back with the client version, but that requires a
|
||||||
|
// full database upgrade, so that should be left for a suitable moment.
|
||||||
|
CHTFrequencyServer = 4096
|
||||||
|
|
||||||
HelperTrieConfirmations = 2048 // number of confirmations before a server is expected to have the given HelperTrie available
|
HelperTrieConfirmations = 2048 // number of confirmations before a server is expected to have the given HelperTrie available
|
||||||
HelperTrieProcessConfirmations = 256 // number of confirmations before a HelperTrie is generated
|
HelperTrieProcessConfirmations = 256 // number of confirmations before a HelperTrie is generated
|
||||||
)
|
)
|
||||||
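The renamed constants above encode the ratio used throughout the LES hunks in this commit: servers still index CHTs in 4096-block sections for LES/1 compatibility, clients work in 32768-block sections, so eight server sections fold into one client section. A small worked example of the index arithmetic that appears in the server and peer hunks (the section counts are illustrative):

package main

import "fmt"

const (
	chtFrequencyClient uint64 = 32768 // client-side (LES/2) section size
	chtFrequencyServer uint64 = 4096  // server-side (LES/1) section size
)

func main() {
	ratio := chtFrequencyClient / chtFrequencyServer // 8

	serverSections := uint64(1224)           // e.g. sections the LES/1 indexer has finished
	clientSections := serverSections / ratio // 153 sections announced to LES/2 clients
	fmt.Println(clientSections)

	// Converting the last LES/2 section index back to the LES/1 index that
	// chtIndexer.SectionHead expects, as in the NewLesServer hunk.
	last := clientSections - 1
	fmt.Println((last+1)*ratio - 1) // 1223
}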
@ -52,19 +57,19 @@ type trustedCheckpoint struct {

var (
mainnetCheckpoint = trustedCheckpoint{
- name: "ETH mainnet",
+ name: "mainnet",
- sectionIdx: 150,
+ sectionIdx: 153,
- sectionHead: common.HexToHash("1e2e67f289565cbe7bd4367f7960dbd73a3f7c53439e1047cd7ba331c8109e39"),
+ sectionHead: common.HexToHash("04c2114a8cbe49ba5c37a03cc4b4b8d3adfc0bd2c78e0e726405dd84afca1d63"),
- chtRoot: common.HexToHash("f2a6c9ca143d647b44523cc249f1072c8912358ab873a77a5fdc792b8df99e80"),
+ chtRoot: common.HexToHash("d7ec603e5d30b567a6e894ee7704e4603232f206d3e5a589794cec0c57bf318e"),
- bloomTrieRoot: common.HexToHash("c018952fa1513c97857e79fbb9a37acaf8432d5b85e52a78eca7dff5fd5900ee"),
+ bloomTrieRoot: common.HexToHash("0b139b8fb692e21f663ff200da287192201c28ef5813c1ac6ba02a0a4799eef9"),
}

ropstenCheckpoint = trustedCheckpoint{
- name: "Ropsten testnet",
+ name: "ropsten",
- sectionIdx: 75,
+ sectionIdx: 79,
- sectionHead: common.HexToHash("12e68324f4578ea3e8e7fb3968167686729396c9279287fa1f1a8b51bb2d05b4"),
+ sectionHead: common.HexToHash("1b1ba890510e06411fdee9bb64ca7705c56a1a4ce3559ddb34b3680c526cb419"),
- chtRoot: common.HexToHash("3e51dc095c69fa654a4cac766e0afff7357515b4b3c3a379c675f810363e54be"),
+ chtRoot: common.HexToHash("71d60207af74e5a22a3e1cfbfc89f9944f91b49aa980c86fba94d568369eaf44"),
- bloomTrieRoot: common.HexToHash("33e3a70b33c1d73aa698d496a80615e98ed31fa8f56969876180553b32333339"),
+ bloomTrieRoot: common.HexToHash("70aca4b3b6d08dde8704c95cedb1420394453c1aec390947751e69ff8c436360"),
}
)

@ -100,7 +105,7 @@ func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) c
// GetChtV2Root reads the CHT root assoctiated to the given section from the database
// Note that sectionIdx is specified according to LES/2 CHT section size
func GetChtV2Root(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
- return GetChtRoot(db, (sectionIdx+1)*(ChtFrequency/ChtV1Frequency)-1, sectionHead)
+ return GetChtRoot(db, (sectionIdx+1)*(CHTFrequencyClient/CHTFrequencyServer)-1, sectionHead)
}

// StoreChtRoot writes the CHT root assoctiated to the given section into the database
@ -124,10 +129,10 @@ type ChtIndexerBackend struct {
func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
var sectionSize, confirmReq uint64
if clientMode {
- sectionSize = ChtFrequency
+ sectionSize = CHTFrequencyClient
confirmReq = HelperTrieConfirmations
} else {
- sectionSize = ChtV1Frequency
+ sectionSize = CHTFrequencyServer
confirmReq = HelperTrieProcessConfirmations
}
idb := ethdb.NewTable(db, "chtIndex-")
@ -174,8 +179,8 @@ func (c *ChtIndexerBackend) Commit() error {
}
c.triedb.Commit(root, false)

- if ((c.section+1)*c.sectionSize)%ChtFrequency == 0 {
- log.Info("Storing CHT", "idx", c.section*c.sectionSize/ChtFrequency, "sectionHead", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
+ if ((c.section+1)*c.sectionSize)%CHTFrequencyClient == 0 {
+ log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", c.lastHash, "root", root)
}
StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
return nil
@ -294,7 +299,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
b.triedb.Commit(root, false)

sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
- log.Info("Storing BloomTrie", "section", b.section, "sectionHead", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression ratio", float64(compSize)/float64(decompSize))
+ log.Info("Storing bloom trie", "section", b.section, "head", sectionHead, "root", root, "compression", float64(compSize)/float64(decompSize))
StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)

return nil
@ -154,12 +154,20 @@ func (c *BoundContract) GetDeployer() *Transaction {
// Call invokes the (constant) contract method with params as input values and
// sets the output to result.
func (c *BoundContract) Call(opts *CallOpts, out *Interfaces, method string, args *Interfaces) error {
+ if len(out.objects) == 1 {
+ result := out.objects[0]
+ if err := c.contract.Call(&opts.opts, result, method, args.objects...); err != nil {
+ return err
+ }
+ out.objects[0] = result
+ } else {
results := make([]interface{}, len(out.objects))
copy(results, out.objects)
if err := c.contract.Call(&opts.opts, &results, method, args.objects...); err != nil {
return err
}
copy(out.objects, results)
+ }
return nil
}

12
node/api.go
12
node/api.go
@ -114,7 +114,7 @@ func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// StartRPC starts the HTTP RPC API server.
|
// StartRPC starts the HTTP RPC API server.
|
||||||
func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string) (bool, error) {
|
func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) {
|
||||||
api.node.lock.Lock()
|
api.node.lock.Lock()
|
||||||
defer api.node.lock.Unlock()
|
defer api.node.lock.Unlock()
|
||||||
|
|
||||||
@ -141,6 +141,14 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
allowedVHosts := api.node.config.HTTPVirtualHosts
|
||||||
|
if vhosts != nil {
|
||||||
|
allowedVHosts = nil
|
||||||
|
for _, vhost := range strings.Split(*vhosts, ",") {
|
||||||
|
allowedVHosts = append(allowedVHosts, strings.TrimSpace(vhost))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
modules := api.node.httpWhitelist
|
modules := api.node.httpWhitelist
|
||||||
if apis != nil {
|
if apis != nil {
|
||||||
modules = nil
|
modules = nil
|
||||||
@ -149,7 +157,7 @@ func (api *PrivateAdminAPI) StartRPC(host *string, port *int, cors *string, apis
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := api.node.startHTTP(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, allowedOrigins); err != nil {
|
if err := api.node.startHTTP(fmt.Sprintf("%s:%d", *host, *port), api.node.rpcAPIs, modules, allowedOrigins, allowedVHosts); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
return true, nil
|
return true, nil
|
||||||
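The extended method can be driven over IPC against a running node without a restart. A minimal sketch using the go-ethereum rpc client; the socket path and host list are placeholders, and the argument order mirrors the new StartRPC signature (host, port, cors, apis, vhosts):

// startHTTPWithVhosts asks a running node to (re)start its HTTP endpoint with
// an explicit virtual-host whitelist via the admin_startRPC RPC method.
// Assumes: import "github.com/ethereum/go-ethereum/rpc".
func startHTTPWithVhosts(ipcPath string) error {
	client, err := rpc.Dial(ipcPath) // e.g. the node's geth.ipc socket
	if err != nil {
		return err
	}
	defer client.Close()

	var started bool
	return client.Call(&started, "admin_startRPC",
		"127.0.0.1", 8545, "", "eth,net,web3", "localhost,dapp.example")
}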
|
@ -105,6 +105,15 @@ type Config struct {
|
|||||||
// useless for custom HTTP clients.
|
// useless for custom HTTP clients.
|
||||||
HTTPCors []string `toml:",omitempty"`
|
HTTPCors []string `toml:",omitempty"`
|
||||||
|
|
||||||
|
// HTTPVirtualHosts is the list of virtual hostnames which are allowed on incoming requests.
|
||||||
|
// This is by default {'localhost'}. Using this prevents attacks like
|
||||||
|
// DNS rebinding, which bypasses SOP by simply masquerading as being within the same
|
||||||
|
// origin. These attacks do not utilize CORS, since they are not cross-domain.
|
||||||
|
// By explicitly checking the Host-header, the server will not allow requests
|
||||||
|
// made against the server with a malicious host domain.
|
||||||
|
// Requests using IP addresses directly are not affected.
|
||||||
|
HTTPVirtualHosts []string `toml:",omitempty"`
|
||||||
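The protection described above amounts to a whitelist check on the Host header before a request reaches the RPC handler. A minimal sketch of such a check, not the rpc package's actual handler (names and structure are illustrative):

package main

import (
	"fmt"
	"net"
	"net/http"
	"strings"
)

// vhostAllowed reports whether the request's Host header matches a whitelist
// such as Config.HTTPVirtualHosts. Direct IP access passes through, and a "*"
// entry is treated as allow-all.
func vhostAllowed(whitelist map[string]struct{}, r *http.Request) bool {
	if r.Host == "" {
		return true // nothing to validate
	}
	host, _, err := net.SplitHostPort(r.Host)
	if err != nil {
		host = r.Host // no port in the header
	}
	if net.ParseIP(host) != nil {
		return true // requests using an IP address directly are not affected
	}
	if _, ok := whitelist["*"]; ok {
		return true
	}
	_, ok := whitelist[strings.ToLower(host)]
	return ok
}

func main() {
	whitelist := map[string]struct{}{"localhost": {}}
	fmt.Println(vhostAllowed(whitelist, &http.Request{Host: "localhost:8545"})) // true
	fmt.Println(vhostAllowed(whitelist, &http.Request{Host: "evil.example"}))   // false
}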
|
|
||||||
// HTTPModules is a list of API modules to expose via the HTTP RPC interface.
|
// HTTPModules is a list of API modules to expose via the HTTP RPC interface.
|
||||||
// If the module list is empty, all RPC API endpoints designated public will be
|
// If the module list is empty, all RPC API endpoints designated public will be
|
||||||
// exposed.
|
// exposed.
|
||||||
@ -137,7 +146,7 @@ type Config struct {
|
|||||||
WSExposeAll bool `toml:",omitempty"`
|
WSExposeAll bool `toml:",omitempty"`
|
||||||
|
|
||||||
// Logger is a custom logger to use with the p2p.Server.
|
// Logger is a custom logger to use with the p2p.Server.
|
||||||
Logger log.Logger
|
Logger log.Logger `toml:",omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
|
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
|
||||||
|
29
node/node.go
29
node/node.go
@ -263,7 +263,7 @@ func (n *Node) startRPC(services map[reflect.Type]Service) error {
|
|||||||
n.stopInProc()
|
n.stopInProc()
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := n.startHTTP(n.httpEndpoint, apis, n.config.HTTPModules, n.config.HTTPCors); err != nil {
|
if err := n.startHTTP(n.httpEndpoint, apis, n.config.HTTPModules, n.config.HTTPCors, n.config.HTTPVirtualHosts); err != nil {
|
||||||
n.stopIPC()
|
n.stopIPC()
|
||||||
n.stopInProc()
|
n.stopInProc()
|
||||||
return err
|
return err
|
||||||
@ -287,7 +287,7 @@ func (n *Node) startInProc(apis []rpc.API) error {
|
|||||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
n.log.Debug(fmt.Sprintf("InProc registered %T under '%s'", api.Service, api.Namespace))
|
n.log.Debug("InProc registered", "service", api.Service, "namespace", api.Namespace)
|
||||||
}
|
}
|
||||||
n.inprocHandler = handler
|
n.inprocHandler = handler
|
||||||
return nil
|
return nil
|
||||||
@ -313,7 +313,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
|||||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
n.log.Debug(fmt.Sprintf("IPC registered %T under '%s'", api.Service, api.Namespace))
|
n.log.Debug("IPC registered", "service", api.Service, "namespace", api.Namespace)
|
||||||
}
|
}
|
||||||
// All APIs registered, start the IPC listener
|
// All APIs registered, start the IPC listener
|
||||||
var (
|
var (
|
||||||
@ -324,7 +324,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
n.log.Info(fmt.Sprintf("IPC endpoint opened: %s", n.ipcEndpoint))
|
n.log.Info("IPC endpoint opened", "url", n.ipcEndpoint)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
conn, err := listener.Accept()
|
conn, err := listener.Accept()
|
||||||
@ -337,7 +337,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Not closed, just some error; report and continue
|
// Not closed, just some error; report and continue
|
||||||
n.log.Error(fmt.Sprintf("IPC accept failed: %v", err))
|
n.log.Error("IPC accept failed", "err", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
|
go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
|
||||||
@ -356,7 +356,7 @@ func (n *Node) stopIPC() {
|
|||||||
n.ipcListener.Close()
|
n.ipcListener.Close()
|
||||||
n.ipcListener = nil
|
n.ipcListener = nil
|
||||||
|
|
||||||
n.log.Info(fmt.Sprintf("IPC endpoint closed: %s", n.ipcEndpoint))
|
n.log.Info("IPC endpoint closed", "endpoint", n.ipcEndpoint)
|
||||||
}
|
}
|
||||||
if n.ipcHandler != nil {
|
if n.ipcHandler != nil {
|
||||||
n.ipcHandler.Stop()
|
n.ipcHandler.Stop()
|
||||||
@ -365,7 +365,7 @@ func (n *Node) stopIPC() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// startHTTP initializes and starts the HTTP RPC endpoint.
|
// startHTTP initializes and starts the HTTP RPC endpoint.
|
||||||
func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors []string) error {
|
func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors []string, vhosts []string) error {
|
||||||
// Short circuit if the HTTP endpoint isn't being exposed
|
// Short circuit if the HTTP endpoint isn't being exposed
|
||||||
if endpoint == "" {
|
if endpoint == "" {
|
||||||
return nil
|
return nil
|
||||||
@ -382,7 +382,7 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
|
|||||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
n.log.Debug(fmt.Sprintf("HTTP registered %T under '%s'", api.Service, api.Namespace))
|
n.log.Debug("HTTP registered", "service", api.Service, "namespace", api.Namespace)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// All APIs registered, start the HTTP listener
|
// All APIs registered, start the HTTP listener
|
||||||
@ -393,9 +393,8 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
|
|||||||
if listener, err = net.Listen("tcp", endpoint); err != nil {
|
if listener, err = net.Listen("tcp", endpoint); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
go rpc.NewHTTPServer(cors, handler).Serve(listener)
|
go rpc.NewHTTPServer(cors, vhosts, handler).Serve(listener)
|
||||||
n.log.Info(fmt.Sprintf("HTTP endpoint opened: http://%s", endpoint))
|
n.log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%s", endpoint), "cors", strings.Join(cors, ","), "vhosts", strings.Join(vhosts, ","))
|
||||||
|
|
||||||
// All listeners booted successfully
|
// All listeners booted successfully
|
||||||
n.httpEndpoint = endpoint
|
n.httpEndpoint = endpoint
|
||||||
n.httpListener = listener
|
n.httpListener = listener
|
||||||
@ -410,7 +409,7 @@ func (n *Node) stopHTTP() {
|
|||||||
n.httpListener.Close()
|
n.httpListener.Close()
|
||||||
n.httpListener = nil
|
n.httpListener = nil
|
||||||
|
|
||||||
n.log.Info(fmt.Sprintf("HTTP endpoint closed: http://%s", n.httpEndpoint))
|
n.log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%s", n.httpEndpoint))
|
||||||
}
|
}
|
||||||
if n.httpHandler != nil {
|
if n.httpHandler != nil {
|
||||||
n.httpHandler.Stop()
|
n.httpHandler.Stop()
|
||||||
@ -436,7 +435,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
|
|||||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
n.log.Debug(fmt.Sprintf("WebSocket registered %T under '%s'", api.Service, api.Namespace))
|
n.log.Debug("WebSocket registered", "service", api.Service, "namespace", api.Namespace)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// All APIs registered, start the HTTP listener
|
// All APIs registered, start the HTTP listener
|
||||||
@ -448,7 +447,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
go rpc.NewWSServer(wsOrigins, handler).Serve(listener)
|
go rpc.NewWSServer(wsOrigins, handler).Serve(listener)
|
||||||
n.log.Info(fmt.Sprintf("WebSocket endpoint opened: ws://%s", listener.Addr()))
|
n.log.Info("WebSocket endpoint opened", "url", fmt.Sprintf("ws://%s", listener.Addr()))
|
||||||
|
|
||||||
// All listeners booted successfully
|
// All listeners booted successfully
|
||||||
n.wsEndpoint = endpoint
|
n.wsEndpoint = endpoint
|
||||||
@ -464,7 +463,7 @@ func (n *Node) stopWS() {
|
|||||||
n.wsListener.Close()
|
n.wsListener.Close()
|
||||||
n.wsListener = nil
|
n.wsListener = nil
|
||||||
|
|
||||||
n.log.Info(fmt.Sprintf("WebSocket endpoint closed: ws://%s", n.wsEndpoint))
|
n.log.Info("WebSocket endpoint closed", "url", fmt.Sprintf("ws://%s", n.wsEndpoint))
|
||||||
}
|
}
|
||||||
if n.wsHandler != nil {
|
if n.wsHandler != nil {
|
||||||
n.wsHandler.Stop()
|
n.wsHandler.Stop()
|
||||||
|
13
p2p/dial.go
13
p2p/dial.go
@ -154,6 +154,9 @@ func (s *dialstate) addStatic(n *discover.Node) {
|
|||||||
func (s *dialstate) removeStatic(n *discover.Node) {
|
func (s *dialstate) removeStatic(n *discover.Node) {
|
||||||
// This removes a task so future attempts to connect will not be made.
|
// This removes a task so future attempts to connect will not be made.
|
||||||
delete(s.static, n.ID)
|
delete(s.static, n.ID)
|
||||||
|
// This removes a previous dial timestamp so that the application
|
||||||
|
// can force the server to reconnect with a chosen peer immediately.
|
||||||
|
s.hist.remove(n.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
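Together with removeStatic above, clearing the dial-history entry is what makes an application-driven reconnect immediate: dropping and re-adding a static peer removes both the task and the recent-dial timestamp. A minimal sketch, assuming srv is a running *p2p.Server and url is a valid enode URL:

// reconnectStatic forces an immediate redial of a static peer.
func reconnectStatic(srv *p2p.Server, url string) error {
	node, err := discover.ParseNode(url)
	if err != nil {
		return err
	}
	srv.RemovePeer(node) // drops the static task and its dial-history entry
	srv.AddPeer(node)    // schedules a fresh static dial on the next newTasks pass
	return nil
}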
func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now time.Time) []task {
|
func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now time.Time) []task {
|
||||||
@ -390,6 +393,16 @@ func (h dialHistory) min() pastDial {
|
|||||||
}
|
}
|
||||||
func (h *dialHistory) add(id discover.NodeID, exp time.Time) {
|
func (h *dialHistory) add(id discover.NodeID, exp time.Time) {
|
||||||
heap.Push(h, pastDial{id, exp})
|
heap.Push(h, pastDial{id, exp})
|
||||||
|
|
||||||
|
}
|
||||||
|
func (h *dialHistory) remove(id discover.NodeID) bool {
|
||||||
|
for i, v := range *h {
|
||||||
|
if v.id == id {
|
||||||
|
heap.Remove(h, i)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
func (h dialHistory) contains(id discover.NodeID) bool {
|
func (h dialHistory) contains(id discover.NodeID) bool {
|
||||||
for _, v := range h {
|
for _, v := range h {
|
||||||
|
@ -515,6 +515,50 @@ func TestDialStateStaticDial(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// This test checks that static peers will be redialed immediately if they were re-added to a static list.
|
||||||
|
func TestDialStaticAfterReset(t *testing.T) {
|
||||||
|
wantStatic := []*discover.Node{
|
||||||
|
{ID: uintID(1)},
|
||||||
|
{ID: uintID(2)},
|
||||||
|
}
|
||||||
|
|
||||||
|
rounds := []round{
|
||||||
|
// Static dials are launched for the nodes that aren't yet connected.
|
||||||
|
{
|
||||||
|
peers: nil,
|
||||||
|
new: []task{
|
||||||
|
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
|
||||||
|
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// No new dial tasks, all peers are connected.
|
||||||
|
{
|
||||||
|
peers: []*Peer{
|
||||||
|
{rw: &conn{flags: staticDialedConn, id: uintID(1)}},
|
||||||
|
{rw: &conn{flags: staticDialedConn, id: uintID(2)}},
|
||||||
|
},
|
||||||
|
done: []task{
|
||||||
|
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
|
||||||
|
&dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
|
||||||
|
},
|
||||||
|
new: []task{
|
||||||
|
&waitExpireTask{Duration: 30 * time.Second},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
dTest := dialtest{
|
||||||
|
init: newDialState(wantStatic, nil, fakeTable{}, 0, nil),
|
||||||
|
rounds: rounds,
|
||||||
|
}
|
||||||
|
runDialTest(t, dTest)
|
||||||
|
for _, n := range wantStatic {
|
||||||
|
dTest.init.removeStatic(n)
|
||||||
|
dTest.init.addStatic(n)
|
||||||
|
}
|
||||||
|
// without removing peers they will be considered recently dialed
|
||||||
|
runDialTest(t, dTest)
|
||||||
|
}
|
||||||
|
|
||||||
// This test checks that past dials are not retried for some time.
|
// This test checks that past dials are not retried for some time.
|
||||||
func TestDialStateCache(t *testing.T) {
|
func TestDialStateCache(t *testing.T) {
|
||||||
wantStatic := []*discover.Node{
|
wantStatic := []*discover.Node{
|
||||||
|
@ -257,7 +257,7 @@ func (db *nodeDB) expireNodes() error {
|
|||||||
}
|
}
|
||||||
// Skip the node if not expired yet (and not self)
|
// Skip the node if not expired yet (and not self)
|
||||||
if !bytes.Equal(id[:], db.self[:]) {
|
if !bytes.Equal(id[:], db.self[:]) {
|
||||||
if seen := db.lastPong(id); seen.After(threshold) {
|
if seen := db.bondTime(id); seen.After(threshold) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -278,13 +278,18 @@ func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
|
|||||||
return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
|
return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
|
||||||
}
|
}
|
||||||
|
|
||||||
// lastPong retrieves the time of the last successful contact from the remote node.
|
// bondTime retrieves the time of the last successful pong from the remote node.
|
||||||
func (db *nodeDB) lastPong(id NodeID) time.Time {
|
func (db *nodeDB) bondTime(id NodeID) time.Time {
|
||||||
return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
|
return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateLastPong updates the last time a remote node successfully contacted.
|
// hasBond reports whether the given node is considered bonded.
|
||||||
func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error {
|
func (db *nodeDB) hasBond(id NodeID) bool {
|
||||||
|
return time.Since(db.bondTime(id)) < nodeDBNodeExpiration
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateBondTime updates the last pong time of a node.
|
||||||
|
func (db *nodeDB) updateBondTime(id NodeID, instance time.Time) error {
|
||||||
return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
|
return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -327,7 +332,7 @@ seek:
|
|||||||
if n.ID == db.self {
|
if n.ID == db.self {
|
||||||
continue seek
|
continue seek
|
||||||
}
|
}
|
||||||
if now.Sub(db.lastPong(n.ID)) > maxAge {
|
if now.Sub(db.bondTime(n.ID)) > maxAge {
|
||||||
continue seek
|
continue seek
|
||||||
}
|
}
|
||||||
for i := range nodes {
|
for i := range nodes {
|
||||||
|
@ -125,13 +125,13 @@ func TestNodeDBFetchStore(t *testing.T) {
|
|||||||
t.Errorf("ping: value mismatch: have %v, want %v", stored, inst)
|
t.Errorf("ping: value mismatch: have %v, want %v", stored, inst)
|
||||||
}
|
}
|
||||||
// Check fetch/store operations on a node pong object
|
// Check fetch/store operations on a node pong object
|
||||||
if stored := db.lastPong(node.ID); stored.Unix() != 0 {
|
if stored := db.bondTime(node.ID); stored.Unix() != 0 {
|
||||||
t.Errorf("pong: non-existing object: %v", stored)
|
t.Errorf("pong: non-existing object: %v", stored)
|
||||||
}
|
}
|
||||||
if err := db.updateLastPong(node.ID, inst); err != nil {
|
if err := db.updateBondTime(node.ID, inst); err != nil {
|
||||||
t.Errorf("pong: failed to update: %v", err)
|
t.Errorf("pong: failed to update: %v", err)
|
||||||
}
|
}
|
||||||
if stored := db.lastPong(node.ID); stored.Unix() != inst.Unix() {
|
if stored := db.bondTime(node.ID); stored.Unix() != inst.Unix() {
|
||||||
t.Errorf("pong: value mismatch: have %v, want %v", stored, inst)
|
t.Errorf("pong: value mismatch: have %v, want %v", stored, inst)
|
||||||
}
|
}
|
||||||
// Check fetch/store operations on a node findnode-failure object
|
// Check fetch/store operations on a node findnode-failure object
|
||||||
@ -224,8 +224,8 @@ func TestNodeDBSeedQuery(t *testing.T) {
|
|||||||
if err := db.updateNode(seed.node); err != nil {
|
if err := db.updateNode(seed.node); err != nil {
|
||||||
t.Fatalf("node %d: failed to insert: %v", i, err)
|
t.Fatalf("node %d: failed to insert: %v", i, err)
|
||||||
}
|
}
|
||||||
if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
|
if err := db.updateBondTime(seed.node.ID, seed.pong); err != nil {
|
||||||
t.Fatalf("node %d: failed to insert lastPong: %v", i, err)
|
t.Fatalf("node %d: failed to insert bondTime: %v", i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -332,8 +332,8 @@ func TestNodeDBExpiration(t *testing.T) {
|
|||||||
if err := db.updateNode(seed.node); err != nil {
|
if err := db.updateNode(seed.node); err != nil {
|
||||||
t.Fatalf("node %d: failed to insert: %v", i, err)
|
t.Fatalf("node %d: failed to insert: %v", i, err)
|
||||||
}
|
}
|
||||||
if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
|
if err := db.updateBondTime(seed.node.ID, seed.pong); err != nil {
|
||||||
t.Fatalf("node %d: failed to update pong: %v", i, err)
|
t.Fatalf("node %d: failed to update bondTime: %v", i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Expire some of them, and check the rest
|
// Expire some of them, and check the rest
|
||||||
@ -365,8 +365,8 @@ func TestNodeDBSelfExpiration(t *testing.T) {
|
|||||||
if err := db.updateNode(seed.node); err != nil {
|
if err := db.updateNode(seed.node); err != nil {
|
||||||
t.Fatalf("node %d: failed to insert: %v", i, err)
|
t.Fatalf("node %d: failed to insert: %v", i, err)
|
||||||
}
|
}
|
||||||
if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil {
|
if err := db.updateBondTime(seed.node.ID, seed.pong); err != nil {
|
||||||
t.Fatalf("node %d: failed to update pong: %v", i, err)
|
t.Fatalf("node %d: failed to update bondTime: %v", i, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Expire the nodes and make sure self has been evacuated too
|
// Expire the nodes and make sure self has been evacuated too
|
||||||
|
@ -29,6 +29,7 @@ import (
|
|||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
@ -51,9 +52,8 @@ type Node struct {
|
|||||||
// with ID.
|
// with ID.
|
||||||
sha common.Hash
|
sha common.Hash
|
||||||
|
|
||||||
// whether this node is currently being pinged in order to replace
|
// Time when the node was added to the table.
|
||||||
// it in a bucket
|
addedAt time.Time
|
||||||
contested bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewNode creates a new node. It is mostly meant to be used for
|
// NewNode creates a new node. It is mostly meant to be used for
|
||||||
|
@ -23,10 +23,11 @@
|
|||||||
package discover
|
package discover
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
crand "crypto/rand"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
mrand "math/rand"
|
||||||
"net"
|
"net"
|
||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
@ -35,29 +36,45 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
alpha = 3 // Kademlia concurrency factor
|
alpha = 3 // Kademlia concurrency factor
|
||||||
bucketSize = 16 // Kademlia bucket size
|
bucketSize = 16 // Kademlia bucket size
|
||||||
|
maxReplacements = 10 // Size of per-bucket replacement list
|
||||||
|
|
||||||
|
// We keep buckets for the upper 1/15 of distances because
|
||||||
|
// it's very unlikely we'll ever encounter a node that's closer.
|
||||||
hashBits = len(common.Hash{}) * 8
|
hashBits = len(common.Hash{}) * 8
|
||||||
nBuckets = hashBits + 1 // Number of buckets
|
nBuckets = hashBits / 15 // Number of buckets
|
||||||
|
bucketMinDistance = hashBits - nBuckets // Log distance of closest bucket
|
||||||
|
|
||||||
maxBondingPingPongs = 16
|
// IP address limits.
|
||||||
maxFindnodeFailures = 5
|
bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
|
||||||
|
tableIPLimit, tableSubnet = 10, 24
|
||||||
|
|
||||||
autoRefreshInterval = 1 * time.Hour
|
maxBondingPingPongs = 16 // Limit on the number of concurrent ping/pong interactions
|
||||||
|
maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
|
||||||
|
|
||||||
|
refreshInterval = 30 * time.Minute
|
||||||
|
revalidateInterval = 10 * time.Second
|
||||||
|
copyNodesInterval = 30 * time.Second
|
||||||
|
seedMinTableTime = 5 * time.Minute
|
||||||
seedCount = 30
|
seedCount = 30
|
||||||
seedMaxAge = 5 * 24 * time.Hour
|
seedMaxAge = 5 * 24 * time.Hour
|
||||||
)
|
)
|
||||||
|
|
||||||
type Table struct {
|
type Table struct {
|
||||||
mutex sync.Mutex // protects buckets, their content, and nursery
|
mutex sync.Mutex // protects buckets, bucket content, nursery, rand
|
||||||
buckets [nBuckets]*bucket // index of known nodes by distance
|
buckets [nBuckets]*bucket // index of known nodes by distance
|
||||||
nursery []*Node // bootstrap nodes
|
nursery []*Node // bootstrap nodes
|
||||||
db *nodeDB // database of known nodes
|
rand *mrand.Rand // source of randomness, periodically reseeded
|
||||||
|
ips netutil.DistinctNetSet
|
||||||
|
|
||||||
|
db *nodeDB // database of known nodes
|
||||||
refreshReq chan chan struct{}
|
refreshReq chan chan struct{}
|
||||||
|
initDone chan struct{}
|
||||||
closeReq chan struct{}
|
closeReq chan struct{}
|
||||||
closed chan struct{}
|
closed chan struct{}
|
||||||
|
|
||||||
@ -89,9 +106,13 @@ type transport interface {
|
|||||||
|
|
||||||
// bucket contains nodes, ordered by their last activity. the entry
|
// bucket contains nodes, ordered by their last activity. the entry
|
||||||
// that was most recently active is the first element in entries.
|
// that was most recently active is the first element in entries.
|
||||||
type bucket struct{ entries []*Node }
|
type bucket struct {
|
||||||
|
entries []*Node // live entries, sorted by time of last contact
|
||||||
|
replacements []*Node // recently seen nodes to be used if revalidation fails
|
||||||
|
ips netutil.DistinctNetSet
|
||||||
|
}
|
||||||
|
|
||||||
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string) (*Table, error) {
|
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
|
||||||
// If no node database was given, use an in-memory one
|
// If no node database was given, use an in-memory one
|
||||||
db, err := newNodeDB(nodeDBPath, Version, ourID)
|
db, err := newNodeDB(nodeDBPath, Version, ourID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -104,19 +125,42 @@ func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string
|
|||||||
bonding: make(map[NodeID]*bondproc),
|
bonding: make(map[NodeID]*bondproc),
|
||||||
bondslots: make(chan struct{}, maxBondingPingPongs),
|
bondslots: make(chan struct{}, maxBondingPingPongs),
|
||||||
refreshReq: make(chan chan struct{}),
|
refreshReq: make(chan chan struct{}),
|
||||||
|
initDone: make(chan struct{}),
|
||||||
closeReq: make(chan struct{}),
|
closeReq: make(chan struct{}),
|
||||||
closed: make(chan struct{}),
|
closed: make(chan struct{}),
|
||||||
|
rand: mrand.New(mrand.NewSource(0)),
|
||||||
|
ips: netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit},
|
||||||
|
}
|
||||||
|
if err := tab.setFallbackNodes(bootnodes); err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
for i := 0; i < cap(tab.bondslots); i++ {
|
for i := 0; i < cap(tab.bondslots); i++ {
|
||||||
tab.bondslots <- struct{}{}
|
tab.bondslots <- struct{}{}
|
||||||
}
|
}
|
||||||
for i := range tab.buckets {
|
for i := range tab.buckets {
|
||||||
tab.buckets[i] = new(bucket)
|
tab.buckets[i] = &bucket{
|
||||||
|
ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
|
||||||
}
|
}
|
||||||
go tab.refreshLoop()
|
}
|
||||||
|
tab.seedRand()
|
||||||
|
tab.loadSeedNodes(false)
|
||||||
|
// Start the background expiration goroutine after loading seeds so that the search for
|
||||||
|
// seed nodes also considers older nodes that would otherwise be removed by the
|
||||||
|
// expiration.
|
||||||
|
tab.db.ensureExpirer()
|
||||||
|
go tab.loop()
|
||||||
return tab, nil
|
return tab, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (tab *Table) seedRand() {
|
||||||
|
var b [8]byte
|
||||||
|
crand.Read(b[:])
|
||||||
|
|
||||||
|
tab.mutex.Lock()
|
||||||
|
tab.rand.Seed(int64(binary.BigEndian.Uint64(b[:])))
|
||||||
|
tab.mutex.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
// Self returns the local node.
|
// Self returns the local node.
|
||||||
// The returned node should not be modified by the caller.
|
// The returned node should not be modified by the caller.
|
||||||
func (tab *Table) Self() *Node {
|
func (tab *Table) Self() *Node {
|
||||||
@ -127,9 +171,12 @@ func (tab *Table) Self() *Node {
|
|||||||
// table. It will not write the same node more than once. The nodes in
|
// table. It will not write the same node more than once. The nodes in
|
||||||
// the slice are copies and can be modified by the caller.
|
// the slice are copies and can be modified by the caller.
|
||||||
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
|
func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
|
||||||
|
if !tab.isInitDone() {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
tab.mutex.Lock()
|
tab.mutex.Lock()
|
||||||
defer tab.mutex.Unlock()
|
defer tab.mutex.Unlock()
|
||||||
// TODO: tree-based buckets would help here
|
|
||||||
// Find all non-empty buckets and get a fresh slice of their entries.
|
// Find all non-empty buckets and get a fresh slice of their entries.
|
||||||
var buckets [][]*Node
|
var buckets [][]*Node
|
||||||
for _, b := range tab.buckets {
|
for _, b := range tab.buckets {
|
||||||
@ -141,8 +188,8 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
|
|||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
// Shuffle the buckets.
|
// Shuffle the buckets.
|
||||||
for i := uint32(len(buckets)) - 1; i > 0; i-- {
|
for i := len(buckets) - 1; i > 0; i-- {
|
||||||
j := randUint(i)
|
j := tab.rand.Intn(len(buckets))
|
||||||
buckets[i], buckets[j] = buckets[j], buckets[i]
|
buckets[i], buckets[j] = buckets[j], buckets[i]
|
||||||
}
|
}
|
||||||
// Move head of each bucket into buf, removing buckets that become empty.
|
// Move head of each bucket into buf, removing buckets that become empty.
|
||||||
@ -161,15 +208,6 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
|
|||||||
return i + 1
|
return i + 1
|
||||||
}
|
}
|
||||||
|
|
||||||
func randUint(max uint32) uint32 {
|
|
||||||
if max == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
var b [4]byte
|
|
||||||
rand.Read(b[:])
|
|
||||||
return binary.BigEndian.Uint32(b[:]) % max
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close terminates the network listener and flushes the node database.
|
// Close terminates the network listener and flushes the node database.
|
||||||
func (tab *Table) Close() {
|
func (tab *Table) Close() {
|
||||||
select {
|
select {
|
||||||
@ -180,16 +218,15 @@ func (tab *Table) Close() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetFallbackNodes sets the initial points of contact. These nodes
|
// setFallbackNodes sets the initial points of contact. These nodes
|
||||||
// are used to connect to the network if the table is empty and there
|
// are used to connect to the network if the table is empty and there
|
||||||
// are no known nodes in the database.
|
// are no known nodes in the database.
|
||||||
func (tab *Table) SetFallbackNodes(nodes []*Node) error {
|
func (tab *Table) setFallbackNodes(nodes []*Node) error {
|
||||||
for _, n := range nodes {
|
for _, n := range nodes {
|
||||||
if err := n.validateComplete(); err != nil {
|
if err := n.validateComplete(); err != nil {
|
||||||
return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
|
return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tab.mutex.Lock()
|
|
||||||
tab.nursery = make([]*Node, 0, len(nodes))
|
tab.nursery = make([]*Node, 0, len(nodes))
|
||||||
for _, n := range nodes {
|
for _, n := range nodes {
|
||||||
cpy := *n
|
cpy := *n
|
||||||
@ -198,11 +235,19 @@ func (tab *Table) SetFallbackNodes(nodes []*Node) error {
|
|||||||
cpy.sha = crypto.Keccak256Hash(n.ID[:])
|
cpy.sha = crypto.Keccak256Hash(n.ID[:])
|
||||||
tab.nursery = append(tab.nursery, &cpy)
|
tab.nursery = append(tab.nursery, &cpy)
|
||||||
}
|
}
|
||||||
tab.mutex.Unlock()
|
|
||||||
tab.refresh()
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isInitDone returns whether the table's initial seeding procedure has completed.
|
||||||
|
func (tab *Table) isInitDone() bool {
|
||||||
|
select {
|
||||||
|
case <-tab.initDone:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Resolve searches for a specific node with the given ID.
|
// Resolve searches for a specific node with the given ID.
|
||||||
// It returns nil if the node could not be found.
|
// It returns nil if the node could not be found.
|
||||||
func (tab *Table) Resolve(targetID NodeID) *Node {
|
func (tab *Table) Resolve(targetID NodeID) *Node {
|
||||||
@ -314,33 +359,49 @@ func (tab *Table) refresh() <-chan struct{} {
|
|||||||
return done
|
return done
|
||||||
}
|
}
|
||||||
|
|
||||||
// refreshLoop schedules doRefresh runs and coordinates shutdown.
|
// loop schedules refresh and revalidate runs, and coordinates shutdown.
|
||||||
func (tab *Table) refreshLoop() {
|
func (tab *Table) loop() {
|
||||||
var (
|
var (
|
||||||
timer = time.NewTicker(autoRefreshInterval)
|
revalidate = time.NewTimer(tab.nextRevalidateTime())
|
||||||
waiting []chan struct{} // accumulates waiting callers while doRefresh runs
|
refresh = time.NewTicker(refreshInterval)
|
||||||
done chan struct{} // where doRefresh reports completion
|
copyNodes = time.NewTicker(copyNodesInterval)
|
||||||
|
revalidateDone = make(chan struct{})
|
||||||
|
refreshDone = make(chan struct{}) // where doRefresh reports completion
|
||||||
|
waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
|
||||||
)
|
)
|
||||||
|
defer refresh.Stop()
|
||||||
|
defer revalidate.Stop()
|
||||||
|
defer copyNodes.Stop()
|
||||||
|
|
||||||
|
// Start initial refresh.
|
||||||
|
go tab.doRefresh(refreshDone)
|
||||||
|
|
||||||
loop:
|
loop:
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-timer.C:
|
case <-refresh.C:
|
||||||
if done == nil {
|
tab.seedRand()
|
||||||
done = make(chan struct{})
|
if refreshDone == nil {
|
||||||
go tab.doRefresh(done)
|
refreshDone = make(chan struct{})
|
||||||
|
go tab.doRefresh(refreshDone)
|
||||||
}
|
}
|
||||||
case req := <-tab.refreshReq:
|
case req := <-tab.refreshReq:
|
||||||
waiting = append(waiting, req)
|
waiting = append(waiting, req)
|
||||||
if done == nil {
|
if refreshDone == nil {
|
||||||
done = make(chan struct{})
|
refreshDone = make(chan struct{})
|
||||||
go tab.doRefresh(done)
|
go tab.doRefresh(refreshDone)
|
||||||
}
|
}
|
||||||
case <-done:
|
case <-refreshDone:
|
||||||
for _, ch := range waiting {
|
for _, ch := range waiting {
|
||||||
close(ch)
|
close(ch)
|
||||||
}
|
}
|
||||||
waiting = nil
|
waiting, refreshDone = nil, nil
|
||||||
done = nil
|
case <-revalidate.C:
|
||||||
|
go tab.doRevalidate(revalidateDone)
|
||||||
|
case <-revalidateDone:
|
||||||
|
revalidate.Reset(tab.nextRevalidateTime())
|
||||||
|
case <-copyNodes.C:
|
||||||
|
go tab.copyBondedNodes()
|
||||||
case <-tab.closeReq:
|
case <-tab.closeReq:
|
||||||
break loop
|
break loop
|
||||||
}
|
}
|
||||||
@ -349,8 +410,8 @@ loop:
|
|||||||
if tab.net != nil {
|
if tab.net != nil {
|
||||||
tab.net.close()
|
tab.net.close()
|
||||||
}
|
}
|
||||||
if done != nil {
|
if refreshDone != nil {
|
||||||
<-done
|
<-refreshDone
|
||||||
}
|
}
|
||||||
for _, ch := range waiting {
|
for _, ch := range waiting {
|
||||||
close(ch)
|
close(ch)
|
||||||
@ -365,38 +426,109 @@ loop:
|
|||||||
func (tab *Table) doRefresh(done chan struct{}) {
|
func (tab *Table) doRefresh(done chan struct{}) {
|
||||||
defer close(done)
|
defer close(done)
|
||||||
|
|
||||||
|
// Load nodes from the database and insert
|
||||||
|
// them. This should yield a few previously seen nodes that are
|
||||||
|
// (hopefully) still alive.
|
||||||
|
tab.loadSeedNodes(true)
|
||||||
|
|
||||||
|
// Run self lookup to discover new neighbor nodes.
|
||||||
|
tab.lookup(tab.self.ID, false)
|
||||||
|
|
||||||
// The Kademlia paper specifies that the bucket refresh should
|
// The Kademlia paper specifies that the bucket refresh should
|
||||||
// perform a lookup in the least recently used bucket. We cannot
|
// perform a lookup in the least recently used bucket. We cannot
|
||||||
// adhere to this because the findnode target is a 512bit value
|
// adhere to this because the findnode target is a 512bit value
|
||||||
// (not hash-sized) and it is not easily possible to generate a
|
// (not hash-sized) and it is not easily possible to generate a
|
||||||
// sha3 preimage that falls into a chosen bucket.
|
// sha3 preimage that falls into a chosen bucket.
|
||||||
// We perform a lookup with a random target instead.
|
// We perform a few lookups with a random target instead.
|
||||||
|
for i := 0; i < 3; i++ {
|
||||||
var target NodeID
|
var target NodeID
|
||||||
rand.Read(target[:])
|
crand.Read(target[:])
|
||||||
result := tab.lookup(target, false)
|
tab.lookup(target, false)
|
||||||
if len(result) > 0 {
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tab *Table) loadSeedNodes(bond bool) {
|
||||||
|
seeds := tab.db.querySeeds(seedCount, seedMaxAge)
|
||||||
|
seeds = append(seeds, tab.nursery...)
|
||||||
|
if bond {
|
||||||
|
seeds = tab.bondall(seeds)
|
||||||
|
}
|
||||||
|
for i := range seeds {
|
||||||
|
seed := seeds[i]
|
||||||
|
age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.bondTime(seed.ID)) }}
|
||||||
|
log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
|
||||||
|
tab.add(seed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// doRevalidate checks that the last node in a random bucket is still live
|
||||||
|
// and replaces or deletes the node if it isn't.
|
||||||
|
func (tab *Table) doRevalidate(done chan<- struct{}) {
|
||||||
|
defer func() { done <- struct{}{} }()
|
||||||
|
|
||||||
|
last, bi := tab.nodeToRevalidate()
|
||||||
|
if last == nil {
|
||||||
|
// No non-empty bucket found.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// The table is empty. Load nodes from the database and insert
|
// Ping the selected node and wait for a pong.
|
||||||
// them. This should yield a few previously seen nodes that are
|
err := tab.ping(last.ID, last.addr())
|
||||||
// (hopefully) still alive.
|
|
||||||
seeds := tab.db.querySeeds(seedCount, seedMaxAge)
|
|
||||||
seeds = tab.bondall(append(seeds, tab.nursery...))
|
|
||||||
|
|
||||||
if len(seeds) == 0 {
|
|
||||||
log.Debug("No discv4 seed nodes found")
|
|
||||||
}
|
|
||||||
for _, n := range seeds {
|
|
||||||
age := log.Lazy{Fn: func() time.Duration { return time.Since(tab.db.lastPong(n.ID)) }}
|
|
||||||
log.Trace("Found seed node in database", "id", n.ID, "addr", n.addr(), "age", age)
|
|
||||||
}
|
|
||||||
tab.mutex.Lock()
|
tab.mutex.Lock()
|
||||||
tab.stuff(seeds)
|
defer tab.mutex.Unlock()
|
||||||
tab.mutex.Unlock()
|
b := tab.buckets[bi]
|
||||||
|
if err == nil {
|
||||||
|
// The node responded, move it to the front.
|
||||||
|
log.Debug("Revalidated node", "b", bi, "id", last.ID)
|
||||||
|
b.bump(last)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// No reply received, pick a replacement or delete the node if there aren't
|
||||||
|
// any replacements.
|
||||||
|
if r := tab.replace(b, last); r != nil {
|
||||||
|
log.Debug("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
|
||||||
|
} else {
|
||||||
|
log.Debug("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Finally, do a self lookup to fill up the buckets.
|
// nodeToRevalidate returns the last node in a random, non-empty bucket.
|
||||||
tab.lookup(tab.self.ID, false)
|
func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
|
||||||
|
tab.mutex.Lock()
|
||||||
|
defer tab.mutex.Unlock()
|
||||||
|
|
||||||
|
for _, bi = range tab.rand.Perm(len(tab.buckets)) {
|
||||||
|
b := tab.buckets[bi]
|
||||||
|
if len(b.entries) > 0 {
|
||||||
|
last := b.entries[len(b.entries)-1]
|
||||||
|
return last, bi
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tab *Table) nextRevalidateTime() time.Duration {
|
||||||
|
tab.mutex.Lock()
|
||||||
|
defer tab.mutex.Unlock()
|
||||||
|
|
||||||
|
return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// copyBondedNodes adds nodes from the table to the database if they have been in the table
|
||||||
|
// longer than seedMinTableTime.
|
||||||
|
func (tab *Table) copyBondedNodes() {
|
||||||
|
tab.mutex.Lock()
|
||||||
|
defer tab.mutex.Unlock()
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
for _, b := range tab.buckets {
|
||||||
|
for _, n := range b.entries {
|
||||||
|
if now.Sub(n.addedAt) >= seedMinTableTime {
|
||||||
|
tab.db.updateNode(n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// closest returns the n nodes in the table that are closest to the
|
// closest returns the n nodes in the table that are closest to the
|
||||||
@ -459,15 +591,14 @@ func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16
|
|||||||
if id == tab.self.ID {
|
if id == tab.self.ID {
|
||||||
return nil, errors.New("is self")
|
return nil, errors.New("is self")
|
||||||
}
|
}
|
||||||
// Retrieve a previously known node and any recent findnode failures
|
if pinged && !tab.isInitDone() {
|
||||||
node, fails := tab.db.node(id), 0
|
return nil, errors.New("still initializing")
|
||||||
if node != nil {
|
|
||||||
fails = tab.db.findFails(id)
|
|
||||||
}
|
}
|
||||||
// If the node is unknown (non-bonded) or failed (remotely unknown), bond from scratch
|
// Start bonding if we haven't seen this node for a while or if it failed findnode too often.
|
||||||
|
node, fails := tab.db.node(id), tab.db.findFails(id)
|
||||||
|
age := time.Since(tab.db.bondTime(id))
|
||||||
var result error
|
var result error
|
||||||
age := time.Since(tab.db.lastPong(id))
|
if fails > 0 || age > nodeDBNodeExpiration {
|
||||||
if node == nil || fails > 0 || age > nodeDBNodeExpiration {
|
|
||||||
log.Trace("Starting bonding ping/pong", "id", id, "known", node != nil, "failcount", fails, "age", age)
|
log.Trace("Starting bonding ping/pong", "id", id, "known", node != nil, "failcount", fails, "age", age)
|
||||||
|
|
||||||
tab.bondmu.Lock()
|
tab.bondmu.Lock()
|
||||||
@ -494,10 +625,10 @@ func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16
|
|||||||
node = w.n
|
node = w.n
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if node != nil {
|
|
||||||
// Add the node to the table even if the bonding ping/pong
|
// Add the node to the table even if the bonding ping/pong
|
||||||
// fails. It will be replaced quickly if it continues to be
|
// fails. It will be replaced quickly if it continues to be
|
||||||
// unresponsive.
|
// unresponsive.
|
||||||
|
if node != nil {
|
||||||
tab.add(node)
|
tab.add(node)
|
||||||
tab.db.updateFindFails(id, 0)
|
tab.db.updateFindFails(id, 0)
|
||||||
}
|
}
|
||||||
@ -522,7 +653,6 @@ func (tab *Table) pingpong(w *bondproc, pinged bool, id NodeID, addr *net.UDPAdd
|
|||||||
}
|
}
|
||||||
// Bonding succeeded, update the node database.
|
// Bonding succeeded, update the node database.
|
||||||
w.n = NewNode(id, addr.IP, uint16(addr.Port), tcpPort)
|
w.n = NewNode(id, addr.IP, uint16(addr.Port), tcpPort)
|
||||||
tab.db.updateNode(w.n)
|
|
||||||
close(w.done)
|
close(w.done)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -533,17 +663,19 @@ func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
|
|||||||
if err := tab.net.ping(id, addr); err != nil {
|
if err := tab.net.ping(id, addr); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
tab.db.updateLastPong(id, time.Now())
|
tab.db.updateBondTime(id, time.Now())
|
||||||
|
|
||||||
// Start the background expiration goroutine after the first
|
|
||||||
// successful communication. Subsequent calls have no effect if it
|
|
||||||
// is already running. We do this here instead of somewhere else
|
|
||||||
// so that the search for seed nodes also considers older nodes
|
|
||||||
// that would otherwise be removed by the expiration.
|
|
||||||
tab.db.ensureExpirer()
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// bucket returns the bucket for the given node ID hash.
|
||||||
|
func (tab *Table) bucket(sha common.Hash) *bucket {
|
||||||
|
d := logdist(tab.self.sha, sha)
|
||||||
|
if d <= bucketMinDistance {
|
||||||
|
return tab.buckets[0]
|
||||||
|
}
|
||||||
|
return tab.buckets[d-bucketMinDistance-1]
|
||||||
|
}
|
||||||
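With hashBits = 256 the constants above work out to nBuckets = 17 and bucketMinDistance = 239: every log-distance of 239 or less shares bucket 0, and only distances 240 through 256 get buckets of their own. A standalone sketch of the same mapping (constants duplicated here for illustration only):

package main

import "fmt"

const (
	hashBits          = 256                 // len(common.Hash{}) * 8
	nBuckets          = hashBits / 15       // 17
	bucketMinDistance = hashBits - nBuckets // 239
)

// bucketIndex mirrors Table.bucket above, minus the table lookup.
func bucketIndex(d int) int {
	if d <= bucketMinDistance {
		return 0
	}
	return d - bucketMinDistance - 1
}

func main() {
	for _, d := range []int{1, 239, 240, 250, 256} {
		fmt.Printf("logdist %3d -> bucket %d\n", d, bucketIndex(d))
	}
}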
|
|
||||||
// add attempts to add the given node to its corresponding bucket. If the
|
// add attempts to add the given node to its corresponding bucket. If the
|
||||||
// bucket has space available, adding the node succeeds immediately.
|
// bucket has space available, adding the node succeeds immediately.
|
||||||
// Otherwise, the node is added if the least recently active node in
|
// Otherwise, the node is added if the least recently active node in
|
||||||
@ -551,57 +683,29 @@ func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
|
|||||||
//
|
//
|
||||||
// The caller must not hold tab.mutex.
|
// The caller must not hold tab.mutex.
|
||||||
func (tab *Table) add(new *Node) {
|
func (tab *Table) add(new *Node) {
|
||||||
b := tab.buckets[logdist(tab.self.sha, new.sha)]
|
|
||||||
tab.mutex.Lock()
|
tab.mutex.Lock()
|
||||||
defer tab.mutex.Unlock()
|
defer tab.mutex.Unlock()
|
||||||
if b.bump(new) {
|
|
||||||
return
|
b := tab.bucket(new.sha)
|
||||||
}
|
if !tab.bumpOrAdd(b, new) {
|
||||||
var oldest *Node
|
// Node is not in table. Add it to the replacement list.
|
||||||
if len(b.entries) == bucketSize {
|
tab.addReplacement(b, new)
|
||||||
oldest = b.entries[bucketSize-1]
|
|
||||||
if oldest.contested {
|
|
||||||
// The node is already being replaced, don't attempt
|
|
||||||
// to replace it.
|
|
||||||
return
|
|
||||||
}
|
|
||||||
oldest.contested = true
|
|
||||||
// Let go of the mutex so other goroutines can access
|
|
||||||
// the table while we ping the least recently active node.
|
|
||||||
tab.mutex.Unlock()
|
|
||||||
err := tab.ping(oldest.ID, oldest.addr())
|
|
||||||
tab.mutex.Lock()
|
|
||||||
oldest.contested = false
|
|
||||||
if err == nil {
|
|
||||||
// The node responded, don't replace it.
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
added := b.replace(new, oldest)
|
|
||||||
if added && tab.nodeAddedHook != nil {
|
|
||||||
tab.nodeAddedHook(new)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// stuff adds nodes to the end of their corresponding bucket
|
// stuff adds nodes to the end of their corresponding bucket
|
||||||
// if the bucket is not full. The caller must hold tab.mutex.
|
// if the bucket is not full. The caller must not hold tab.mutex.
|
||||||
func (tab *Table) stuff(nodes []*Node) {
|
func (tab *Table) stuff(nodes []*Node) {
|
||||||
outer:
|
tab.mutex.Lock()
|
||||||
|
defer tab.mutex.Unlock()
|
||||||
|
|
||||||
for _, n := range nodes {
|
for _, n := range nodes {
|
||||||
if n.ID == tab.self.ID {
|
if n.ID == tab.self.ID {
|
||||||
continue // don't add self
|
continue // don't add self
|
||||||
}
|
}
|
||||||
bucket := tab.buckets[logdist(tab.self.sha, n.sha)]
|
b := tab.bucket(n.sha)
|
||||||
for i := range bucket.entries {
|
if len(b.entries) < bucketSize {
|
||||||
if bucket.entries[i].ID == n.ID {
|
tab.bumpOrAdd(b, n)
|
||||||
continue outer // already in bucket
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(bucket.entries) < bucketSize {
|
|
||||||
bucket.entries = append(bucket.entries, n)
|
|
||||||
if tab.nodeAddedHook != nil {
|
|
||||||
tab.nodeAddedHook(n)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -611,36 +715,72 @@ outer:
|
|||||||
func (tab *Table) delete(node *Node) {
|
func (tab *Table) delete(node *Node) {
|
||||||
tab.mutex.Lock()
|
tab.mutex.Lock()
|
||||||
defer tab.mutex.Unlock()
|
defer tab.mutex.Unlock()
|
||||||
bucket := tab.buckets[logdist(tab.self.sha, node.sha)]
|
|
||||||
for i := range bucket.entries {
|
tab.deleteInBucket(tab.bucket(node.sha), node)
|
||||||
if bucket.entries[i].ID == node.ID {
|
|
||||||
bucket.entries = append(bucket.entries[:i], bucket.entries[i+1:]...)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *bucket) replace(n *Node, last *Node) bool {
|
func (tab *Table) addIP(b *bucket, ip net.IP) bool {
|
||||||
// Don't add if b already contains n.
|
if netutil.IsLAN(ip) {
|
||||||
for i := range b.entries {
|
return true
|
||||||
if b.entries[i].ID == n.ID {
|
}
|
||||||
|
if !tab.ips.Add(ip) {
|
||||||
|
log.Debug("IP exceeds table limit", "ip", ip)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
if !b.ips.Add(ip) {
|
||||||
// Replace last if it is still the last entry or just add n if b
|
log.Debug("IP exceeds bucket limit", "ip", ip)
|
||||||
// isn't full. If it is no longer the last entry, it has either been
|
tab.ips.Remove(ip)
|
||||||
// replaced with someone else or became active.
|
|
||||||
if len(b.entries) == bucketSize && (last == nil || b.entries[bucketSize-1].ID != last.ID) {
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if len(b.entries) < bucketSize {
|
|
||||||
b.entries = append(b.entries, nil)
|
|
||||||
}
|
|
||||||
copy(b.entries[1:], b.entries)
|
|
||||||
b.entries[0] = n
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (tab *Table) removeIP(b *bucket, ip net.IP) {
|
||||||
|
if netutil.IsLAN(ip) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tab.ips.Remove(ip)
|
||||||
|
b.ips.Remove(ip)
|
||||||
|
}
|
||||||
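Both limits above lean on netutil.DistinctNetSet, which counts tracked addresses per subnet and rejects additions beyond the configured limit. A hedged sketch of that behaviour in isolation (values mirror bucketSubnet/bucketIPLimit; check p2p/netutil for the authoritative API):

package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/p2p/netutil"
)

func main() {
	set := netutil.DistinctNetSet{Subnet: 24, Limit: 2}
	ips := []net.IP{
		net.ParseIP("203.0.113.1"),
		net.ParseIP("203.0.113.2"),
		net.ParseIP("203.0.113.3"),  // third address in the same /24 is rejected
		net.ParseIP("198.51.100.1"), // a different /24 is still accepted
	}
	for _, ip := range ips {
		fmt.Printf("add %-13s -> %v\n", ip, set.Add(ip))
	}
}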
|
|
||||||
|
func (tab *Table) addReplacement(b *bucket, n *Node) {
|
||||||
|
for _, e := range b.replacements {
|
||||||
|
if e.ID == n.ID {
|
||||||
|
return // already in list
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !tab.addIP(b, n.IP) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var removed *Node
|
||||||
|
b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
|
||||||
|
if removed != nil {
|
||||||
|
tab.removeIP(b, removed.IP)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// replace removes n from the replacement list and replaces 'last' with it if it is the
|
||||||
|
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
|
||||||
|
// with someone else or became active.
|
||||||
|
func (tab *Table) replace(b *bucket, last *Node) *Node {
|
||||||
|
if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
|
||||||
|
// Entry has moved, don't replace it.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Still the last entry.
|
||||||
|
if len(b.replacements) == 0 {
|
||||||
|
tab.deleteInBucket(b, last)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
r := b.replacements[tab.rand.Intn(len(b.replacements))]
|
||||||
|
b.replacements = deleteNode(b.replacements, r)
|
||||||
|
b.entries[len(b.entries)-1] = r
|
||||||
|
tab.removeIP(b, last.IP)
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// bump moves the given node to the front of the bucket entry list
|
||||||
|
// if it is contained in that list.
|
||||||
func (b *bucket) bump(n *Node) bool {
|
func (b *bucket) bump(n *Node) bool {
|
||||||
for i := range b.entries {
|
for i := range b.entries {
|
||||||
if b.entries[i].ID == n.ID {
|
if b.entries[i].ID == n.ID {
|
||||||
@ -653,6 +793,50 @@ func (b *bucket) bump(n *Node) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
|
||||||
|
// full. The return value is true if n is in the bucket.
|
||||||
|
func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
|
||||||
|
if b.bump(n) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
b.entries, _ = pushNode(b.entries, n, bucketSize)
|
||||||
|
b.replacements = deleteNode(b.replacements, n)
|
||||||
|
n.addedAt = time.Now()
|
||||||
|
if tab.nodeAddedHook != nil {
|
||||||
|
tab.nodeAddedHook(n)
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tab *Table) deleteInBucket(b *bucket, n *Node) {
|
||||||
|
b.entries = deleteNode(b.entries, n)
|
||||||
|
tab.removeIP(b, n.IP)
|
||||||
|
}
|
||||||
|
|
||||||
|
// pushNode adds n to the front of list, keeping at most max items.
|
||||||
|
func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
|
||||||
|
if len(list) < max {
|
||||||
|
list = append(list, nil)
|
||||||
|
}
|
+	removed := list[len(list)-1]
+	copy(list[1:], list)
+	list[0] = n
+	return list, removed
+}
+
+// deleteNode removes n from list.
+func deleteNode(list []*Node, n *Node) []*Node {
+	for i := range list {
+		if list[i].ID == n.ID {
+			return append(list[:i], list[i+1:]...)
+		}
+	}
+	return list
+}
+
 // nodesByDistance is a list of nodes, ordered by
 // distance to target.
 type nodesByDistance struct {

@@ -20,6 +20,7 @@ import (
 	"crypto/ecdsa"
 	"fmt"
 	"math/rand"
+	"sync"

 	"net"
 	"reflect"

@@ -32,60 +33,65 @@ import (
 )

 func TestTable_pingReplace(t *testing.T) {
-	doit := func(newNodeIsResponding, lastInBucketIsResponding bool) {
+	run := func(newNodeResponding, lastInBucketResponding bool) {
+		name := fmt.Sprintf("newNodeResponding=%t/lastInBucketResponding=%t", newNodeResponding, lastInBucketResponding)
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			testPingReplace(t, newNodeResponding, lastInBucketResponding)
+		})
+	}
+
+	run(true, true)
+	run(false, true)
+	run(true, false)
+	run(false, false)
+}
+
+func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
 	transport := newPingRecorder()
-	tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "")
+	tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
 	defer tab.Close()
-	pingSender := NewNode(MustHexID("a502af0f59b2aab7746995408c79e9ca312d2793cc997e44fc55eda62f0150bbb8c59a6f9269ba3a081518b62699ee807c7c19c20125ddfccca872608af9e370"), net.IP{}, 99, 99)
+
+	// Wait for init so bond is accepted.
+	<-tab.initDone

 	// fill up the sender's bucket.
-	last := fillBucket(tab, 253)
+	pingSender := NewNode(MustHexID("a502af0f59b2aab7746995408c79e9ca312d2793cc997e44fc55eda62f0150bbb8c59a6f9269ba3a081518b62699ee807c7c19c20125ddfccca872608af9e370"), net.IP{}, 99, 99)
+	last := fillBucket(tab, pingSender)

 	// this call to bond should replace the last node
 	// in its bucket if the node is not responding.
-	transport.responding[last.ID] = lastInBucketIsResponding
-	transport.responding[pingSender.ID] = newNodeIsResponding
+	transport.dead[last.ID] = !lastInBucketIsResponding
+	transport.dead[pingSender.ID] = !newNodeIsResponding
 	tab.bond(true, pingSender.ID, &net.UDPAddr{}, 0)
+	tab.doRevalidate(make(chan struct{}, 1))

 	// first ping goes to sender (bonding pingback)
 	if !transport.pinged[pingSender.ID] {
 		t.Error("table did not ping back sender")
 	}
-	if newNodeIsResponding {
-		// second ping goes to oldest node in bucket
-		// to see whether it is still alive.
-		if !transport.pinged[last.ID] {
-			t.Error("table did not ping last node in bucket")
-		}
-	}
+	// second ping goes to oldest node in bucket
+	// to see whether it is still alive.
+	if !transport.pinged[last.ID] {
+		t.Error("table did not ping last node in bucket")
+	}

 	tab.mutex.Lock()
 	defer tab.mutex.Unlock()
-	if l := len(tab.buckets[253].entries); l != bucketSize {
-		t.Errorf("wrong bucket size after bond: got %d, want %d", l, bucketSize)
-	}
-	if lastInBucketIsResponding || !newNodeIsResponding {
-		if !contains(tab.buckets[253].entries, last.ID) {
-			t.Error("last entry was removed")
-		}
-		if contains(tab.buckets[253].entries, pingSender.ID) {
-			t.Error("new entry was added")
-		}
-	} else {
-		if contains(tab.buckets[253].entries, last.ID) {
-			t.Error("last entry was not removed")
-		}
-		if !contains(tab.buckets[253].entries, pingSender.ID) {
-			t.Error("new entry was not added")
-		}
-	}
-	}
-
-	doit(true, true)
-	doit(false, true)
-	doit(true, false)
-	doit(false, false)
+	wantSize := bucketSize
+	if !lastInBucketIsResponding && !newNodeIsResponding {
+		wantSize--
+	}
+	if l := len(tab.bucket(pingSender.sha).entries); l != wantSize {
+		t.Errorf("wrong bucket size after bond: got %d, want %d", l, wantSize)
+	}
+	if found := contains(tab.bucket(pingSender.sha).entries, last.ID); found != lastInBucketIsResponding {
+		t.Errorf("last entry found: %t, want: %t", found, lastInBucketIsResponding)
+	}
+	wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding
+	if found := contains(tab.bucket(pingSender.sha).entries, pingSender.ID); found != wantNewEntry {
+		t.Errorf("new entry found: %t, want: %t", found, wantNewEntry)
+	}
 }

 func TestBucket_bumpNoDuplicates(t *testing.T) {
@@ -130,11 +136,45 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
 		}
 	}
 }

+// This checks that the table-wide IP limit is applied correctly.
+func TestTable_IPLimit(t *testing.T) {
+	transport := newPingRecorder()
+	tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
+	defer tab.Close()
+
+	for i := 0; i < tableIPLimit+1; i++ {
+		n := nodeAtDistance(tab.self.sha, i)
+		n.IP = net.IP{172, 0, 1, byte(i)}
+		tab.add(n)
+	}
+	if tab.len() > tableIPLimit {
+		t.Errorf("too many nodes in table")
+	}
+}
+
+// This checks that the per-bucket IP limit is applied correctly.
+func TestTable_BucketIPLimit(t *testing.T) {
+	transport := newPingRecorder()
+	tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
+	defer tab.Close()
+
+	d := 3
+	for i := 0; i < bucketIPLimit+1; i++ {
+		n := nodeAtDistance(tab.self.sha, d)
+		n.IP = net.IP{172, 0, 1, byte(i)}
+		tab.add(n)
+	}
+	if tab.len() > bucketIPLimit {
+		t.Errorf("too many nodes in table")
+	}
+}
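The two tests above only assert the observable outcome: tab.len() never exceeds the limit. How tab.add enforces the budget is not part of this hunk. The following is a minimal sketch of one way such limits can be wired with the netutil.DistinctNetSet type added later in this change; the Subnet/Limit values and the admit helper are illustrative assumptions, not code from the diff.

package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/p2p/netutil"
)

func main() {
	// Illustrative limits only; the real tableIPLimit/bucketIPLimit constants are not shown in this hunk.
	tableIPs := netutil.DistinctNetSet{Subnet: 24, Limit: 10}
	bucketIPs := netutil.DistinctNetSet{Subnet: 24, Limit: 2}

	admit := func(ip net.IP) bool {
		if !tableIPs.Add(ip) { // table-wide budget
			return false
		}
		if !bucketIPs.Add(ip) { // per-bucket budget
			tableIPs.Remove(ip) // roll back the table-wide reservation
			return false
		}
		return true
	}

	for i := 0; i < 4; i++ {
		ip := net.IP{172, 0, 1, byte(i)}
		fmt.Println(ip, "admitted:", admit(ip))
	}
	// With Limit 2 on the bucket set, the third and fourth addresses are rejected.
}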
+
 // fillBucket inserts nodes into the given bucket until
 // it is full. The node's IDs dont correspond to their
 // hashes.
-func fillBucket(tab *Table, ld int) (last *Node) {
-	b := tab.buckets[ld]
+func fillBucket(tab *Table, n *Node) (last *Node) {
+	ld := logdist(tab.self.sha, n.sha)
+	b := tab.bucket(n.sha)
 	for len(b.entries) < bucketSize {
 		b.entries = append(b.entries, nodeAtDistance(tab.self.sha, ld))
 	}

@@ -146,30 +186,39 @@ func fillBucket(tab *Table, ld int) (last *Node) {
 func nodeAtDistance(base common.Hash, ld int) (n *Node) {
 	n = new(Node)
 	n.sha = hashAtDistance(base, ld)
-	n.IP = net.IP{10, 0, 2, byte(ld)}
+	n.IP = net.IP{byte(ld), 0, 2, byte(ld)}
 	copy(n.ID[:], n.sha[:]) // ensure the node still has a unique ID
 	return n
 }

-type pingRecorder struct{ responding, pinged map[NodeID]bool }
+type pingRecorder struct {
+	mu           sync.Mutex
+	dead, pinged map[NodeID]bool
+}

 func newPingRecorder() *pingRecorder {
-	return &pingRecorder{make(map[NodeID]bool), make(map[NodeID]bool)}
+	return &pingRecorder{
+		dead:   make(map[NodeID]bool),
+		pinged: make(map[NodeID]bool),
+	}
 }

 func (t *pingRecorder) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
-	panic("findnode called on pingRecorder")
+	return nil, nil
 }
 func (t *pingRecorder) close() {}
 func (t *pingRecorder) waitping(from NodeID) error {
 	return nil // remote always pings
 }
 func (t *pingRecorder) ping(toid NodeID, toaddr *net.UDPAddr) error {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
 	t.pinged[toid] = true
-	if t.responding[toid] {
-		return nil
-	} else {
+	if t.dead[toid] {
 		return errTimeout
+	} else {
+		return nil
 	}
 }

@@ -178,7 +227,8 @@ func TestTable_closest(t *testing.T) {

 	test := func(test *closeTest) bool {
 		// for any node table, Target and N
-		tab, _ := newTable(nil, test.Self, &net.UDPAddr{}, "")
+		transport := newPingRecorder()
+		tab, _ := newTable(transport, test.Self, &net.UDPAddr{}, "", nil)
 		defer tab.Close()
 		tab.stuff(test.All)

@@ -237,8 +287,11 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) {
 		},
 	}
 	test := func(buf []*Node) bool {
-		tab, _ := newTable(nil, NodeID{}, &net.UDPAddr{}, "")
+		transport := newPingRecorder()
+		tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
 		defer tab.Close()
+		<-tab.initDone
+
 		for i := 0; i < len(buf); i++ {
 			ld := cfg.Rand.Intn(len(tab.buckets))
 			tab.stuff([]*Node{nodeAtDistance(tab.self.sha, ld)})

@@ -280,7 +333,7 @@ func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {

 func TestTable_Lookup(t *testing.T) {
 	self := nodeAtDistance(common.Hash{}, 0)
-	tab, _ := newTable(lookupTestnet, self.ID, &net.UDPAddr{}, "")
+	tab, _ := newTable(lookupTestnet, self.ID, &net.UDPAddr{}, "", nil)
 	defer tab.Close()

 	// lookup on empty table returns no nodes
@@ -216,9 +216,22 @@ type ReadPacket struct {
 	Addr *net.UDPAddr
 }

+// Config holds Table-related settings.
+type Config struct {
+	// These settings are required and configure the UDP listener:
+	PrivateKey *ecdsa.PrivateKey
+
+	// These settings are optional:
+	AnnounceAddr *net.UDPAddr      // local address announced in the DHT
+	NodeDBPath   string            // if set, the node database is stored at this filesystem location
+	NetRestrict  *netutil.Netlist  // network whitelist
+	Bootnodes    []*Node           // list of bootstrap nodes
+	Unhandled    chan<- ReadPacket // unhandled packets are sent on this channel
+}
+
 // ListenUDP returns a new table that listens for UDP packets on laddr.
-func ListenUDP(priv *ecdsa.PrivateKey, conn conn, realaddr *net.UDPAddr, unhandled chan ReadPacket, nodeDBPath string, netrestrict *netutil.Netlist) (*Table, error) {
-	tab, _, err := newUDP(priv, conn, realaddr, unhandled, nodeDBPath, netrestrict)
+func ListenUDP(c conn, cfg Config) (*Table, error) {
+	tab, _, err := newUDP(c, cfg)
 	if err != nil {
 		return nil, err
 	}
@@ -226,25 +239,29 @@ func ListenUDP(priv *ecdsa.PrivateKey, conn conn, realaddr *net.UDPAddr, unhandl
 	return tab, nil
 }

-func newUDP(priv *ecdsa.PrivateKey, c conn, realaddr *net.UDPAddr, unhandled chan ReadPacket, nodeDBPath string, netrestrict *netutil.Netlist) (*Table, *udp, error) {
+func newUDP(c conn, cfg Config) (*Table, *udp, error) {
 	udp := &udp{
 		conn:        c,
-		priv:        priv,
-		netrestrict: netrestrict,
+		priv:        cfg.PrivateKey,
+		netrestrict: cfg.NetRestrict,
 		closing:     make(chan struct{}),
 		gotreply:    make(chan reply),
 		addpending:  make(chan *pending),
 	}
+	realaddr := c.LocalAddr().(*net.UDPAddr)
+	if cfg.AnnounceAddr != nil {
+		realaddr = cfg.AnnounceAddr
+	}
 	// TODO: separate TCP port
 	udp.ourEndpoint = makeEndpoint(realaddr, uint16(realaddr.Port))
-	tab, err := newTable(udp, PubkeyID(&priv.PublicKey), realaddr, nodeDBPath)
+	tab, err := newTable(udp, PubkeyID(&cfg.PrivateKey.PublicKey), realaddr, cfg.NodeDBPath, cfg.Bootnodes)
 	if err != nil {
 		return nil, nil, err
 	}
 	udp.Table = tab

 	go udp.loop()
-	go udp.readLoop(unhandled)
+	go udp.readLoop(cfg.Unhandled)
 	return udp.Table, udp, nil
 }
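ListenUDP now takes a Config value instead of six positional arguments. A minimal usage sketch of the new signature follows; the key generation and UDP socket setup around the call are illustrative assumptions, not part of this diff.

package main

import (
	"log"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	addr, _ := net.ResolveUDPAddr("udp", ":30303")
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}
	// Only PrivateKey is required; the other Config fields are optional.
	tab, err := discover.ListenUDP(conn, discover.Config{
		PrivateKey: key,
		NodeDBPath: "", // empty path keeps the node database in memory
	})
	if err != nil {
		log.Fatal(err)
	}
	defer tab.Close()
}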
@@ -256,14 +273,20 @@ func (t *udp) close() {

 // ping sends a ping message to the given node and waits for a reply.
 func (t *udp) ping(toid NodeID, toaddr *net.UDPAddr) error {
-	// TODO: maybe check for ReplyTo field in callback to measure RTT
-	errc := t.pending(toid, pongPacket, func(interface{}) bool { return true })
-	t.send(toaddr, pingPacket, &ping{
+	req := &ping{
 		Version:    Version,
 		From:       t.ourEndpoint,
 		To:         makeEndpoint(toaddr, 0), // TODO: maybe use known TCP port from DB
 		Expiration: uint64(time.Now().Add(expiration).Unix()),
+	}
+	packet, hash, err := encodePacket(t.priv, pingPacket, req)
+	if err != nil {
+		return err
+	}
+	errc := t.pending(toid, pongPacket, func(p interface{}) bool {
+		return bytes.Equal(p.(*pong).ReplyTok, hash)
 	})
+	t.write(toaddr, req.name(), packet)
 	return <-errc
 }

@@ -447,40 +470,45 @@ func init() {
 	}
 }

-func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req packet) error {
-	packet, err := encodePacket(t.priv, ptype, req)
+func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req packet) ([]byte, error) {
+	packet, hash, err := encodePacket(t.priv, ptype, req)
 	if err != nil {
-		return err
+		return hash, err
 	}
-	_, err = t.conn.WriteToUDP(packet, toaddr)
-	log.Trace(">> "+req.name(), "addr", toaddr, "err", err)
+	return hash, t.write(toaddr, req.name(), packet)
+}
+
+func (t *udp) write(toaddr *net.UDPAddr, what string, packet []byte) error {
+	_, err := t.conn.WriteToUDP(packet, toaddr)
+	log.Trace(">> "+what, "addr", toaddr, "err", err)
 	return err
 }

-func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) ([]byte, error) {
+func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (packet, hash []byte, err error) {
 	b := new(bytes.Buffer)
 	b.Write(headSpace)
 	b.WriteByte(ptype)
 	if err := rlp.Encode(b, req); err != nil {
 		log.Error("Can't encode discv4 packet", "err", err)
-		return nil, err
+		return nil, nil, err
 	}
-	packet := b.Bytes()
+	packet = b.Bytes()
 	sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
 	if err != nil {
 		log.Error("Can't sign discv4 packet", "err", err)
-		return nil, err
+		return nil, nil, err
 	}
 	copy(packet[macSize:], sig)
 	// add the hash to the front. Note: this doesn't protect the
 	// packet in any way. Our public key will be part of this hash in
 	// The future.
-	copy(packet, crypto.Keccak256(packet[macSize:]))
-	return packet, nil
+	hash = crypto.Keccak256(packet[macSize:])
+	copy(packet, hash)
+	return packet, hash, nil
 }
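encodePacket now also returns the packet hash so that ping can match the ReplyTok echoed back in the pong. A short annotated sketch of the discv4 packet layout as it follows from this function; the 32-byte MAC and 65-byte signature sizes are stated assumptions here (macSize and sigSize are defined elsewhere in the file and not shown in this hunk).

// Layout produced by encodePacket (sizes are stated assumptions):
//
//   packet[0:32]   hash  = Keccak256(packet[32:])       -> echoed back as pong.ReplyTok
//   packet[32:97]  sig   = Sign(Keccak256(packet[97:])) -> 65-byte recoverable signature
//   packet[97]     ptype = pingPacket, pongPacket, ...
//   packet[98:]    RLP-encoded request body
//
// headSize would then be 32 + 65 = 97 bytes (macSize + sigSize).
const (
	macSize  = 256 / 8 // 32
	sigSize  = 520 / 8 // 65
	headSize = macSize + sigSize
)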
 // readLoop runs in its own goroutine. it handles incoming UDP packets.
-func (t *udp) readLoop(unhandled chan ReadPacket) {
+func (t *udp) readLoop(unhandled chan<- ReadPacket) {
 	defer t.conn.Close()
 	if unhandled != nil {
 		defer close(unhandled)

@@ -585,7 +613,7 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
 	if expired(req.Expiration) {
 		return errExpired
 	}
-	if t.db.node(fromID) == nil {
+	if !t.db.hasBond(fromID) {
 		// No bond exists, we don't process the packet. This prevents
 		// an attack vector where the discovery protocol could be used
 		// to amplify traffic in a DDOS attack. A malicious actor

@@ -601,18 +629,22 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
 	t.mutex.Unlock()

 	p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
+	var sent bool
 	// Send neighbors in chunks with at most maxNeighbors per packet
 	// to stay below the 1280 byte limit.
-	for i, n := range closest {
-		if netutil.CheckRelayIP(from.IP, n.IP) != nil {
-			continue
-		}
-		p.Nodes = append(p.Nodes, nodeToRPC(n))
-		if len(p.Nodes) == maxNeighbors || i == len(closest)-1 {
+	for _, n := range closest {
+		if netutil.CheckRelayIP(from.IP, n.IP) == nil {
+			p.Nodes = append(p.Nodes, nodeToRPC(n))
+		}
+		if len(p.Nodes) == maxNeighbors {
 			t.send(from, neighborsPacket, &p)
 			p.Nodes = p.Nodes[:0]
+			sent = true
 		}
 	}
+	if len(p.Nodes) > 0 || !sent {
+		t.send(from, neighborsPacket, &p)
+	}
 	return nil
 }
@@ -70,14 +70,15 @@ func newUDPTest(t *testing.T) *udpTest {
 		remotekey:  newkey(),
 		remoteaddr: &net.UDPAddr{IP: net.IP{10, 0, 1, 99}, Port: 30303},
 	}
-	realaddr := test.pipe.LocalAddr().(*net.UDPAddr)
-	test.table, test.udp, _ = newUDP(test.localkey, test.pipe, realaddr, nil, "", nil)
+	test.table, test.udp, _ = newUDP(test.pipe, Config{PrivateKey: test.localkey})
+	// Wait for initial refresh so the table doesn't send unexpected findnode.
+	<-test.table.initDone
 	return test
 }

 // handles a packet as if it had been sent to the transport.
 func (test *udpTest) packetIn(wantError error, ptype byte, data packet) error {
-	enc, err := encodePacket(test.remotekey, ptype, data)
+	enc, _, err := encodePacket(test.remotekey, ptype, data)
 	if err != nil {
 		return test.errorf("packet (%d) encode error: %v", ptype, err)
 	}

@@ -90,19 +91,19 @@ func (test *udpTest) packetIn(wantError error, ptype byte, data packet) error {

 // waits for a packet to be sent by the transport.
 // validate should have type func(*udpTest, X) error, where X is a packet type.
-func (test *udpTest) waitPacketOut(validate interface{}) error {
+func (test *udpTest) waitPacketOut(validate interface{}) ([]byte, error) {
 	dgram := test.pipe.waitPacketOut()
-	p, _, _, err := decodePacket(dgram)
+	p, _, hash, err := decodePacket(dgram)
 	if err != nil {
-		return test.errorf("sent packet decode error: %v", err)
+		return hash, test.errorf("sent packet decode error: %v", err)
 	}
 	fn := reflect.ValueOf(validate)
 	exptype := fn.Type().In(0)
 	if reflect.TypeOf(p) != exptype {
-		return test.errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype)
+		return hash, test.errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype)
 	}
 	fn.Call([]reflect.Value{reflect.ValueOf(p)})
-	return nil
+	return hash, nil
 }

 func (test *udpTest) errorf(format string, args ...interface{}) error {

@@ -246,12 +247,8 @@ func TestUDP_findnode(t *testing.T) {

 	// ensure there's a bond with the test node,
 	// findnode won't be accepted otherwise.
-	test.table.db.updateNode(NewNode(
-		PubkeyID(&test.remotekey.PublicKey),
-		test.remoteaddr.IP,
-		uint16(test.remoteaddr.Port),
-		99,
-	))
+	test.table.db.updateBondTime(PubkeyID(&test.remotekey.PublicKey), time.Now())
 	// check that closest neighbors are returned.
 	test.packetIn(nil, findnodePacket, &findnode{Target: testTarget, Expiration: futureExp})
 	expected := test.table.closest(targetHash, bucketSize)

@@ -351,7 +348,7 @@ func TestUDP_successfulPing(t *testing.T) {
 	})

 	// remote is unknown, the table pings back.
-	test.waitPacketOut(func(p *ping) error {
+	hash, _ := test.waitPacketOut(func(p *ping) error {
 		if !reflect.DeepEqual(p.From, test.udp.ourEndpoint) {
 			t.Errorf("got ping.From %v, want %v", p.From, test.udp.ourEndpoint)
 		}
@@ -365,7 +362,7 @@ func TestUDP_successfulPing(t *testing.T) {
 		}
 		return nil
 	})
-	test.packetIn(nil, pongPacket, &pong{Expiration: futureExp})
+	test.packetIn(nil, pongPacket, &pong{ReplyTok: hash, Expiration: futureExp})

 	// the node should be added to the table shortly after getting the
 	// pong packet.
||||||
|
@ -565,11 +565,8 @@ loop:
|
|||||||
if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
|
if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil {
|
||||||
lookupChn <- net.ticketStore.radius[res.target.topic].converged
|
lookupChn <- net.ticketStore.radius[res.target.topic].converged
|
||||||
}
|
}
|
||||||
net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node) []byte {
|
net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte {
|
||||||
net.ping(n, n.addr())
|
if n.state != nil && n.state.canQuery {
|
||||||
return n.pingEcho
|
|
||||||
}, func(n *Node, topic Topic) []byte {
|
|
||||||
if n.state == known {
|
|
||||||
return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
|
return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration
|
||||||
} else {
|
} else {
|
||||||
if n.state == unknown {
|
if n.state == unknown {
|
||||||
@ -633,7 +630,8 @@ loop:
|
|||||||
}
|
}
|
||||||
net.refreshResp <- refreshDone
|
net.refreshResp <- refreshDone
|
||||||
case <-refreshDone:
|
case <-refreshDone:
|
||||||
log.Trace("<-net.refreshDone")
|
log.Trace("<-net.refreshDone", "table size", net.tab.count)
|
||||||
|
if net.tab.count != 0 {
|
||||||
refreshDone = nil
|
refreshDone = nil
|
||||||
list := searchReqWhenRefreshDone
|
list := searchReqWhenRefreshDone
|
||||||
searchReqWhenRefreshDone = nil
|
searchReqWhenRefreshDone = nil
|
||||||
@ -642,6 +640,10 @@ loop:
|
|||||||
net.topicSearchReq <- req
|
net.topicSearchReq <- req
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
} else {
|
||||||
|
refreshDone = make(chan struct{})
|
||||||
|
net.refresh(refreshDone)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
log.Trace("loop stopped")
|
log.Trace("loop stopped")
|
||||||
@ -751,7 +753,15 @@ func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n
|
|||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
|
if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP {
|
||||||
|
if n.state == known {
|
||||||
|
// reject address change if node is known by us
|
||||||
err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
|
err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n)
|
||||||
|
} else {
|
||||||
|
// accept otherwise; this will be handled nicer with signed ENRs
|
||||||
|
n.IP = rn.IP
|
||||||
|
n.UDP = rn.UDP
|
||||||
|
n.TCP = rn.TCP
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
@ -494,13 +494,13 @@ func (s *ticketStore) registerLookupDone(lookup lookupInfo, nodes []*Node, ping
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ticketStore) searchLookupDone(lookup lookupInfo, nodes []*Node, ping func(n *Node) []byte, query func(n *Node, topic Topic) []byte) {
|
func (s *ticketStore) searchLookupDone(lookup lookupInfo, nodes []*Node, query func(n *Node, topic Topic) []byte) {
|
||||||
now := mclock.Now()
|
now := mclock.Now()
|
||||||
for i, n := range nodes {
|
for i, n := range nodes {
|
||||||
if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.radius[lookup.topic].minRadius {
|
if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.radius[lookup.topic].minRadius {
|
||||||
if lookup.radiusLookup {
|
if lookup.radiusLookup {
|
||||||
if lastReq, ok := s.nodeLastReq[n]; !ok || time.Duration(now-lastReq.time) > radiusTC {
|
if lastReq, ok := s.nodeLastReq[n]; !ok || time.Duration(now-lastReq.time) > radiusTC {
|
||||||
s.nodeLastReq[n] = reqInfo{pingHash: ping(n), lookup: lookup, time: now}
|
s.nodeLastReq[n] = reqInfo{pingHash: nil, lookup: lookup, time: now}
|
||||||
}
|
}
|
||||||
} // else {
|
} // else {
|
||||||
if s.canQueryTopic(n, lookup.topic) {
|
if s.canQueryTopic(n, lookup.topic) {
|
||||||
|
@ -49,7 +49,7 @@ var (
|
|||||||
// Timeouts
|
// Timeouts
|
||||||
const (
|
const (
|
||||||
respTimeout = 500 * time.Millisecond
|
respTimeout = 500 * time.Millisecond
|
||||||
sendTimeout = 500 * time.Millisecond
|
queryDelay = 1000 * time.Millisecond
|
||||||
expiration = 20 * time.Second
|
expiration = 20 * time.Second
|
||||||
|
|
||||||
ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP
|
ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP
|
||||||
@ -318,20 +318,20 @@ func (t *udp) sendTopicRegister(remote *Node, topics []Topic, idx int, pong []by
|
|||||||
|
|
||||||
func (t *udp) sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) {
|
func (t *udp) sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) {
|
||||||
p := topicNodes{Echo: queryHash}
|
p := topicNodes{Echo: queryHash}
|
||||||
if len(nodes) == 0 {
|
var sent bool
|
||||||
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
|
for _, result := range nodes {
|
||||||
return
|
if result.IP.Equal(t.net.tab.self.IP) || netutil.CheckRelayIP(remote.IP, result.IP) == nil {
|
||||||
}
|
|
||||||
for i, result := range nodes {
|
|
||||||
if netutil.CheckRelayIP(remote.IP, result.IP) != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
p.Nodes = append(p.Nodes, nodeToRPC(result))
|
p.Nodes = append(p.Nodes, nodeToRPC(result))
|
||||||
if len(p.Nodes) == maxTopicNodes || i == len(nodes)-1 {
|
}
|
||||||
|
if len(p.Nodes) == maxTopicNodes {
|
||||||
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
|
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
|
||||||
p.Nodes = p.Nodes[:0]
|
p.Nodes = p.Nodes[:0]
|
||||||
|
sent = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if !sent || len(p.Nodes) > 0 {
|
||||||
|
t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req interface{}) (hash []byte, err error) {
|
func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req interface{}) (hash []byte, err error) {
|
||||||
@@ -18,8 +18,11 @@
 package netutil

 import (
+	"bytes"
 	"errors"
+	"fmt"
 	"net"
+	"sort"
 	"strings"
 )

@@ -189,3 +192,131 @@ func CheckRelayIP(sender, addr net.IP) error {
 	}
 	return nil
 }
+
+// SameNet reports whether two IP addresses have an equal prefix of the given bit length.
+func SameNet(bits uint, ip, other net.IP) bool {
+	ip4, other4 := ip.To4(), other.To4()
+	switch {
+	case (ip4 == nil) != (other4 == nil):
+		return false
+	case ip4 != nil:
+		return sameNet(bits, ip4, other4)
+	default:
+		return sameNet(bits, ip.To16(), other.To16())
+	}
+}
+
+func sameNet(bits uint, ip, other net.IP) bool {
+	nb := int(bits / 8)
+	mask := ^byte(0xFF >> (bits % 8))
+	if mask != 0 && nb < len(ip) && ip[nb]&mask != other[nb]&mask {
+		return false
+	}
+	return nb <= len(ip) && bytes.Equal(ip[:nb], other[:nb])
+}
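The only subtle step in sameNet is the partial-byte mask: ^byte(0xFF >> (bits % 8)) keeps exactly the top bits%8 bits of the byte that straddles the prefix boundary. A small self-contained check of that arithmetic, using the 0.33.0.1 / 0.34.0.2 pair from the test table added further below:

package main

import "fmt"

func main() {
	for _, bits := range []uint{8, 13, 15} {
		mask := ^byte(0xFF >> (bits % 8))
		// 0.33.0.1 vs 0.34.0.2: the first byte (0) matches, the second byte is 0x21 vs 0x22.
		a, b := byte(0x21), byte(0x22)
		fmt.Printf("bits=%-2d mask=%08b a&mask=%08b b&mask=%08b equal=%t\n",
			bits, mask, a&mask, b&mask, a&mask == b&mask)
	}
	// bits=13 keeps the top 5 bits of the second byte: 0x21 and 0x22 agree -> SameNet reports true.
	// bits=15 keeps the top 7 bits: they differ -> SameNet reports false, matching the test cases.
}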
+
+// DistinctNetSet tracks IPs, ensuring that at most N of them
+// fall into the same network range.
+type DistinctNetSet struct {
+	Subnet uint // number of common prefix bits
+	Limit  uint // maximum number of IPs in each subnet
+
+	members map[string]uint
+	buf     net.IP
+}
+
+// Add adds an IP address to the set. It returns false (and doesn't add the IP) if the
+// number of existing IPs in the defined range exceeds the limit.
+func (s *DistinctNetSet) Add(ip net.IP) bool {
+	key := s.key(ip)
+	n := s.members[string(key)]
+	if n < s.Limit {
+		s.members[string(key)] = n + 1
+		return true
+	}
+	return false
+}
+
+// Remove removes an IP from the set.
+func (s *DistinctNetSet) Remove(ip net.IP) {
+	key := s.key(ip)
+	if n, ok := s.members[string(key)]; ok {
+		if n == 1 {
+			delete(s.members, string(key))
+		} else {
+			s.members[string(key)] = n - 1
+		}
+	}
+}
+
+// Contains reports whether the given IP is contained in the set.
+func (s DistinctNetSet) Contains(ip net.IP) bool {
+	key := s.key(ip)
+	_, ok := s.members[string(key)]
+	return ok
+}
+
+// Len returns the number of tracked IPs.
+func (s DistinctNetSet) Len() int {
+	n := uint(0)
+	for _, i := range s.members {
+		n += i
+	}
+	return int(n)
+}
+
+// key encodes the map key for an address into a temporary buffer.
+//
+// The first byte of key is '4' or '6' to distinguish IPv4/IPv6 address types.
+// The remainder of the key is the IP, truncated to the number of bits.
+func (s *DistinctNetSet) key(ip net.IP) net.IP {
+	// Lazily initialize storage.
+	if s.members == nil {
+		s.members = make(map[string]uint)
+		s.buf = make(net.IP, 17)
+	}
+	// Canonicalize ip and bits.
+	typ := byte('6')
+	if ip4 := ip.To4(); ip4 != nil {
+		typ, ip = '4', ip4
+	}
+	bits := s.Subnet
+	if bits > uint(len(ip)*8) {
+		bits = uint(len(ip) * 8)
+	}
+	// Encode the prefix into s.buf.
+	nb := int(bits / 8)
+	mask := ^byte(0xFF >> (bits % 8))
+	s.buf[0] = typ
+	buf := append(s.buf[:1], ip[:nb]...)
+	if nb < len(ip) && mask != 0 {
+		buf = append(buf, ip[nb]&mask)
+	}
+	return buf
+}
+
+// String implements fmt.Stringer
+func (s DistinctNetSet) String() string {
+	var buf bytes.Buffer
+	buf.WriteString("{")
+	keys := make([]string, 0, len(s.members))
+	for k := range s.members {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for i, k := range keys {
+		var ip net.IP
+		if k[0] == '4' {
+			ip = make(net.IP, 4)
+		} else {
+			ip = make(net.IP, 16)
+		}
+		copy(ip, k[1:])
+		fmt.Fprintf(&buf, "%v×%d", ip, s.members[k])
+		if i != len(keys)-1 {
+			buf.WriteString(" ")
+		}
+	}
+	buf.WriteString("}")
+	return buf.String()
+}
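A short usage sketch of the new DistinctNetSet type; the /24 subnet width and the limit of two are arbitrary example values, not constants taken from this change.

package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/p2p/netutil"
)

func main() {
	// Allow at most two addresses from any /24.
	set := netutil.DistinctNetSet{Subnet: 24, Limit: 2}

	fmt.Println(set.Add(net.ParseIP("10.0.0.1"))) // true
	fmt.Println(set.Add(net.ParseIP("10.0.0.2"))) // true
	fmt.Println(set.Add(net.ParseIP("10.0.0.3"))) // false: /24 budget exhausted
	fmt.Println(set.Add(net.ParseIP("10.0.1.1"))) // true: different /24

	set.Remove(net.ParseIP("10.0.0.1"))
	fmt.Println(set.Add(net.ParseIP("10.0.0.3"))) // true again after making room
	fmt.Println(set.Len(), set)                   // 3 tracked IPs, printed via String()
}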
||||||
|
@ -17,9 +17,11 @@
|
|||||||
package netutil
|
package netutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
"testing/quick"
|
||||||
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
"github.com/davecgh/go-spew/spew"
|
||||||
)
|
)
|
||||||
@ -171,3 +173,90 @@ func BenchmarkCheckRelayIP(b *testing.B) {
|
|||||||
CheckRelayIP(sender, addr)
|
CheckRelayIP(sender, addr)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSameNet(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
ip, other string
|
||||||
|
bits uint
|
||||||
|
want bool
|
||||||
|
}{
|
||||||
|
{"0.0.0.0", "0.0.0.0", 32, true},
|
||||||
|
{"0.0.0.0", "0.0.0.1", 0, true},
|
||||||
|
{"0.0.0.0", "0.0.0.1", 31, true},
|
||||||
|
{"0.0.0.0", "0.0.0.1", 32, false},
|
||||||
|
{"0.33.0.1", "0.34.0.2", 8, true},
|
||||||
|
{"0.33.0.1", "0.34.0.2", 13, true},
|
||||||
|
{"0.33.0.1", "0.34.0.2", 15, false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
if ok := SameNet(test.bits, parseIP(test.ip), parseIP(test.other)); ok != test.want {
|
||||||
|
t.Errorf("SameNet(%d, %s, %s) == %t, want %t", test.bits, test.ip, test.other, ok, test.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ExampleSameNet() {
|
||||||
|
// This returns true because the IPs are in the same /24 network:
|
||||||
|
fmt.Println(SameNet(24, net.IP{127, 0, 0, 1}, net.IP{127, 0, 0, 3}))
|
||||||
|
// This call returns false:
|
||||||
|
fmt.Println(SameNet(24, net.IP{127, 3, 0, 1}, net.IP{127, 5, 0, 3}))
|
||||||
|
// Output:
|
||||||
|
// true
|
||||||
|
// false
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDistinctNetSet(t *testing.T) {
|
||||||
|
ops := []struct {
|
||||||
|
add, remove string
|
||||||
|
fails bool
|
||||||
|
}{
|
||||||
|
{add: "127.0.0.1"},
|
||||||
|
{add: "127.0.0.2"},
|
||||||
|
{add: "127.0.0.3", fails: true},
|
||||||
|
{add: "127.32.0.1"},
|
||||||
|
{add: "127.32.0.2"},
|
||||||
|
{add: "127.32.0.3", fails: true},
|
||||||
|
{add: "127.33.0.1", fails: true},
|
||||||
|
{add: "127.34.0.1"},
|
||||||
|
{add: "127.34.0.2"},
|
||||||
|
{add: "127.34.0.3", fails: true},
|
||||||
|
// Make room for an address, then add again.
|
||||||
|
{remove: "127.0.0.1"},
|
||||||
|
{add: "127.0.0.3"},
|
||||||
|
{add: "127.0.0.3", fails: true},
|
||||||
|
}
|
||||||
|
|
||||||
|
set := DistinctNetSet{Subnet: 15, Limit: 2}
|
||||||
|
for _, op := range ops {
|
||||||
|
var desc string
|
||||||
|
if op.add != "" {
|
||||||
|
desc = fmt.Sprintf("Add(%s)", op.add)
|
||||||
|
if ok := set.Add(parseIP(op.add)); ok != !op.fails {
|
||||||
|
t.Errorf("%s == %t, want %t", desc, ok, !op.fails)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
desc = fmt.Sprintf("Remove(%s)", op.remove)
|
||||||
|
set.Remove(parseIP(op.remove))
|
||||||
|
}
|
||||||
|
t.Logf("%s: %v", desc, set)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDistinctNetSetAddRemove(t *testing.T) {
|
||||||
|
cfg := &quick.Config{}
|
||||||
|
fn := func(ips []net.IP) bool {
|
||||||
|
s := DistinctNetSet{Limit: 3, Subnet: 2}
|
||||||
|
for _, ip := range ips {
|
||||||
|
s.Add(ip)
|
||||||
|
}
|
||||||
|
for _, ip := range ips {
|
||||||
|
s.Remove(ip)
|
||||||
|
}
|
||||||
|
return s.Len() == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := quick.Check(fn, cfg); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -419,6 +419,9 @@ type PeerInfo struct {
|
|||||||
Network struct {
|
Network struct {
|
||||||
LocalAddress string `json:"localAddress"` // Local endpoint of the TCP data connection
|
LocalAddress string `json:"localAddress"` // Local endpoint of the TCP data connection
|
||||||
RemoteAddress string `json:"remoteAddress"` // Remote endpoint of the TCP data connection
|
RemoteAddress string `json:"remoteAddress"` // Remote endpoint of the TCP data connection
|
||||||
|
Inbound bool `json:"inbound"`
|
||||||
|
Trusted bool `json:"trusted"`
|
||||||
|
Static bool `json:"static"`
|
||||||
} `json:"network"`
|
} `json:"network"`
|
||||||
Protocols map[string]interface{} `json:"protocols"` // Sub-protocol specific metadata fields
|
Protocols map[string]interface{} `json:"protocols"` // Sub-protocol specific metadata fields
|
||||||
}
|
}
|
||||||
@ -439,6 +442,9 @@ func (p *Peer) Info() *PeerInfo {
|
|||||||
}
|
}
|
||||||
info.Network.LocalAddress = p.LocalAddr().String()
|
info.Network.LocalAddress = p.LocalAddr().String()
|
||||||
info.Network.RemoteAddress = p.RemoteAddr().String()
|
info.Network.RemoteAddress = p.RemoteAddr().String()
|
||||||
|
info.Network.Inbound = p.rw.is(inboundConn)
|
||||||
|
info.Network.Trusted = p.rw.is(trustedConn)
|
||||||
|
info.Network.Static = p.rw.is(staticDialedConn)
|
||||||
|
|
||||||
// Gather all the running protocol infos
|
// Gather all the running protocol infos
|
||||||
for _, proto := range p.running {
|
for _, proto := range p.running {
|
||||||
|
311
p2p/protocols/protocol.go
Normal file
311
p2p/protocols/protocol.go
Normal file
@ -0,0 +1,311 @@
|
|||||||
|
// Copyright 2017 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package protocols is an extension to p2p. It offers a user friendly simple way to define
|
||||||
|
devp2p subprotocols by abstracting away code standardly shared by protocols.
|
||||||
|
|
||||||
|
* automate assigments of code indexes to messages
|
||||||
|
* automate RLP decoding/encoding based on reflecting
|
||||||
|
* provide the forever loop to read incoming messages
|
||||||
|
* standardise error handling related to communication
|
||||||
|
* standardised handshake negotiation
|
||||||
|
* TODO: automatic generation of wire protocol specification for peers
|
||||||
|
|
||||||
|
*/
|
||||||
|
package protocols
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
|
)
|
||||||
|
|
||||||
|
// error codes used by this protocol scheme
|
||||||
|
const (
|
||||||
|
ErrMsgTooLong = iota
|
||||||
|
ErrDecode
|
||||||
|
ErrWrite
|
||||||
|
ErrInvalidMsgCode
|
||||||
|
ErrInvalidMsgType
|
||||||
|
ErrHandshake
|
||||||
|
ErrNoHandler
|
||||||
|
ErrHandler
|
||||||
|
)
|
||||||
|
|
||||||
|
// error description strings associated with the codes
|
||||||
|
var errorToString = map[int]string{
|
||||||
|
ErrMsgTooLong: "Message too long",
|
||||||
|
ErrDecode: "Invalid message (RLP error)",
|
||||||
|
ErrWrite: "Error sending message",
|
||||||
|
ErrInvalidMsgCode: "Invalid message code",
|
||||||
|
ErrInvalidMsgType: "Invalid message type",
|
||||||
|
ErrHandshake: "Handshake error",
|
||||||
|
ErrNoHandler: "No handler registered error",
|
||||||
|
ErrHandler: "Message handler error",
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Error implements the standard go error interface.
|
||||||
|
Use:
|
||||||
|
|
||||||
|
errorf(code, format, params ...interface{})
|
||||||
|
|
||||||
|
Prints as:
|
||||||
|
|
||||||
|
<description>: <details>
|
||||||
|
|
||||||
|
where description is given by code in errorToString
|
||||||
|
and details is fmt.Sprintf(format, params...)
|
||||||
|
|
||||||
|
exported field Code can be checked
|
||||||
|
*/
|
||||||
|
type Error struct {
|
||||||
|
Code int
|
||||||
|
message string
|
||||||
|
format string
|
||||||
|
params []interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e Error) Error() (message string) {
|
||||||
|
if len(e.message) == 0 {
|
||||||
|
name, ok := errorToString[e.Code]
|
||||||
|
if !ok {
|
||||||
|
panic("invalid message code")
|
||||||
|
}
|
||||||
|
e.message = name
|
||||||
|
if e.format != "" {
|
||||||
|
e.message += ": " + fmt.Sprintf(e.format, e.params...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return e.message
|
||||||
|
}
|
||||||
|
|
||||||
|
func errorf(code int, format string, params ...interface{}) *Error {
|
||||||
|
return &Error{
|
||||||
|
Code: code,
|
||||||
|
format: format,
|
||||||
|
params: params,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spec is a protocol specification including its name and version as well as
|
||||||
|
// the types of messages which are exchanged
|
||||||
|
type Spec struct {
|
||||||
|
// Name is the name of the protocol, often a three-letter word
|
||||||
|
Name string
|
||||||
|
|
||||||
|
// Version is the version number of the protocol
|
||||||
|
Version uint
|
||||||
|
|
||||||
|
// MaxMsgSize is the maximum accepted length of the message payload
|
||||||
|
MaxMsgSize uint32
|
||||||
|
|
||||||
|
// Messages is a list of message data types which this protocol uses, with
|
||||||
|
// each message type being sent with its array index as the code (so
|
||||||
|
// [&foo{}, &bar{}, &baz{}] would send foo, bar and baz with codes
|
||||||
|
// 0, 1 and 2 respectively)
|
||||||
|
// each message must have a single unique data type
|
||||||
|
Messages []interface{}
|
||||||
|
|
||||||
|
initOnce sync.Once
|
||||||
|
codes map[reflect.Type]uint64
|
||||||
|
types map[uint64]reflect.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Spec) init() {
|
||||||
|
s.initOnce.Do(func() {
|
||||||
|
s.codes = make(map[reflect.Type]uint64, len(s.Messages))
|
||||||
|
s.types = make(map[uint64]reflect.Type, len(s.Messages))
|
||||||
|
for i, msg := range s.Messages {
|
||||||
|
code := uint64(i)
|
||||||
|
typ := reflect.TypeOf(msg)
|
||||||
|
if typ.Kind() == reflect.Ptr {
|
||||||
|
typ = typ.Elem()
|
||||||
|
}
|
||||||
|
s.codes[typ] = code
|
||||||
|
s.types[code] = typ
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Length returns the number of message types in the protocol
|
||||||
|
func (s *Spec) Length() uint64 {
|
||||||
|
return uint64(len(s.Messages))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCode returns the message code of a type, and boolean second argument is
|
||||||
|
// false if the message type is not found
|
||||||
|
func (s *Spec) GetCode(msg interface{}) (uint64, bool) {
|
||||||
|
s.init()
|
||||||
|
typ := reflect.TypeOf(msg)
|
||||||
|
if typ.Kind() == reflect.Ptr {
|
||||||
|
typ = typ.Elem()
|
||||||
|
}
|
||||||
|
code, ok := s.codes[typ]
|
||||||
|
return code, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMsg construct a new message type given the code
|
||||||
|
func (s *Spec) NewMsg(code uint64) (interface{}, bool) {
|
||||||
|
s.init()
|
||||||
|
typ, ok := s.types[code]
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return reflect.New(typ).Interface(), true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Peer represents a remote peer or protocol instance that is running on a peer connection with
|
||||||
|
// a remote peer
|
||||||
|
type Peer struct {
|
||||||
|
*p2p.Peer // the p2p.Peer object representing the remote
|
||||||
|
rw p2p.MsgReadWriter // p2p.MsgReadWriter to send messages to and read messages from
|
||||||
|
spec *Spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPeer constructs a new peer
|
||||||
|
// this constructor is called by the p2p.Protocol#Run function
|
||||||
|
// the first two arguments are the arguments passed to p2p.Protocol.Run function
|
||||||
|
// the third argument is the Spec describing the protocol
|
||||||
|
func NewPeer(p *p2p.Peer, rw p2p.MsgReadWriter, spec *Spec) *Peer {
|
||||||
|
return &Peer{
|
||||||
|
Peer: p,
|
||||||
|
rw: rw,
|
||||||
|
spec: spec,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run starts the forever loop that handles incoming messages
|
||||||
|
// called within the p2p.Protocol#Run function
|
||||||
|
// the handler argument is a function which is called for each message received
|
||||||
|
// from the remote peer, a returned error causes the loop to exit
|
||||||
|
// resulting in disconnection
|
||||||
|
func (p *Peer) Run(handler func(msg interface{}) error) error {
|
||||||
|
for {
|
||||||
|
if err := p.handleIncoming(handler); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drop disconnects a peer.
|
||||||
|
// TODO: may need to implement protocol drop only? don't want to kick off the peer
|
||||||
|
// if they are useful for other protocols
|
||||||
|
func (p *Peer) Drop(err error) {
|
||||||
|
p.Disconnect(p2p.DiscSubprotocolError)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send takes a message, encodes it in RLP, finds the right message code and sends the
|
||||||
|
// message off to the peer
|
||||||
|
// this low level call will be wrapped by libraries providing routed or broadcast sends
|
||||||
|
// but often just used to forward and push messages to directly connected peers
|
||||||
|
func (p *Peer) Send(msg interface{}) error {
|
||||||
|
code, found := p.spec.GetCode(msg)
|
||||||
|
if !found {
|
||||||
|
return errorf(ErrInvalidMsgType, "%v", code)
|
||||||
|
}
|
||||||
|
return p2p.Send(p.rw, code, msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleIncoming(code)
|
||||||
|
// is called each cycle of the main forever loop that dispatches incoming messages
|
||||||
|
// if this returns an error the loop returns and the peer is disconnected with the error
|
||||||
|
// this generic handler
|
||||||
|
// * checks message size,
|
||||||
|
// * checks for out-of-range message codes,
|
||||||
|
// * handles decoding with reflection,
|
||||||
|
// * call handlers as callbacks
|
||||||
|
func (p *Peer) handleIncoming(handle func(msg interface{}) error) error {
|
||||||
|
msg, err := p.rw.ReadMsg()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// make sure that the payload has been fully consumed
|
||||||
|
defer msg.Discard()
|
||||||
|
|
||||||
|
if msg.Size > p.spec.MaxMsgSize {
|
||||||
|
return errorf(ErrMsgTooLong, "%v > %v", msg.Size, p.spec.MaxMsgSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
val, ok := p.spec.NewMsg(msg.Code)
|
||||||
|
if !ok {
|
||||||
|
return errorf(ErrInvalidMsgCode, "%v", msg.Code)
|
||||||
|
}
|
||||||
|
if err := msg.Decode(val); err != nil {
|
||||||
|
return errorf(ErrDecode, "<= %v: %v", msg, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// call the registered handler callbacks
|
||||||
|
// a registered callback take the decoded message as argument as an interface
|
||||||
|
// which the handler is supposed to cast to the appropriate type
|
||||||
|
// it is entirely safe not to check the cast in the handler since the handler is
|
||||||
|
// chosen based on the proper type in the first place
|
||||||
|
if err := handle(val); err != nil {
|
||||||
|
return errorf(ErrHandler, "(msg code %v): %v", msg.Code, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handshake negotiates a handshake on the peer connection
|
||||||
|
// * arguments
|
||||||
|
// * context
|
||||||
|
// * the local handshake to be sent to the remote peer
|
||||||
|
// * funcion to be called on the remote handshake (can be nil)
|
||||||
|
// * expects a remote handshake back of the same type
|
||||||
|
// * the dialing peer needs to send the handshake first and then waits for remote
|
||||||
|
// * the listening peer waits for the remote handshake and then sends it
|
||||||
|
// returns the remote handshake and an error
|
||||||
|
func (p *Peer) Handshake(ctx context.Context, hs interface{}, verify func(interface{}) error) (rhs interface{}, err error) {
|
||||||
|
if _, ok := p.spec.GetCode(hs); !ok {
|
||||||
|
return nil, errorf(ErrHandshake, "unknown handshake message type: %T", hs)
|
||||||
|
}
|
||||||
|
errc := make(chan error, 2)
|
||||||
|
handle := func(msg interface{}) error {
|
||||||
|
rhs = msg
|
||||||
|
if verify != nil {
|
||||||
|
return verify(rhs)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
send := func() { errc <- p.Send(hs) }
|
||||||
|
receive := func() { errc <- p.handleIncoming(handle) }
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if p.Inbound() {
|
||||||
|
receive()
|
||||||
|
send()
|
||||||
|
} else {
|
||||||
|
send()
|
||||||
|
receive()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
select {
|
||||||
|
case err = <-errc:
|
||||||
|
case <-ctx.Done():
|
||||||
|
err = ctx.Err()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, errorf(ErrHandshake, err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return rhs, nil
|
||||||
|
}
|
389
p2p/protocols/protocol_test.go
Normal file
389
p2p/protocols/protocol_test.go
Normal file
@ -0,0 +1,389 @@
|
|||||||
|
// Copyright 2017 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package protocols
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||||
|
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// handshake message type
|
||||||
|
type hs0 struct {
|
||||||
|
C uint
|
||||||
|
}
|
||||||
|
|
||||||
|
// message to kill/drop the peer with nodeID
|
||||||
|
type kill struct {
|
||||||
|
C discover.NodeID
|
||||||
|
}
|
||||||
|
|
||||||
|
// message to drop connection
|
||||||
|
type drop struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
/// protoHandshake represents module-independent aspects of the protocol and is
|
||||||
|
// the first message peers send and receive as part the initial exchange
|
||||||
|
type protoHandshake struct {
|
||||||
|
Version uint // local and remote peer should have identical version
|
||||||
|
NetworkID string // local and remote peer should have identical network id
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkProtoHandshake verifies local and remote protoHandshakes match
|
||||||
|
func checkProtoHandshake(testVersion uint, testNetworkID string) func(interface{}) error {
|
||||||
|
return func(rhs interface{}) error {
|
||||||
|
remote := rhs.(*protoHandshake)
|
||||||
|
if remote.NetworkID != testNetworkID {
|
||||||
|
return fmt.Errorf("%s (!= %s)", remote.NetworkID, testNetworkID)
|
||||||
|
}
|
||||||
|
|
||||||
|
if remote.Version != testVersion {
|
||||||
|
return fmt.Errorf("%d (!= %d)", remote.Version, testVersion)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newProtocol sets up a protocol
|
||||||
|
// the run function here demonstrates a typical protocol using peerPool, handshake
|
||||||
|
// and messages registered to handlers
|
||||||
|
func newProtocol(pp *p2ptest.TestPeerPool) func(*p2p.Peer, p2p.MsgReadWriter) error {
|
||||||
|
spec := &Spec{
|
||||||
|
Name: "test",
|
||||||
|
Version: 42,
|
||||||
|
MaxMsgSize: 10 * 1024,
|
||||||
|
Messages: []interface{}{
|
||||||
|
protoHandshake{},
|
||||||
|
hs0{},
|
||||||
|
kill{},
|
||||||
|
drop{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
|
||||||
|
peer := NewPeer(p, rw, spec)
|
||||||
|
|
||||||
|
// initiate one-off protohandshake and check validity
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
defer cancel()
|
||||||
|
phs := &protoHandshake{42, "420"}
|
||||||
|
hsCheck := checkProtoHandshake(phs.Version, phs.NetworkID)
|
||||||
|
_, err := peer.Handshake(ctx, phs, hsCheck)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
lhs := &hs0{42}
|
||||||
|
// module handshake demonstrating a simple repeatable exchange of same-type message
|
||||||
|
hs, err := peer.Handshake(ctx, lhs, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if rmhs := hs.(*hs0); rmhs.C > lhs.C {
|
||||||
|
return fmt.Errorf("handshake mismatch remote %v > local %v", rmhs.C, lhs.C)
|
||||||
|
}
|
||||||
|
|
||||||
|
handle := func(msg interface{}) error {
|
||||||
|
switch msg := msg.(type) {
|
||||||
|
|
||||||
|
case *protoHandshake:
|
||||||
|
return errors.New("duplicate handshake")
|
||||||
|
|
||||||
|
case *hs0:
|
||||||
|
rhs := msg
|
||||||
|
if rhs.C > lhs.C {
|
||||||
|
return fmt.Errorf("handshake mismatch remote %v > local %v", rhs.C, lhs.C)
|
||||||
|
}
|
||||||
|
lhs.C += rhs.C
|
||||||
|
return peer.Send(lhs)
|
||||||
|
|
||||||
|
case *kill:
|
||||||
|
// demonstrates use of peerPool, killing another peer connection as a response to a message
|
||||||
|
id := msg.C
|
||||||
|
pp.Get(id).Drop(errors.New("killed"))
|
||||||
|
return nil
|
||||||
|
|
||||||
|
case *drop:
|
||||||
|
// for testing we can trigger self induced disconnect upon receiving drop message
|
||||||
|
return errors.New("dropped")
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown message type: %T", msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pp.Add(peer)
|
||||||
|
defer pp.Remove(peer)
|
||||||
|
return peer.Run(handle)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func protocolTester(t *testing.T, pp *p2ptest.TestPeerPool) *p2ptest.ProtocolTester {
	conf := adapters.RandomNodeConfig()
	return p2ptest.NewProtocolTester(t, conf.ID, 2, newProtocol(pp))
}

func protoHandshakeExchange(id discover.NodeID, proto *protoHandshake) []p2ptest.Exchange {

	return []p2ptest.Exchange{
		{
			Expects: []p2ptest.Expect{
				{
					Code: 0,
					Msg:  &protoHandshake{42, "420"},
					Peer: id,
				},
			},
		},
		{
			Triggers: []p2ptest.Trigger{
				{
					Code: 0,
					Msg:  proto,
					Peer: id,
				},
			},
		},
	}
}

func runProtoHandshake(t *testing.T, proto *protoHandshake, errs ...error) {
	pp := p2ptest.NewTestPeerPool()
	s := protocolTester(t, pp)
	// TODO: make this more than one handshake
	id := s.IDs[0]
	if err := s.TestExchanges(protoHandshakeExchange(id, proto)...); err != nil {
		t.Fatal(err)
	}
	var disconnects []*p2ptest.Disconnect
	for i, err := range errs {
		disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.IDs[i], Error: err})
	}
	if err := s.TestDisconnected(disconnects...); err != nil {
		t.Fatal(err)
	}
}

func TestProtoHandshakeVersionMismatch(t *testing.T) {
	runProtoHandshake(t, &protoHandshake{41, "420"}, errorf(ErrHandshake, errorf(ErrHandler, "(msg code 0): 41 (!= 42)").Error()))
}

func TestProtoHandshakeNetworkIDMismatch(t *testing.T) {
	runProtoHandshake(t, &protoHandshake{42, "421"}, errorf(ErrHandshake, errorf(ErrHandler, "(msg code 0): 421 (!= 420)").Error()))
}

func TestProtoHandshakeSuccess(t *testing.T) {
	runProtoHandshake(t, &protoHandshake{42, "420"})
}
func moduleHandshakeExchange(id discover.NodeID, resp uint) []p2ptest.Exchange {

	return []p2ptest.Exchange{
		{
			Expects: []p2ptest.Expect{
				{
					Code: 1,
					Msg:  &hs0{42},
					Peer: id,
				},
			},
		},
		{
			Triggers: []p2ptest.Trigger{
				{
					Code: 1,
					Msg:  &hs0{resp},
					Peer: id,
				},
			},
		},
	}
}

func runModuleHandshake(t *testing.T, resp uint, errs ...error) {
	pp := p2ptest.NewTestPeerPool()
	s := protocolTester(t, pp)
	id := s.IDs[0]
	if err := s.TestExchanges(protoHandshakeExchange(id, &protoHandshake{42, "420"})...); err != nil {
		t.Fatal(err)
	}
	if err := s.TestExchanges(moduleHandshakeExchange(id, resp)...); err != nil {
		t.Fatal(err)
	}
	var disconnects []*p2ptest.Disconnect
	for i, err := range errs {
		disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.IDs[i], Error: err})
	}
	if err := s.TestDisconnected(disconnects...); err != nil {
		t.Fatal(err)
	}
}

func TestModuleHandshakeError(t *testing.T) {
	runModuleHandshake(t, 43, fmt.Errorf("handshake mismatch remote 43 > local 42"))
}

func TestModuleHandshakeSuccess(t *testing.T) {
	runModuleHandshake(t, 42)
}
// testing complex interactions over multiple peers, relaying, dropping
func testMultiPeerSetup(a, b discover.NodeID) []p2ptest.Exchange {

	return []p2ptest.Exchange{
		{
			Label: "primary handshake",
			Expects: []p2ptest.Expect{
				{
					Code: 0,
					Msg:  &protoHandshake{42, "420"},
					Peer: a,
				},
				{
					Code: 0,
					Msg:  &protoHandshake{42, "420"},
					Peer: b,
				},
			},
		},
		{
			Label: "module handshake",
			Triggers: []p2ptest.Trigger{
				{
					Code: 0,
					Msg:  &protoHandshake{42, "420"},
					Peer: a,
				},
				{
					Code: 0,
					Msg:  &protoHandshake{42, "420"},
					Peer: b,
				},
			},
			Expects: []p2ptest.Expect{
				{
					Code: 1,
					Msg:  &hs0{42},
					Peer: a,
				},
				{
					Code: 1,
					Msg:  &hs0{42},
					Peer: b,
				},
			},
		},

		{Label: "alternative module handshake", Triggers: []p2ptest.Trigger{{Code: 1, Msg: &hs0{41}, Peer: a},
			{Code: 1, Msg: &hs0{41}, Peer: b}}},
		{Label: "repeated module handshake", Triggers: []p2ptest.Trigger{{Code: 1, Msg: &hs0{1}, Peer: a}}},
		{Label: "receiving repeated module handshake", Expects: []p2ptest.Expect{{Code: 1, Msg: &hs0{43}, Peer: a}}}}
}
func runMultiplePeers(t *testing.T, peer int, errs ...error) {
	pp := p2ptest.NewTestPeerPool()
	s := protocolTester(t, pp)

	if err := s.TestExchanges(testMultiPeerSetup(s.IDs[0], s.IDs[1])...); err != nil {
		t.Fatal(err)
	}
	// after some exchanges of messages, we can test state changes
	// here this is simply demonstrated by the peerPool
	// after the handshake negotiations peers must be added to the pool
	// time.Sleep(1)
	tick := time.NewTicker(10 * time.Millisecond)
	timeout := time.NewTimer(1 * time.Second)
WAIT:
	for {
		select {
		case <-tick.C:
			if pp.Has(s.IDs[0]) {
				break WAIT
			}
		case <-timeout.C:
			t.Fatal("timeout")
		}
	}
	if !pp.Has(s.IDs[1]) {
		t.Fatalf("missing peer test-1: %v (%v)", pp, s.IDs)
	}

	// peer 0 sends kill request for peer with index <peer>
	err := s.TestExchanges(p2ptest.Exchange{
		Triggers: []p2ptest.Trigger{
			{
				Code: 2,
				Msg:  &kill{s.IDs[peer]},
				Peer: s.IDs[0],
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	// the peer not killed sends a drop request
	err = s.TestExchanges(p2ptest.Exchange{
		Triggers: []p2ptest.Trigger{
			{
				Code: 3,
				Msg:  &drop{},
				Peer: s.IDs[(peer+1)%2],
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	// check the actual disconnect errors on the individual peers
	var disconnects []*p2ptest.Disconnect
	for i, err := range errs {
		disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.IDs[i], Error: err})
	}
	if err := s.TestDisconnected(disconnects...); err != nil {
		t.Fatal(err)
	}
	// test if disconnected peers have been removed from peerPool
	if pp.Has(s.IDs[peer]) {
		t.Fatalf("peer test-%v not dropped: %v (%v)", peer, pp, s.IDs)
	}
}

func TestMultiplePeersDropSelf(t *testing.T) {
	runMultiplePeers(t, 0,
		fmt.Errorf("subprotocol error"),
		fmt.Errorf("Message handler error: (msg code 3): dropped"),
	)
}

func TestMultiplePeersDropOther(t *testing.T) {
	runMultiplePeers(t, 1,
		fmt.Errorf("Message handler error: (msg code 3): dropped"),
		fmt.Errorf("subprotocol error"),
	)
}
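Note (illustrative, not part of the commit): the fixtures above compose as follows. The values mirror the "repeated module handshake" step of testMultiPeerSetup and assume the protocol and module handshakes have already been exchanged, so the pivot node's local counter is still 42:

	pp := p2ptest.NewTestPeerPool()
	s := protocolTester(t, pp) // pivot node running newProtocol, plus two simulated peers
	id := s.IDs[0]
	// An Exchange bundles messages injected by a peer (Triggers) with
	// messages the pivot node is expected to emit (Expects).
	err := s.TestExchanges(p2ptest.Exchange{
		Label:    "illustrative exchange",
		Triggers: []p2ptest.Trigger{{Code: 1, Msg: &hs0{1}, Peer: id}},  // peer -> pivot
		Expects:  []p2ptest.Expect{{Code: 1, Msg: &hs0{43}, Peer: id}}, // pivot -> peer (42 + 1)
	})
	if err != nil {
		t.Fatal(err)
	}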
@@ -108,10 +108,16 @@ func (t *rlpx) close(err error) {
 	// Tell the remote end why we're disconnecting if possible.
 	if t.rw != nil {
 		if r, ok := err.(DiscReason); ok && r != DiscNetworkError {
-			t.fd.SetWriteDeadline(time.Now().Add(discWriteTimeout))
-			SendItems(t.rw, discMsg, r)
+			// rlpx tries to send DiscReason to disconnected peer
+			// if the connection is net.Pipe (in-memory simulation)
+			// it hangs forever, since net.Pipe does not implement
+			// a write deadline. Because of this only try to send
+			// the disconnect reason message if there is no error.
+			if err := t.fd.SetWriteDeadline(time.Now().Add(discWriteTimeout)); err == nil {
+				SendItems(t.rw, discMsg, r)
+			}
 		}
 	}
 	t.fd.Close()
 }
@@ -156,14 +156,18 @@ func TestProtocolHandshake(t *testing.T) {
 		node1 = &discover.Node{ID: discover.PubkeyID(&prv1.PublicKey), IP: net.IP{5, 6, 7, 8}, TCP: 44}
 		hs1   = &protoHandshake{Version: 3, ID: node1.ID, Caps: []Cap{{"c", 1}, {"d", 3}}}

-		fd0, fd1 = net.Pipe()
 		wg sync.WaitGroup
 	)

+	fd0, fd1, err := tcpPipe()
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	wg.Add(2)
 	go func() {
 		defer wg.Done()
-		defer fd1.Close()
+		defer fd0.Close()
 		rlpx := newRLPX(fd0)
 		remid, err := rlpx.doEncHandshake(prv0, node1)
 		if err != nil {
@@ -597,3 +601,31 @@ func TestHandshakeForwardCompatibility(t *testing.T) {
 		t.Errorf("ingress-mac('foo') mismatch:\ngot %x\nwant %x", fooIngressHash, wantFooIngressHash)
 	}
 }
+
+// tcpPipe creates an in process full duplex pipe based on a localhost TCP socket
+func tcpPipe() (net.Conn, net.Conn, error) {
+	l, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		return nil, nil, err
+	}
+	defer l.Close()
+
+	var aconn net.Conn
+	aerr := make(chan error, 1)
+	go func() {
+		var err error
+		aconn, err = l.Accept()
+		aerr <- err
+	}()
+
+	dconn, err := net.Dial("tcp", l.Addr().String())
+	if err != nil {
+		<-aerr
+		return nil, nil, err
+	}
+	if err := <-aerr; err != nil {
+		dconn.Close()
+		return nil, nil, err
+	}
+	return aconn, dconn, nil
+}
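Note (illustrative, not part of the commit): tcpPipe exists because the close() change above only sends the DiscReason when SetWriteDeadline succeeds, and the in-memory net.Pipe used before did not support write deadlines. A sketch of its use, mirroring TestProtocolHandshake:

	fd0, fd1, err := tcpPipe()
	if err != nil {
		t.Fatal(err)
	}
	defer fd0.Close()
	defer fd1.Close()
	// fd0 and fd1 are the two ends of a real TCP connection on 127.0.0.1,
	// so write deadlines set on them behave like on any ordinary socket.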
@@ -40,11 +40,10 @@ const (
 	refreshPeersInterval    = 30 * time.Second
 	staticPeerCheckInterval = 15 * time.Second

-	// Maximum number of concurrently handshaking inbound connections.
-	maxAcceptConns = 50
-
-	// Maximum number of concurrently dialing outbound connections.
+	// Connectivity defaults.
 	maxActiveDialTasks = 16
+	defaultMaxPendingPeers = 50
+	defaultDialRatio       = 3

 	// Maximum time allowed for reading a complete message.
 	// This is effectively the amount of time a connection can be idle.
@@ -70,6 +69,11 @@ type Config struct {
 	// Zero defaults to preset values.
 	MaxPendingPeers int `toml:",omitempty"`

+	// DialRatio controls the ratio of inbound to dialed connections.
+	// Example: a DialRatio of 2 allows 1/2 of connections to be dialed.
+	// Setting DialRatio to zero defaults it to 3.
+	DialRatio int `toml:",omitempty"`
+
 	// NoDiscovery can be used to disable the peer discovery mechanism.
 	// Disabling is useful for protocol debugging (manual topology).
 	NoDiscovery bool
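Note (illustrative, not part of the commit): a sketch of setting the new field when configuring a server; the values are arbitrary examples and Config is assumed to be embedded in p2p.Server as usual:

	srv := &p2p.Server{Config: p2p.Config{
		MaxPeers:  25,
		DialRatio: 5, // at most 25/5 = 5 peer slots may be dialed, the rest are inbound
	}}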
@@ -138,7 +142,7 @@ type Config struct {
 	EnableMsgEvents bool

 	// Logger is a custom logger to use with the p2p.Server.
-	Logger log.Logger
+	Logger log.Logger `toml:",omitempty"`
 }

 // Server manages all peer connections.
@@ -427,7 +431,6 @@ func (srv *Server) Start() (err error) {
 		if err != nil {
 			return err
 		}
-
 		realaddr = conn.LocalAddr().(*net.UDPAddr)
 		if srv.NAT != nil {
 			if !realaddr.IP.IsLoopback() {
@@ -447,11 +450,16 @@ func (srv *Server) Start() (err error) {

 	// node table
 	if !srv.NoDiscovery {
-		ntab, err := discover.ListenUDP(srv.PrivateKey, conn, realaddr, unhandled, srv.NodeDatabase, srv.NetRestrict)
-		if err != nil {
-			return err
+		cfg := discover.Config{
+			PrivateKey:   srv.PrivateKey,
+			AnnounceAddr: realaddr,
+			NodeDBPath:   srv.NodeDatabase,
+			NetRestrict:  srv.NetRestrict,
+			Bootnodes:    srv.BootstrapNodes,
+			Unhandled:    unhandled,
 		}
-		if err := ntab.SetFallbackNodes(srv.BootstrapNodes); err != nil {
+		ntab, err := discover.ListenUDP(conn, cfg)
+		if err != nil {
 			return err
 		}
 		srv.ntab = ntab
@@ -476,10 +484,7 @@ func (srv *Server) Start() (err error) {
 		srv.DiscV5 = ntab
 	}

-	dynPeers := (srv.MaxPeers + 1) / 2
-	if srv.NoDiscovery {
-		dynPeers = 0
-	}
+	dynPeers := srv.maxDialedConns()
 	dialer := newDialState(srv.StaticNodes, srv.BootstrapNodes, srv.ntab, dynPeers, srv.NetRestrict)

 	// handshake
@@ -536,6 +541,7 @@ func (srv *Server) run(dialstate dialer) {
 	defer srv.loopWG.Done()
 	var (
 		peers        = make(map[discover.NodeID]*Peer)
+		inboundCount = 0
 		trusted      = make(map[discover.NodeID]bool, len(srv.TrustedNodes))
 		taskdone     = make(chan task, maxActiveDialTasks)
 		runningTasks []task
@@ -621,14 +627,14 @@ running:
 			}
 			// TODO: track in-progress inbound node IDs (pre-Peer) to avoid dialing them.
 			select {
-			case c.cont <- srv.encHandshakeChecks(peers, c):
+			case c.cont <- srv.encHandshakeChecks(peers, inboundCount, c):
 			case <-srv.quit:
 				break running
 			}
 		case c := <-srv.addpeer:
 			// At this point the connection is past the protocol handshake.
 			// Its capabilities are known and the remote identity is verified.
-			err := srv.protoHandshakeChecks(peers, c)
+			err := srv.protoHandshakeChecks(peers, inboundCount, c)
 			if err == nil {
 				// The handshakes are done and it passed all checks.
 				p := newPeer(c, srv.Protocols)
@@ -639,8 +645,11 @@ running:
 				}
 				name := truncateName(c.name)
 				srv.log.Debug("Adding p2p peer", "name", name, "addr", c.fd.RemoteAddr(), "peers", len(peers)+1)
-				peers[c.id] = p
 				go srv.runPeer(p)
+				peers[c.id] = p
+				if p.Inbound() {
+					inboundCount++
+				}
 			}
 			// The dialer logic relies on the assumption that
 			// dial tasks complete after the peer has been added or
@@ -655,6 +664,9 @@ running:
 			d := common.PrettyDuration(mclock.Now() - pd.created)
 			pd.log.Debug("Removing p2p peer", "duration", d, "peers", len(peers)-1, "req", pd.requested, "err", pd.err)
 			delete(peers, pd.ID())
+			if pd.Inbound() {
+				inboundCount--
+			}
 		}
 	}
@@ -681,20 +693,22 @@ running:
 	}
 }

-func (srv *Server) protoHandshakeChecks(peers map[discover.NodeID]*Peer, c *conn) error {
+func (srv *Server) protoHandshakeChecks(peers map[discover.NodeID]*Peer, inboundCount int, c *conn) error {
 	// Drop connections with no matching protocols.
 	if len(srv.Protocols) > 0 && countMatchingProtocols(srv.Protocols, c.caps) == 0 {
 		return DiscUselessPeer
 	}
 	// Repeat the encryption handshake checks because the
 	// peer set might have changed between the handshakes.
-	return srv.encHandshakeChecks(peers, c)
+	return srv.encHandshakeChecks(peers, inboundCount, c)
 }

-func (srv *Server) encHandshakeChecks(peers map[discover.NodeID]*Peer, c *conn) error {
+func (srv *Server) encHandshakeChecks(peers map[discover.NodeID]*Peer, inboundCount int, c *conn) error {
 	switch {
 	case !c.is(trustedConn|staticDialedConn) && len(peers) >= srv.MaxPeers:
 		return DiscTooManyPeers
+	case !c.is(trustedConn) && c.is(inboundConn) && inboundCount >= srv.maxInboundConns():
+		return DiscTooManyPeers
 	case peers[c.id] != nil:
 		return DiscAlreadyConnected
 	case c.id == srv.Self().ID:
@@ -704,6 +718,21 @@ func (srv *Server) encHandshakeChecks(peers map[discover.NodeID]*Peer, c *conn)
 	}
 }

+func (srv *Server) maxInboundConns() int {
+	return srv.MaxPeers - srv.maxDialedConns()
+}
+
+func (srv *Server) maxDialedConns() int {
+	if srv.NoDiscovery || srv.NoDial {
+		return 0
+	}
+	r := srv.DialRatio
+	if r == 0 {
+		r = defaultDialRatio
+	}
+	return srv.MaxPeers / r
+}
+
 type tempError interface {
 	Temporary() bool
 }
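Note (illustrative, not part of the commit): a worked example of the split these helpers introduce:

	// With MaxPeers = 25 and the default DialRatio of 3:
	dialed := 25 / 3       // maxDialedConns() = 8 slots reserved for dialed peers
	inbound := 25 - dialed // maxInboundConns() = 17 slots left for inbound peers
	// With NoDial or NoDiscovery set, maxDialedConns() returns 0 and all 25 slots are inbound.
	_, _ = dialed, inbound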
@@ -714,10 +743,7 @@ func (srv *Server) listenLoop() {
 	defer srv.loopWG.Done()
 	srv.log.Info("RLPx listener up", "self", srv.makeSelf(srv.listener, srv.ntab))

-	// This channel acts as a semaphore limiting
-	// active inbound connections that are lingering pre-handshake.
-	// If all slots are taken, no further connections are accepted.
-	tokens := maxAcceptConns
+	tokens := defaultMaxPendingPeers
 	if srv.MaxPendingPeers > 0 {
 		tokens = srv.MaxPendingPeers
 	}
@@ -758,9 +784,6 @@ func (srv *Server) listenLoop() {

 		fd = newMeteredConn(fd, true)
 		srv.log.Trace("Accepted connection", "addr", fd.RemoteAddr())
-
-		// Spawn the handler. It will give the slot back when the connection
-		// has been established.
 		go func() {
 			srv.SetupConn(fd, inboundConn, nil)
 			slots <- struct{}{}
@@ -13,6 +13,7 @@
 //
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

 package adapters

 type SimStateStore struct {
p2p/testing/peerpool.go (new file, 67 lines)
@@ -0,0 +1,67 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package testing

import (
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

type TestPeer interface {
	ID() discover.NodeID
	Drop(error)
}

// TestPeerPool is an example peerPool to demonstrate registration of peer connections
type TestPeerPool struct {
	lock  sync.Mutex
	peers map[discover.NodeID]TestPeer
}

func NewTestPeerPool() *TestPeerPool {
	return &TestPeerPool{peers: make(map[discover.NodeID]TestPeer)}
}

func (self *TestPeerPool) Add(p TestPeer) {
	self.lock.Lock()
	defer self.lock.Unlock()
	log.Trace(fmt.Sprintf("pp add peer %v", p.ID()))
	self.peers[p.ID()] = p
}

func (self *TestPeerPool) Remove(p TestPeer) {
	self.lock.Lock()
	defer self.lock.Unlock()
	delete(self.peers, p.ID())
}

func (self *TestPeerPool) Has(id discover.NodeID) bool {
	self.lock.Lock()
	defer self.lock.Unlock()
	_, ok := self.peers[id]
	return ok
}

func (self *TestPeerPool) Get(id discover.NodeID) TestPeer {
	self.lock.Lock()
	defer self.lock.Unlock()
	return self.peers[id]
}
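Note (illustrative, not part of the commit): how a protocol run loop uses this pool, mirroring newProtocol in the protocol tests above; peer and otherID are placeholders:

	pp := NewTestPeerPool()
	// inside the protocol's run function:
	pp.Add(peer)          // register the peer once its handshakes have completed
	defer pp.Remove(peer) // deregister when the run loop returns
	if pp.Has(otherID) {
		pp.Get(otherID).Drop(errors.New("killed")) // drop another registered peer
	}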
p2p/testing/protocolsession.go (new file, 280 lines)
@@ -0,0 +1,280 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package testing

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

var errTimedOut = errors.New("timed out")

// ProtocolSession is a quasi simulation of a pivot node running
// a service and a number of dummy peers that can send (trigger) or
// receive (expect) messages
type ProtocolSession struct {
	Server  *p2p.Server
	IDs     []discover.NodeID
	adapter *adapters.SimAdapter
	events  chan *p2p.PeerEvent
}

// Exchange is the basic unit of protocol tests.
// The triggers and expects in the arrays are run immediately and asynchronously,
// thus one cannot have multiple expects for the SAME peer with DIFFERENT message types,
// because it is unpredictable which expect will receive which message
// (with expect #1 and #2, messages might be sent #2 and #1, and both expects will complain about wrong message code).
// An exchange is defined on a session.
type Exchange struct {
	Label    string
	Triggers []Trigger
	Expects  []Expect
	Timeout  time.Duration
}

// Trigger is part of the exchange, an incoming message for the pivot node
// sent by a peer
type Trigger struct {
	Msg     interface{}     // type of message to be sent
	Code    uint64          // code of message is given
	Peer    discover.NodeID // the peer to send the message to
	Timeout time.Duration   // timeout duration for the sending
}

// Expect is part of an exchange, an outgoing message from the pivot node
// received by a peer
type Expect struct {
	Msg     interface{}     // type of message to expect
	Code    uint64          // code of message is now given
	Peer    discover.NodeID // the peer that expects the message
	Timeout time.Duration   // timeout duration for receiving
}

// Disconnect represents a disconnect event, used and checked by TestDisconnected
type Disconnect struct {
	Peer  discover.NodeID // disconnected peer
	Error error           // disconnect reason
}

// trigger sends messages from peers
func (self *ProtocolSession) trigger(trig Trigger) error {
	simNode, ok := self.adapter.GetNode(trig.Peer)
	if !ok {
		return fmt.Errorf("trigger: peer %v does not exist (1- %v)", trig.Peer, len(self.IDs))
	}
	mockNode, ok := simNode.Services()[0].(*mockNode)
	if !ok {
		return fmt.Errorf("trigger: peer %v is not a mock", trig.Peer)
	}

	errc := make(chan error)

	go func() {
		errc <- mockNode.Trigger(&trig)
	}()

	t := trig.Timeout
	if t == time.Duration(0) {
		t = 1000 * time.Millisecond
	}
	select {
	case err := <-errc:
		return err
	case <-time.After(t):
		return fmt.Errorf("timeout expecting %v to send to peer %v", trig.Msg, trig.Peer)
	}
}

// expect checks an expectation of a message sent out by the pivot node
func (self *ProtocolSession) expect(exps []Expect) error {
	// construct a map of expectations for each node
	peerExpects := make(map[discover.NodeID][]Expect)
	for _, exp := range exps {
		if exp.Msg == nil {
			return errors.New("no message to expect")
		}
		peerExpects[exp.Peer] = append(peerExpects[exp.Peer], exp)
	}

	// construct a map of mockNodes for each node
	mockNodes := make(map[discover.NodeID]*mockNode)
	for nodeID := range peerExpects {
		simNode, ok := self.adapter.GetNode(nodeID)
		if !ok {
			return fmt.Errorf("trigger: peer %v does not exist (1- %v)", nodeID, len(self.IDs))
		}
		mockNode, ok := simNode.Services()[0].(*mockNode)
		if !ok {
			return fmt.Errorf("trigger: peer %v is not a mock", nodeID)
		}
		mockNodes[nodeID] = mockNode
	}

	// done channel cancels all created goroutines when the function returns
	done := make(chan struct{})
	defer close(done)
	// errc catches the first error from the expectation goroutines below
	errc := make(chan error)

	wg := &sync.WaitGroup{}
	wg.Add(len(mockNodes))
	for nodeID, mockNode := range mockNodes {
		nodeID := nodeID
		mockNode := mockNode
		go func() {
			defer wg.Done()

			// Sum all Expect timeouts to give the maximum
			// time for all expectations to finish.
			// mockNode.Expect checks all received messages against
			// a list of expected messages and the timeout for each
			// of them cannot be checked separately.
			var t time.Duration
			for _, exp := range peerExpects[nodeID] {
				if exp.Timeout == time.Duration(0) {
					t += 2000 * time.Millisecond
				} else {
					t += exp.Timeout
				}
			}
			alarm := time.NewTimer(t)
			defer alarm.Stop()

			// expectErrc is used to check if error returned
			// from mockNode.Expect is not nil and to send it to
			// errc only in that case.
			// done channel will be closed when the function returns.
			expectErrc := make(chan error)
			go func() {
				select {
				case expectErrc <- mockNode.Expect(peerExpects[nodeID]...):
				case <-done:
				case <-alarm.C:
				}
			}()

			select {
			case err := <-expectErrc:
				if err != nil {
					select {
					case errc <- err:
					case <-done:
					case <-alarm.C:
						errc <- errTimedOut
					}
				}
			case <-done:
			case <-alarm.C:
				errc <- errTimedOut
			}
		}()
	}

	go func() {
		wg.Wait()
		// close errc when all goroutines finish to return nil err from errc
		close(errc)
	}()

	return <-errc
}

// TestExchanges tests a series of exchanges against the session
func (self *ProtocolSession) TestExchanges(exchanges ...Exchange) error {
	for i, e := range exchanges {
		if err := self.testExchange(e); err != nil {
			return fmt.Errorf("exchange #%d %q: %v", i, e.Label, err)
		}
		log.Trace(fmt.Sprintf("exchange #%d %q: run successfully", i, e.Label))
	}
	return nil
}

// testExchange tests a single Exchange.
// Default timeout value is 2 seconds.
func (self *ProtocolSession) testExchange(e Exchange) error {
	errc := make(chan error)
	done := make(chan struct{})
	defer close(done)

	go func() {
		for _, trig := range e.Triggers {
			err := self.trigger(trig)
			if err != nil {
				errc <- err
				return
			}
		}

		select {
		case errc <- self.expect(e.Expects):
		case <-done:
		}
	}()

	// time out globally or finish when all expectations satisfied
	t := e.Timeout
	if t == 0 {
		t = 2000 * time.Millisecond
	}
	alarm := time.NewTimer(t)
	select {
	case err := <-errc:
		return err
	case <-alarm.C:
		return errTimedOut
	}
}

// TestDisconnected tests the disconnections given as arguments
// the disconnect structs describe what disconnect error is expected on which peer
func (self *ProtocolSession) TestDisconnected(disconnects ...*Disconnect) error {
	expects := make(map[discover.NodeID]error)
	for _, disconnect := range disconnects {
		expects[disconnect.Peer] = disconnect.Error
	}

	timeout := time.After(time.Second)
	for len(expects) > 0 {
		select {
		case event := <-self.events:
			if event.Type != p2p.PeerEventTypeDrop {
				continue
			}
			expectErr, ok := expects[event.Peer]
			if !ok {
				continue
			}

			if !(expectErr == nil && event.Error == "" || expectErr != nil && expectErr.Error() == event.Error) {
				return fmt.Errorf("unexpected error on peer %v. expected '%v', got '%v'", event.Peer, expectErr, event.Error)
			}
			delete(expects, event.Peer)
		case <-timeout:
			return fmt.Errorf("timed out waiting for peers to disconnect")
		}
	}
	return nil
}