forked from cerc-io/ipld-eth-server
Merge pull request #28 from vulcanize/geth_update
update geth to 1.8.23 (post-Constantinople)
commit f53a7b8b5d
Gopkg.lock (generated, 6 lines changed)
@@ -45,7 +45,7 @@
   version = "v1.7.1"

 [[projects]]
-  digest = "1:3d26f660432345429f6b09595e4707ee12745547323bcd1dc91457125aefeedc"
+  digest = "1:49b5451a4ce27b50393cc0d4c6fdd9cf7fa148ee4c0dd9c1d6c515b232427022"
   name = "github.com/ethereum/go-ethereum"
   packages = [
     ".",
@@ -82,8 +82,8 @@
     "trie",
   ]
   pruneopts = ""
-  revision = "9dc5d1a915ac0e0bd8429d6ac41df50eec91de5f"
-  version = "v1.8.21"
+  revision = "c942700427557e3ff6de3aaf6b916e2f056c1ec2"
+  version = "v1.8.23"

 [[projects]]
   digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
Gopkg.toml
@@ -51,4 +51,4 @@

 [[constraint]]
   name = "github.com/ethereum/go-ethereum"
-  version = "1.8.21"
+  version = "1.8.23"
README.md
@@ -12,7 +12,7 @@ Vulcanize DB is a set of tools that make it easier for developers to write appli
 - Go 1.11+
 - Postgres 10.6
 - Ethereum Node
-  - [Go Ethereum](https://ethereum.github.io/go-ethereum/downloads/) (1.8.21+)
+  - [Go Ethereum](https://ethereum.github.io/go-ethereum/downloads/) (1.8.23+)
   - [Parity 1.8.11+](https://github.com/paritytech/parity/releases)

 ## Project Setup
vendor/github.com/ethereum/go-ethereum/.travis.yml (generated, vendored, 7 lines changed)
@@ -68,8 +68,11 @@ matrix:
       - debhelper
       - dput
       - fakeroot
+      - python-bzrlib
+      - python-paramiko
   script:
-    - go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
+    - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
+    - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"

 # This builder does the Linux Azure uploads
 - if: type = push
@@ -156,7 +159,7 @@ matrix:
     git:
       submodules: false # avoid cloning ethereum/tests
     before_install:
-      - curl https://storage.googleapis.com/golang/go1.11.4.linux-amd64.tar.gz | tar -xz
+      - curl https://storage.googleapis.com/golang/go1.11.5.linux-amd64.tar.gz | tar -xz
       - export PATH=`pwd`/go/bin:$PATH
       - export GOROOT=`pwd`/go
       - export GOPATH=$HOME/go
vendor/github.com/ethereum/go-ethereum/appveyor.yml (generated, vendored, 4 lines changed)
@@ -23,8 +23,8 @@ environment:
 install:
   - git submodule update --init
   - rmdir C:\go /s /q
-  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.4.windows-%GETH_ARCH%.zip
-  - 7z x go1.11.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.11.5.windows-%GETH_ARCH%.zip
+  - 7z x go1.11.5.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
   - go version
   - gcc --version
vendor/github.com/ethereum/go-ethereum/build/ci-notes.md (generated, vendored, 13 lines changed)
@@ -7,11 +7,18 @@ Canonical.
 Packages of develop branch commits have suffix -unstable and cannot be installed alongside
 the stable version. Switching between release streams requires user intervention.

 ## Launchpad

 The packages are built and served by launchpad.net. We generate a Debian source package
 for each distribution and upload it. Their builder picks up the source package, builds it
 and installs the new version into the PPA repository. Launchpad requires a valid signature
-by a team member for source package uploads. The signing key is stored in an environment
-variable which Travis CI makes available to certain builds.
+by a team member for source package uploads.
+
+The signing key is stored in an environment variable which Travis CI makes available to
+certain builds. Since Travis CI doesn't support FTP, SFTP is used to transfer the
+packages. To set this up yourself, you need to create a Launchpad user and add a GPG key
+and SSH key to it. Then encode both keys as base64 and configure 'secret' environment
+variables `PPA_SIGNING_KEY` and `PPA_SSH_KEY` on Travis.

 We want to build go-ethereum with the most recent version of Go, irrespective of the Go
 version that is available in the main Ubuntu repository. In order to make this possible,
@@ -27,7 +34,7 @@ Add the gophers PPA and install Go 1.10 and Debian packaging tools:

     $ sudo apt-add-repository ppa:gophers/ubuntu/archive
     $ sudo apt-get update
-    $ sudo apt-get install build-essential golang-1.10 devscripts debhelper
+    $ sudo apt-get install build-essential golang-1.10 devscripts debhelper python-bzrlib python-paramiko

 Create the source packages:
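Aside: the base64-encoded secret convention described in the notes above is what the new getenvBase64 helper in build/ci.go (further down in this diff) consumes at build time. A minimal standalone sketch of the encoding side, with hypothetical file names:

package main

import (
    "encoding/base64"
    "fmt"
    "io/ioutil"
    "log"
)

func main() {
    // Hypothetical exports: a GPG secret key and a Launchpad SSH key.
    for _, name := range []string{"signing-key.gpg", "launchpad_id_rsa"} {
        raw, err := ioutil.ReadFile(name)
        if err != nil {
            log.Fatal(err)
        }
        // This one-line value is what gets pasted into the Travis secret
        // variable; build/ci.go reverses it with base64.StdEncoding.DecodeString.
        fmt.Printf("%s: %s\n", name, base64.StdEncoding.EncodeToString(raw))
    }
}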
vendor/github.com/ethereum/go-ethereum/build/ci.go (generated, vendored, 67 lines changed)
@@ -441,11 +441,8 @@ func archiveBasename(arch string, archiveVersion string) string {
 func archiveUpload(archive string, blobstore string, signer string) error {
     // If signing was requested, generate the signature files
     if signer != "" {
-        pgpkey, err := base64.StdEncoding.DecodeString(os.Getenv(signer))
-        if err != nil {
-            return fmt.Errorf("invalid base64 %s", signer)
-        }
-        if err := build.PGPSignFile(archive, archive+".asc", string(pgpkey)); err != nil {
+        key := getenvBase64(signer)
+        if err := build.PGPSignFile(archive, archive+".asc", string(key)); err != nil {
             return err
         }
     }
@@ -488,7 +485,8 @@ func maybeSkipArchive(env build.Environment) {
 func doDebianSource(cmdline []string) {
     var (
         signer  = flag.String("signer", "", `Signing key name, also used as package author`)
-        upload  = flag.String("upload", "", `Where to upload the source package (usually "ppa:ethereum/ethereum")`)
+        upload  = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`)
+        sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`)
         workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
         now     = time.Now()
     )
@@ -498,11 +496,7 @@ func doDebianSource(cmdline []string) {
     maybeSkipArchive(env)

     // Import the signing key.
-    if b64key := os.Getenv("PPA_SIGNING_KEY"); b64key != "" {
-        key, err := base64.StdEncoding.DecodeString(b64key)
-        if err != nil {
-            log.Fatal("invalid base64 PPA_SIGNING_KEY")
-        }
+    if key := getenvBase64("PPA_SIGNING_KEY"); len(key) > 0 {
         gpg := exec.Command("gpg", "--import")
         gpg.Stdin = bytes.NewReader(key)
         build.MustRun(gpg)
@@ -513,22 +507,58 @@ func doDebianSource(cmdline []string) {
         for _, distro := range debDistros {
             meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
             pkgdir := stageDebianSource(*workdir, meta)
-            debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc")
+            debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc", "-d", "-Zxz")
             debuild.Dir = pkgdir
             build.MustRun(debuild)

-            changes := fmt.Sprintf("%s_%s_source.changes", meta.Name(), meta.VersionString())
-            changes = filepath.Join(*workdir, changes)
+            var (
+                basename = fmt.Sprintf("%s_%s", meta.Name(), meta.VersionString())
+                source   = filepath.Join(*workdir, basename+".tar.xz")
+                dsc      = filepath.Join(*workdir, basename+".dsc")
+                changes  = filepath.Join(*workdir, basename+"_source.changes")
+            )
             if *signer != "" {
                 build.MustRunCommand("debsign", changes)
             }
             if *upload != "" {
-                build.MustRunCommand("dput", *upload, changes)
+                ppaUpload(*workdir, *upload, *sshUser, []string{source, dsc, changes})
             }
         }
     }
 }

+func ppaUpload(workdir, ppa, sshUser string, files []string) {
+    p := strings.Split(ppa, "/")
+    if len(p) != 2 {
+        log.Fatal("-upload PPA name must contain single /")
+    }
+    if sshUser == "" {
+        sshUser = p[0]
+    }
+    incomingDir := fmt.Sprintf("~%s/ubuntu/%s", p[0], p[1])
+    // Create the SSH identity file if it doesn't exist.
+    var idfile string
+    if sshkey := getenvBase64("PPA_SSH_KEY"); len(sshkey) > 0 {
+        idfile = filepath.Join(workdir, "sshkey")
+        if _, err := os.Stat(idfile); os.IsNotExist(err) {
+            ioutil.WriteFile(idfile, sshkey, 0600)
+        }
+    }
+    // Upload
+    dest := sshUser + "@ppa.launchpad.net"
+    if err := build.UploadSFTP(idfile, dest, incomingDir, files); err != nil {
+        log.Fatal(err)
+    }
+}

+func getenvBase64(variable string) []byte {
+    dec, err := base64.StdEncoding.DecodeString(os.Getenv(variable))
+    if err != nil {
+        log.Fatal("invalid base64 " + variable)
+    }
+    return []byte(dec)
+}

 func makeWorkdir(wdflag string) string {
     var err error
     if wdflag != "" {
@@ -800,15 +830,10 @@ func doAndroidArchive(cmdline []string) {
     os.Rename(archive, meta.Package+".aar")
     if *signer != "" && *deploy != "" {
         // Import the signing key into the local GPG instance
-        b64key := os.Getenv(*signer)
-        key, err := base64.StdEncoding.DecodeString(b64key)
-        if err != nil {
-            log.Fatalf("invalid base64 %s", *signer)
-        }
+        key := getenvBase64(*signer)
         gpg := exec.Command("gpg", "--import")
         gpg.Stdin = bytes.NewReader(key)
         build.MustRun(gpg)

         keyID, err := build.PGPKeyID(string(key))
         if err != nil {
             log.Fatal(err)
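To connect the hunks above: after debuild runs, doDebianSource now derives three artifact paths from the package metadata and hands them to ppaUpload over SFTP instead of shelling out to dput. A condensed sketch of that naming scheme (the metadata values here are made up for illustration):

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    // Stand-ins for what doDebianSource gets from newDebMetadata.
    workdir := "/tmp/geth-build"
    name, version := "ethereum-unstable", "1.8.23+build1234+bionic"

    basename := fmt.Sprintf("%s_%s", name, version)
    source := filepath.Join(workdir, basename+".tar.xz")
    dsc := filepath.Join(workdir, basename+".dsc")
    changes := filepath.Join(workdir, basename+"_source.changes")

    // These three files are what ppaUpload sends to
    // ~ethereum/ubuntu/ethereum on ppa.launchpad.net.
    fmt.Println(source, dsc, changes)
}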
vendor/github.com/ethereum/go-ethereum/cmd/geth/main.go (generated, vendored, 5 lines changed)
@@ -38,7 +38,7 @@ import (
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
     "github.com/ethereum/go-ethereum/node"
-    "gopkg.in/urfave/cli.v1"
+    cli "gopkg.in/urfave/cli.v1"
 )

 const (
@@ -121,6 +121,7 @@ var (
     utils.DeveloperPeriodFlag,
     utils.TestnetFlag,
     utils.RinkebyFlag,
+    utils.GoerliFlag,
     utils.VMEnableDebugFlag,
     utils.NetworkIdFlag,
     utils.ConstantinopleOverrideFlag,
@@ -164,7 +165,7 @@ var (
     utils.MetricsInfluxDBDatabaseFlag,
     utils.MetricsInfluxDBUsernameFlag,
     utils.MetricsInfluxDBPasswordFlag,
-    utils.MetricsInfluxDBHostTagFlag,
+    utils.MetricsInfluxDBTagsFlag,
 }
 )
vendor/github.com/ethereum/go-ethereum/cmd/geth/usage.go (generated, vendored, 5 lines changed)
@@ -26,7 +26,7 @@ import (

     "github.com/ethereum/go-ethereum/cmd/utils"
     "github.com/ethereum/go-ethereum/internal/debug"
-    "gopkg.in/urfave/cli.v1"
+    cli "gopkg.in/urfave/cli.v1"
 )

 // AppHelpTemplate is the test template for the default, global app help topic.
@@ -74,6 +74,7 @@ var AppHelpFlagGroups = []flagGroup{
     utils.NetworkIdFlag,
     utils.TestnetFlag,
     utils.RinkebyFlag,
+    utils.GoerliFlag,
     utils.SyncModeFlag,
     utils.GCModeFlag,
     utils.EthStatsURLFlag,
@@ -229,7 +230,7 @@ var AppHelpFlagGroups = []flagGroup{
     utils.MetricsInfluxDBDatabaseFlag,
     utils.MetricsInfluxDBUsernameFlag,
     utils.MetricsInfluxDBPasswordFlag,
-    utils.MetricsInfluxDBHostTagFlag,
+    utils.MetricsInfluxDBTagsFlag,
     },
 },
 {
vendor/github.com/ethereum/go-ethereum/cmd/puppeth/genesis.go (generated, vendored, 54 lines changed)
@@ -223,28 +223,29 @@ type parityChainSpec struct {
     } `json:"engine"`

     Params struct {
         AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
         MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
         MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
         GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"`
         NetworkID hexutil.Uint64 `json:"networkID"`
         ChainID hexutil.Uint64 `json:"chainID"`
         MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"`
         MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"`
         EIP98Transition hexutil.Uint64 `json:"eip98Transition"`
         EIP150Transition hexutil.Uint64 `json:"eip150Transition"`
         EIP160Transition hexutil.Uint64 `json:"eip160Transition"`
         EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"`
         EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"`
         EIP155Transition hexutil.Uint64 `json:"eip155Transition"`
         EIP140Transition hexutil.Uint64 `json:"eip140Transition"`
         EIP211Transition hexutil.Uint64 `json:"eip211Transition"`
         EIP214Transition hexutil.Uint64 `json:"eip214Transition"`
         EIP658Transition hexutil.Uint64 `json:"eip658Transition"`
         EIP145Transition hexutil.Uint64 `json:"eip145Transition"`
         EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"`
         EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"`
         EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"`
+        EIP1283DisableTransition hexutil.Uint64 `json:"eip1283DisableTransition"`
     } `json:"params"`

     Genesis struct {
@@ -347,6 +348,11 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
     if num := genesis.Config.ConstantinopleBlock; num != nil {
         spec.setConstantinople(num)
     }
+    // ConstantinopleFix (remove eip-1283)
+    if num := genesis.Config.PetersburgBlock; num != nil {
+        spec.setConstantinopleFix(num)
+    }

     spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
     spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
     spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor)
@@ -441,6 +447,10 @@ func (spec *parityChainSpec) setConstantinople(num *big.Int) {
     spec.Params.EIP1283Transition = n
 }

+func (spec *parityChainSpec) setConstantinopleFix(num *big.Int) {
+    spec.Params.EIP1283DisableTransition = hexutil.Uint64(num.Uint64())
+}

 // pyEthereumGenesisSpec represents the genesis specification format used by the
 // Python Ethereum implementation.
 type pyEthereumGenesisSpec struct {
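For orientation: the new setConstantinopleFix path fires when geth's chain config carries a Petersburg block, and maps that single number onto Parity's eip1283DisableTransition. A small sketch with simplified stand-in types (the block number matches mainnet, but treat it as illustrative):

package main

import (
    "fmt"
    "math/big"
)

// Simplified stand-ins for the params.ChainConfig fields newParityChainSpec reads.
type chainConfig struct {
    ConstantinopleBlock *big.Int // enables EIP-1283, among others
    PetersburgBlock     *big.Int // Constantinople-Fix: disables EIP-1283 again
}

func main() {
    cfg := chainConfig{
        ConstantinopleBlock: big.NewInt(7280000),
        PetersburgBlock:     big.NewInt(7280000), // same block: 1283 is never live
    }
    if cfg.PetersburgBlock != nil {
        // Corresponds to spec.Params.EIP1283DisableTransition in the Parity spec.
        fmt.Println("eip1283DisableTransition =", cfg.PetersburgBlock)
    }
}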
vendor/github.com/ethereum/go-ethereum/cmd/puppeth/module_dashboard.go (generated, vendored, 49 lines changed)
@@ -608,30 +608,31 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
     bootPython[i] = "'" + boot + "'"
 }
 template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{
     "Network": network,
     "NetworkID": conf.Genesis.Config.ChainID,
     "NetworkTitle": strings.Title(network),
     "EthstatsPage": config.ethstats,
     "ExplorerPage": config.explorer,
     "WalletPage": config.wallet,
     "FaucetPage": config.faucet,
     "GethGenesis": network + ".json",
     "Bootnodes": conf.bootnodes,
     "BootnodesFlat": strings.Join(conf.bootnodes, ","),
     "Ethstats": statsLogin,
     "Ethash": conf.Genesis.Config.Ethash != nil,
     "CppGenesis": network + "-cpp.json",
     "CppBootnodes": strings.Join(bootCpp, " "),
     "HarmonyGenesis": network + "-harmony.json",
     "HarmonyBootnodes": strings.Join(bootHarmony, " "),
     "ParityGenesis": network + "-parity.json",
     "PythonGenesis": network + "-python.json",
     "PythonBootnodes": strings.Join(bootPython, ","),
     "Homestead": conf.Genesis.Config.HomesteadBlock,
     "Tangerine": conf.Genesis.Config.EIP150Block,
     "Spurious": conf.Genesis.Config.EIP155Block,
     "Byzantium": conf.Genesis.Config.ByzantiumBlock,
     "Constantinople": conf.Genesis.Config.ConstantinopleBlock,
+    "ConstantinopleFix": conf.Genesis.Config.PetersburgBlock,
 })
 files[filepath.Join(workdir, "index.html")] = indexfile.Bytes()
vendor/github.com/ethereum/go-ethereum/cmd/puppeth/wizard_genesis.go (generated, vendored, 8 lines changed)
@@ -222,10 +222,18 @@ func (w *wizard) manageGenesis() {
     fmt.Println()
     fmt.Printf("Which block should Constantinople come into effect? (default = %v)\n", w.conf.Genesis.Config.ConstantinopleBlock)
     w.conf.Genesis.Config.ConstantinopleBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ConstantinopleBlock)
+    if w.conf.Genesis.Config.PetersburgBlock == nil {
+        w.conf.Genesis.Config.PetersburgBlock = w.conf.Genesis.Config.ConstantinopleBlock
+    }
+    fmt.Println()
+    fmt.Printf("Which block should Constantinople-Fix (remove EIP-1283) come into effect? (default = %v)\n", w.conf.Genesis.Config.PetersburgBlock)
+    w.conf.Genesis.Config.PetersburgBlock = w.readDefaultBigInt(w.conf.Genesis.Config.PetersburgBlock)

     out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", "  ")
     fmt.Printf("Chain configuration updated:\n\n%s\n", out)

     w.conf.flush()

 case "2":
     // Save whatever genesis configuration we currently have
     fmt.Println()
vendor/github.com/ethereum/go-ethereum/cmd/swarm/bootnodes.go (generated, vendored, 61 lines changed)
@@ -17,61 +17,8 @@
 package main

 var SwarmBootnodes = []string{
-    // Foundation Swarm Gateway Cluster
-    "enode://e5c6f9215c919a5450a7b8c14c22535607b69f2c8e1e7f6f430cb25d7a2c27cd1df4c4f18ad7c1d7e5162e271ffcd3f20b1a1467fb6e790e7d727f3b2193de97@52.232.7.187:30399",
-    "enode://9b2fe07e69ccc7db5fef15793dab7d7d2e697ed92132d6e9548218e68a34613a8671ad03a6658d862b468ed693cae8a0f8f8d37274e4a657ffb59ca84676e45b@52.232.7.187:30400",
-    "enode://76c1059162c93ef9df0f01097c824d17c492634df211ef4c806935b349082233b63b90c23970254b3b7138d630400f7cf9b71e80355a446a8b733296cb04169a@52.232.7.187:30401",
-    "enode://ce46bbe2a8263145d65252d52da06e000ad350ed09c876a71ea9544efa42f63c1e1b6cc56307373aaad8f9dd069c90d0ed2dd1530106200e16f4ca681dd8ae2d@52.232.7.187:30402",
-    "enode://f431e0d6008a6c35c6e670373d828390c8323e53da8158e7bfc43cf07e632cc9e472188be8df01decadea2d4a068f1428caba769b632554a8fb0607bc296988f@52.232.7.187:30403",
-    "enode://174720abfff83d7392f121108ae50ea54e04889afe020df883655c0f6cb95414db945a0228d8982fe000d86fc9f4b7669161adc89cd7cd56f78f01489ab2b99b@52.232.7.187:30404",
-    "enode://2ae89be4be61a689b6f9ecee4360a59e185e010ab750f14b63b4ae43d4180e872e18e3437d4386ce44875dc7cc6eb761acba06412fe3178f3dac1dab3b65703e@52.232.7.187:30405",
-    "enode://24abebe1c0e6d75d6052ce3219a87be8573fd6397b4cb51f0773b83abba9b3d872bfb273cdc07389715b87adfac02f5235f5241442c5089802cbd8d42e310fce@52.232.7.187:30406",
-    "enode://d08dfa46bfbbdbcaafbb6e34abee4786610f6c91e0b76d7881f0334ac10dda41d8c1f2b6eedffb4493293c335c0ad46776443b2208d1fbbb9e1a90b25ee4eef2@52.232.7.187:30407",
-    "enode://8d95eb0f837d27581a43668ed3b8783d69dc4e84aa3edd7a0897e026155c8f59c8702fdc0375ee7bac15757c9c78e1315d9b73e4ce59c936db52ea4ae2f501c7@52.232.7.187:30408",
-    "enode://a5967cc804aebd422baaaba9f06f27c9e695ccab335b61088130f8cbe64e3cdf78793868c7051dfc06eecfe844fad54bc7f6dfaed9db3c7ecef279cb829c25fb@52.232.7.187:30409",
-    "enode://5f00134d81a8f2ebcc46f8766f627f492893eda48138f811b7de2168308171968f01710bca6da05764e74f14bae41652f554e6321f1aed85fa3461e89d075dbf@52.232.7.187:30410",
-    "enode://b2142b79b01a5aa66a5e23cc35e78219a8e97bc2412a6698cee24ae02e87078b725d71730711bd62e25ff1aa8658c6633778af8ac14c63814a337c3dd0ebda9f@52.232.7.187:30411",
-    "enode://1ffa7651094867d6486ce3ef46d27a052c2cb968b618346c6df7040322c7efc3337547ba85d4cbba32e8b31c42c867202554735c06d4c664b9afada2ed0c4b3c@52.232.7.187:30412",
-    "enode://129e0c3d5f5df12273754f6f703d2424409fa4baa599e0b758c55600169313887855e75b082028d2302ec034b303898cd697cc7ae8256ba924ce927510da2c8d@52.232.7.187:30413",
-    "enode://419e2dc0d2f5b022cf16b0e28842658284909fa027a0fbbb5e2b755e7f846ea02a8f0b66a7534981edf6a7bcf8a14855344c6668e2cd4476ccd35a11537c9144@52.232.7.187:30414",
-    "enode://23d55ad900583231b91f2f62e3f72eb498b342afd58b682be3af052eed62b5651094471065981de33d8786f075f05e3cca499503b0ac8ae84b2a06e99f5b0723@52.232.7.187:30415",
-    "enode://bc56e4158c00e9f616d7ea533def20a89bef959df4e62a768ff238ff4e1e9223f57ecff969941c20921bad98749baae311c0fbebce53bf7bbb9d3dc903640990@52.232.7.187:30416",
-    "enode://433ce15199c409875e7e72fffd69fdafe746f17b20f0d5555281722a65fde6c80328fab600d37d8624509adc072c445ce0dad4a1c01cff6acf3132c11d429d4d@52.232.7.187:30417",
-    "enode://632ee95b8f0eac51ef89ceb29313fef3a60050181d66a6b125583b1a225a7694b252edc016efb58aa3b251da756cb73280842a022c658ed405223b2f58626343@52.232.7.187:30418",
-    "enode://4a0f9bcff7a4b9ee453fb298d0fb222592efe121512e30cd72fef631beb8c6a15153a1456eb073ee18551c0e003c569651a101892dc4124e90b933733a498bb5@52.232.7.187:30419",
-    "enode://f0d80fbc72d16df30e19aac3051eb56a7aff0c8367686702e01ea132d8b0b3ee00cadd6a859d2cca98ec68d3d574f8a8a87dba2347ec1e2818dc84bc3fa34fae@52.232.7.187:30420",
-    "enode://a199146906e4f9f2b94b195a8308d9a59a3564b92efaab898a4243fe4c2ad918b7a8e4853d9d901d94fad878270a2669d644591299c3d43de1b298c00b92b4a7@52.232.7.187:30421",
-    "enode://052036ea8736b37adbfb684d90ce43e11b3591b51f31489d7c726b03618dea4f73b1e659deb928e6bf40564edcdcf08351643f42db3d4ca1c2b5db95dad59e94@52.232.7.187:30422",
-    "enode://460e2b8c6da8f12fac96c836e7d108f4b7ec55a1c64631bb8992339e117e1c28328fee83af863196e20af1487a655d13e5ceba90e980e92502d5bac5834c1f71@52.232.7.187:30423",
-    "enode://6d2cdd13741b2e72e9031e1b93c6d9a4e68de2844aa4e939f6a8a8498a7c1d7e2ee4c64217e92a6df08c9a32c6764d173552810ef1bd2ecb356532d389dd2136@52.232.7.187:30424",
-    "enode://62105fc25ce2cd5b299647f47eaa9211502dc76f0e9f461df915782df7242ac3223e3db04356ae6ed2977ccac20f0b16864406e9ca514a40a004cb6a5d0402aa@52.232.7.187:30425",
-    "enode://e0e388fc520fd493c33f0ce16685e6f98fb6aec28f2edc14ee6b179594ee519a896425b0025bb6f0e182dd3e468443f19c70885fbc66560d000093a668a86aa8@52.232.7.187:30426",
-    "enode://63f3353a72521ea10022127a4fe6b4acbef197c3fe668fd9f4805542d8a6fcf79f6335fbab62d180a35e19b739483e740858b113fdd7c13a26ad7b4e318a5aef@52.232.7.187:30427",
-    "enode://33a42b927085678d4aefd4e70b861cfca6ef5f6c143696c4f755973fd29e64c9e658cad57a66a687a7a156da1e3688b1fbdd17bececff2ee009fff038fa5666b@52.232.7.187:30428",
-    "enode://259ab5ab5c1daee3eab7e3819ab3177b82d25c29e6c2444fdd3f956e356afae79a72840ccf2d0665fe82c81ebc3b3734da1178ac9fd5d62c67e674b69f86b6be@52.232.7.187:30429",
-    "enode://558bccad7445ce3fd8db116ed6ab4aed1324fdbdac2348417340c1764dc46d46bffe0728e5b7d5c36f12e794c289f18f57f08f085d2c65c9910a5c7a65b6a66a@52.232.7.187:30430",
-    "enode://abe60937a0657ffded718e3f84a32987286983be257bdd6004775c4b525747c2b598f4fac49c8de324de5ce75b22673fa541a7ce2d555fb7f8ca325744ae3577@52.232.7.187:30431",
-    "enode://bce6f0aaa5b230742680084df71d4f026b3eff7f564265599216a1b06b765303fdc9325de30ffd5dfdaf302ce4b14322891d2faea50ce2ca298d7409f5858339@52.232.7.187:30432",
-    "enode://21b957c4e03277d42be6660730ec1b93f540764f26c6abdb54d006611139c7081248486206dfbf64fcaffd62589e9c6b8ea77a5297e4b21a605f1bcf49483ed0@52.232.7.187:30433",
-    "enode://ff104e30e64f24c3d7328acee8b13354e5551bc8d60bb25ecbd9632d955c7e34bb2d969482d173355baad91c8282f8b592624eb3929151090da3b4448d4d58fb@52.232.7.187:30434",
-    "enode://c76e2b5f81a521bceaec1518926a21380a345df9cf463461562c6845795512497fb67679e155fc96a74350f8b78de8f4c135dd52b106dbbb9795452021d09ea5@52.232.7.187:30435",
-    "enode://3288fd860105164f3e9b69934c4eb18f7146cfab31b5a671f994e21a36e9287766e5f9f075aefbc404538c77f7c2eb2a4495020a7633a1c3970d94e9fa770aeb@52.232.7.187:30436",
-    "enode://6cea859c7396d46b20cfcaa80f9a11cd112f8684f2f782f7b4c0e1e0af9212113429522075101923b9b957603e6c32095a6a07b5e5e35183c521952ee108dfaf@52.232.7.187:30437",
-    "enode://f628ec56e4ca8317cc24cc4ac9b27b95edcce7b96e1c7f3b53e30de4a8580fe44f2f0694a513bdb0a431acaf2824074d6ace4690247bbc34c14f426af8c056ea@52.232.7.187:30438",
-    "enode://055ec8b26fc105c4f97970a1cce9773a5e34c03f511b839db742198a1c571e292c54aa799e9afb991cc8a560529b8cdf3e0c344bc6c282aff2f68eec59361ddf@52.232.7.187:30439",
-    "enode://48cb0d430c328974226aa33a931d8446cd5a8d40f3ead8f4ce7ad60faa1278192eb6d58bed91258d63e81f255fc107eec2425ce2ae8b22350dd556076e160610@52.232.7.187:30440",
-    "enode://3fadb7af7f770d5ffc6b073b8d42834bebb18ce1fe8a4fe270d2b799e7051327093960dc61d9a18870db288f7746a0e6ea2a013cd6ab0e5f97ca08199473aace@52.232.7.187:30441",
-    "enode://a5d7168024c9992769cf380ffa559a64b4f39a29d468f579559863814eb0ae0ed689ac0871a3a2b4c78b03297485ec322d578281131ef5d5c09a4beb6200a97a@52.232.7.187:30442",
-    "enode://9c57744c5b2c2d71abcbe80512652f9234d4ab041b768a2a886ab390fe6f184860f40e113290698652d7e20a8ac74d27ac8671db23eb475b6c5e6253e4693bf8@52.232.7.187:30443",
-    "enode://daca9ff0c3176045a0e0ed228dee00ec86bc0939b135dc6b1caa23745d20fd0332e1ee74ad04020e89df56c7146d831a91b89d15ca3df05ba7618769fefab376@52.232.7.187:30444",
-    "enode://a3f6af59428cb4b9acb198db15ef5554fa43c2b0c18e468a269722d64a27218963a2975eaf82750b6262e42192b5e3669ea51337b4cda62b33987981bc5e0c1a@52.232.7.187:30445",
-    "enode://fe571422fa4651c3354c85dac61911a6a6520dd3c0332967a49d4133ca30e16a8a4946fa73ca2cb5de77917ea701a905e1c3015b2f4defcd53132b61cc84127a@52.232.7.187:30446",
-
-    // Mainframe
-    "enode://ee9a5a571ea6c8a59f9a8bb2c569c865e922b41c91d09b942e8c1d4dd2e1725bd2c26149da14de1f6321a2c6fdf1e07c503c3e093fb61696daebf74d6acd916b@54.186.219.160:30399",
-    "enode://a03f0562ecb8a992ad5242345535e73483cdc18ab934d36bf24b567d43447c2cea68f89f1d51d504dd13acc30f24ebce5a150bea2ccb1b722122ce4271dc199d@52.67.248.147:30399",
-    "enode://e2cbf9eafd85903d3b1c56743035284320695e0072bc8d7396e0542aa5e1c321b236f67eab66b79c2f15d4447fa4bbe74dd67d0467da23e7eb829f60ec8a812b@13.58.169.1:30399",
-    "enode://8b8c6bda6047f1cad9fab2db4d3d02b7aa26279902c32879f7bcd4a7d189fee77fdc36ee151ce6b84279b4792e72578fd529d2274d014132465758fbfee51cee@13.209.13.15:30399",
-    "enode://63f6a8818927e429585287cf2ca0cb9b11fa990b7b9b331c2962cdc6f21807a2473b26e8256225c26caff70d7218e59586d704d49061452c6852e382c885d03c@35.154.106.174:30399",
-    "enode://ed4bd3b794ed73f18e6dcc70c6624dfec63b5654f6ab54e8f40b16eff8afbd342d4230e099ddea40e84423f81b2d2ea79799dc345257b1fec6f6c422c9d008f7@52.213.20.99:30399",
+    // EF Swarm Bootnode - AWS - eu-central-1
+    "enode://4c113504601930bf2000c29bcd98d1716b6167749f58bad703bae338332fe93cc9d9204f08afb44100dc7bea479205f5d162df579f9a8f76f8b402d339709023@3.122.203.99:30301",
+    // EF Swarm Bootnode - AWS - us-west-2
+    "enode://89f2ede3371bff1ad9f2088f2012984e280287a4e2b68007c2a6ad994909c51886b4a8e9e2ecc97f9910aca538398e0a5804b0ee80a187fde1ba4f32626322ba@52.35.212.179:30301",
 }
vendor/github.com/ethereum/go-ethereum/cmd/swarm/config.go (generated, vendored, 32 lines changed)
@@ -79,8 +79,10 @@ const (
     SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
     SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
     SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
+    SWARM_ENV_BOOTNODE_MODE = "SWARM_BOOTNODE_MODE"
     SWARM_ACCESS_PASSWORD = "SWARM_ACCESS_PASSWORD"
     SWARM_AUTO_DEFAULTPATH = "SWARM_AUTO_DEFAULTPATH"
+    SWARM_GLOBALSTORE_API = "SWARM_GLOBALSTORE_API"
     GETH_ENV_DATADIR = "GETH_DATADIR"
 )

@@ -164,10 +166,9 @@ func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config
     return config, err
 }

-//override the current config with whatever is provided through the command line
-//most values are not allowed a zero value (empty string), if not otherwise noted
+// cmdLineOverride overrides the current config with whatever is provided through the command line
+// most values are not allowed a zero value (empty string), if not otherwise noted
 func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config {

     if keyid := ctx.GlobalString(SwarmAccountFlag.Name); keyid != "" {
         currentConfig.BzzAccount = keyid
     }
@@ -258,14 +259,21 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
         currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity
     }

+    if ctx.GlobalIsSet(SwarmBootnodeModeFlag.Name) {
+        currentConfig.BootnodeMode = ctx.GlobalBool(SwarmBootnodeModeFlag.Name)
+    }
+
+    if ctx.GlobalIsSet(SwarmGlobalStoreAPIFlag.Name) {
+        currentConfig.GlobalStoreAPI = ctx.GlobalString(SwarmGlobalStoreAPIFlag.Name)
+    }
+
     return currentConfig
 }

-//override the current config with whatver is provided in environment variables
-//most values are not allowed a zero value (empty string), if not otherwise noted
+// envVarsOverride overrides the current config with whatever is provided in environment variables
+// most values are not allowed a zero value (empty string), if not otherwise noted
 func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {

     if keyid := os.Getenv(SWARM_ENV_ACCOUNT); keyid != "" {
         currentConfig.BzzAccount = keyid
     }
@@ -364,6 +372,18 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
         currentConfig.Cors = cors
     }

+    if bm := os.Getenv(SWARM_ENV_BOOTNODE_MODE); bm != "" {
+        bootnodeMode, err := strconv.ParseBool(bm)
+        if err != nil {
+            utils.Fatalf("invalid environment variable %s: %v", SWARM_ENV_BOOTNODE_MODE, err)
+        }
+        currentConfig.BootnodeMode = bootnodeMode
+    }
+
+    if api := os.Getenv(SWARM_GLOBALSTORE_API); api != "" {
+        currentConfig.GlobalStoreAPI = api
+    }
+
     return currentConfig
 }
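The override chain above applies config-file values first, then environment variables, then CLI flags. A standalone sketch of just the boolean parsing added for SWARM_BOOTNODE_MODE, outside the bzzapi types:

package main

import (
    "fmt"
    "log"
    "os"
    "strconv"
)

func main() {
    bootnodeMode := false // default from the config file / zero value
    if bm := os.Getenv("SWARM_BOOTNODE_MODE"); bm != "" {
        v, err := strconv.ParseBool(bm) // accepts "1", "t", "true", "0", "false", ...
        if err != nil {
            log.Fatalf("invalid environment variable SWARM_BOOTNODE_MODE: %v", err)
        }
        bootnodeMode = v
    }
    fmt.Println("bootnode mode:", bootnodeMode)
}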
vendor/github.com/ethereum/go-ethereum/cmd/swarm/explore.go (generated, vendored, new file, 59 lines)
@@ -0,0 +1,59 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Command bzzhash computes a swarm tree hash.
package main

import (
    "context"
    "fmt"
    "os"

    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/swarm/storage"
    "gopkg.in/urfave/cli.v1"
)

var hashesCommand = cli.Command{
    Action:             hashes,
    CustomHelpTemplate: helpTemplate,
    Name:               "hashes",
    Usage:              "print all hashes of a file to STDOUT",
    ArgsUsage:          "<file>",
    Description:        "Prints all hashes of a file to STDOUT",
}

func hashes(ctx *cli.Context) {
    args := ctx.Args()
    if len(args) < 1 {
        utils.Fatalf("Usage: swarm hashes <file name>")
    }
    f, err := os.Open(args[0])
    if err != nil {
        utils.Fatalf("Error opening file " + args[0])
    }
    defer f.Close()

    fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams())
    refs, err := fileStore.GetAllReferences(context.TODO(), f, false)
    if err != nil {
        utils.Fatalf("%v\n", err)
    } else {
        for _, r := range refs {
            fmt.Println(r.String())
        }
    }
}
vendor/github.com/ethereum/go-ethereum/cmd/swarm/flags.go (generated, vendored, 9 lines changed)
@@ -156,6 +156,10 @@ var (
     Name:  "compressed",
     Usage: "Prints encryption keys in compressed form",
 }
+SwarmBootnodeModeFlag = cli.BoolFlag{
+    Name:  "bootnode-mode",
+    Usage: "Run Swarm in Bootnode mode",
+}
 SwarmFeedNameFlag = cli.StringFlag{
     Name:  "name",
     Usage: "User-defined name for the new feed, limited to 32 characters. If combined with topic, it will refer to a subtopic with this name",
@@ -172,4 +176,9 @@ var (
     Name:  "user",
     Usage: "Indicates the user who updates the feed",
 }
+SwarmGlobalStoreAPIFlag = cli.StringFlag{
+    Name:   "globalstore-api",
+    Usage:  "URL of the Global Store API provider (only for testing)",
+    EnvVar: SWARM_GLOBALSTORE_API,
+}
 )
vendor/github.com/ethereum/go-ethereum/cmd/swarm/global-store/global_store.go (generated, vendored, new file, 100 lines)
@@ -0,0 +1,100 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "net"
    "net/http"
    "os"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/swarm/storage/mock"
    "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
    "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
    cli "gopkg.in/urfave/cli.v1"
)

// startHTTP starts a global store with HTTP RPC server.
// It is used for "http" cli command.
func startHTTP(ctx *cli.Context) (err error) {
    server, cleanup, err := newServer(ctx)
    if err != nil {
        return err
    }
    defer cleanup()

    listener, err := net.Listen("tcp", ctx.String("addr"))
    if err != nil {
        return err
    }
    log.Info("http", "address", listener.Addr().String())

    return http.Serve(listener, server)
}

// startWS starts a global store with WebSocket RPC server.
// It is used for "websocket" cli command.
func startWS(ctx *cli.Context) (err error) {
    server, cleanup, err := newServer(ctx)
    if err != nil {
        return err
    }
    defer cleanup()

    listener, err := net.Listen("tcp", ctx.String("addr"))
    if err != nil {
        return err
    }
    origins := ctx.StringSlice("origins")
    log.Info("websocket", "address", listener.Addr().String(), "origins", origins)

    return http.Serve(listener, server.WebsocketHandler(origins))
}

// newServer creates a global store and returns its RPC server.
// Returned cleanup function should be called only if err is nil.
func newServer(ctx *cli.Context) (server *rpc.Server, cleanup func(), err error) {
    log.PrintOrigins(true)
    log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(ctx.Int("verbosity")), log.StreamHandler(os.Stdout, log.TerminalFormat(false))))

    cleanup = func() {}
    var globalStore mock.GlobalStorer
    dir := ctx.String("dir")
    if dir != "" {
        dbStore, err := db.NewGlobalStore(dir)
        if err != nil {
            return nil, nil, err
        }
        cleanup = func() {
            dbStore.Close()
        }
        globalStore = dbStore
        log.Info("database global store", "dir", dir)
    } else {
        globalStore = mem.NewGlobalStore()
        log.Info("in-memory global store")
    }

    server = rpc.NewServer()
    if err := server.RegisterName("mockStore", globalStore); err != nil {
        cleanup()
        return nil, nil, err
    }

    return server, cleanup, nil
}
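A client reaches this server through the ordinary geth RPC client; the test file that follows does exactly this, and the core handshake reduces to the sketch below (the address assumes a store started with "global-store http --addr 127.0.0.1:3033"):

package main

import (
    "log"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rpc"
    mockRPC "github.com/ethereum/go-ethereum/swarm/storage/mock/rpc"
)

func main() {
    // Dial the global store's HTTP RPC endpoint.
    client, err := rpc.DialHTTP("http://127.0.0.1:3033")
    if err != nil {
        log.Fatal(err)
    }
    store := mockRPC.NewGlobalStore(client)
    defer store.Close()

    // Each swarm node gets its own keyed view of the shared store.
    node := store.NewNodeStore(common.HexToAddress("123abc"))
    if err := node.Put([]byte("key"), []byte("value")); err != nil {
        log.Fatal(err)
    }
}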
vendor/github.com/ethereum/go-ethereum/cmd/swarm/global-store/global_store_test.go (generated, vendored, new file, 191 lines)
@@ -0,0 +1,191 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "context"
    "io/ioutil"
    "net"
    "net/http"
    "os"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rpc"
    mockRPC "github.com/ethereum/go-ethereum/swarm/storage/mock/rpc"
)

// TestHTTP_InMemory tests in-memory global store that exposes
// HTTP server.
func TestHTTP_InMemory(t *testing.T) {
    testHTTP(t, true)
}

// TestHTTP_Database tests global store with persisted database
// that exposes HTTP server.
func TestHTTP_Database(t *testing.T) {
    dir, err := ioutil.TempDir("", "swarm-global-store-")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(dir)

    // create a fresh global store
    testHTTP(t, true, "--dir", dir)

    // check if data saved by the previous global store instance
    testHTTP(t, false, "--dir", dir)
}

// testHTTP starts global store binary with HTTP server
// and validates that it can store and retrieve data.
// If put is false, no data will be stored, only retrieved,
// giving the possibility to check if data is present in the
// storage directory.
func testHTTP(t *testing.T, put bool, args ...string) {
    addr := findFreeTCPAddress(t)
    testCmd := runGlobalStore(t, append([]string{"http", "--addr", addr}, args...)...)
    defer testCmd.Interrupt()

    client, err := rpc.DialHTTP("http://" + addr)
    if err != nil {
        t.Fatal(err)
    }

    // wait until global store process is started as
    // rpc.DialHTTP is actually not connecting
    for i := 0; i < 1000; i++ {
        _, err = http.DefaultClient.Get("http://" + addr)
        if err == nil {
            break
        }
        time.Sleep(10 * time.Millisecond)
    }
    if err != nil {
        t.Fatal(err)
    }

    store := mockRPC.NewGlobalStore(client)
    defer store.Close()

    node := store.NewNodeStore(common.HexToAddress("123abc"))

    wantKey := "key"
    wantValue := "value"

    if put {
        err = node.Put([]byte(wantKey), []byte(wantValue))
        if err != nil {
            t.Fatal(err)
        }
    }

    gotValue, err := node.Get([]byte(wantKey))
    if err != nil {
        t.Fatal(err)
    }

    if string(gotValue) != wantValue {
        t.Errorf("got value %s for key %s, want %s", string(gotValue), wantKey, wantValue)
    }
}

// TestWebsocket_InMemory tests in-memory global store that exposes
// WebSocket server.
func TestWebsocket_InMemory(t *testing.T) {
    testWebsocket(t, true)
}

// TestWebsocket_Database tests global store with persisted database
// that exposes WebSocket server.
func TestWebsocket_Database(t *testing.T) {
    dir, err := ioutil.TempDir("", "swarm-global-store-")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(dir)

    // create a fresh global store
    testWebsocket(t, true, "--dir", dir)

    // check if data saved by the previous global store instance
    testWebsocket(t, false, "--dir", dir)
}

// testWebsocket starts global store binary with WebSocket server
// and validates that it can store and retrieve data.
// If put is false, no data will be stored, only retrieved,
// giving the possibility to check if data is present in the
// storage directory.
func testWebsocket(t *testing.T, put bool, args ...string) {
    addr := findFreeTCPAddress(t)
    testCmd := runGlobalStore(t, append([]string{"ws", "--addr", addr}, args...)...)
    defer testCmd.Interrupt()

    var client *rpc.Client
    var err error
    // wait until global store process is started
    for i := 0; i < 1000; i++ {
        client, err = rpc.DialWebsocket(context.Background(), "ws://"+addr, "")
        if err == nil {
            break
        }
        time.Sleep(10 * time.Millisecond)
    }
    if err != nil {
        t.Fatal(err)
    }

    store := mockRPC.NewGlobalStore(client)
    defer store.Close()

    node := store.NewNodeStore(common.HexToAddress("123abc"))

    wantKey := "key"
    wantValue := "value"

    if put {
        err = node.Put([]byte(wantKey), []byte(wantValue))
        if err != nil {
            t.Fatal(err)
        }
    }

    gotValue, err := node.Get([]byte(wantKey))
    if err != nil {
        t.Fatal(err)
    }

    if string(gotValue) != wantValue {
        t.Errorf("got value %s for key %s, want %s", string(gotValue), wantKey, wantValue)
    }
}

// findFreeTCPAddress returns a local address (IP:Port) on which
// the global store can listen.
func findFreeTCPAddress(t *testing.T) (addr string) {
    t.Helper()

    listener, err := net.Listen("tcp", "")
    if err != nil {
        t.Fatal(err)
    }
    defer listener.Close()

    return listener.Addr().String()
}
vendor/github.com/ethereum/go-ethereum/cmd/swarm/global-store/main.go (generated, vendored, new file, 104 lines)
@@ -0,0 +1,104 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "os"

    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/log"
    cli "gopkg.in/urfave/cli.v1"
)

var gitCommit string // Git SHA1 commit hash of the release (set via linker flags)

func main() {
    err := newApp().Run(os.Args)
    if err != nil {
        log.Error(err.Error())
        os.Exit(1)
    }
}

// newApp constructs a new instance of Swarm Global Store.
// Method Run is called on it in the main function and in tests.
func newApp() (app *cli.App) {
    app = utils.NewApp(gitCommit, "Swarm Global Store")

    app.Name = "global-store"

    // app flags (for all commands)
    app.Flags = []cli.Flag{
        cli.IntFlag{
            Name:  "verbosity",
            Value: 3,
            Usage: "verbosity level",
        },
    }

    app.Commands = []cli.Command{
        {
            Name:    "http",
            Aliases: []string{"h"},
            Usage:   "start swarm global store with http server",
            Action:  startHTTP,
            // Flags only for "start" command.
            // Allow app flags to be specified after the
            // command argument.
            Flags: append(app.Flags,
                cli.StringFlag{
                    Name:  "dir",
                    Value: "",
                    Usage: "data directory",
                },
                cli.StringFlag{
                    Name:  "addr",
                    Value: "0.0.0.0:3033",
                    Usage: "address to listen for http connection",
                },
            ),
        },
        {
            Name:    "websocket",
            Aliases: []string{"ws"},
            Usage:   "start swarm global store with websocket server",
            Action:  startWS,
            // Flags only for "start" command.
            // Allow app flags to be specified after the
            // command argument.
            Flags: append(app.Flags,
                cli.StringFlag{
                    Name:  "dir",
                    Value: "",
                    Usage: "data directory",
                },
                cli.StringFlag{
                    Name:  "addr",
                    Value: "0.0.0.0:3033",
                    Usage: "address to listen for websocket connection",
                },
                cli.StringSliceFlag{
                    Name:  "origins",
                    Value: &cli.StringSlice{"*"},
                    Usage: "websocket origins",
                },
            ),
        },
    }

    return app
}
vendor/github.com/ethereum/go-ethereum/cmd/swarm/global-store/run_test.go (generated, vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "fmt"
    "os"
    "testing"

    "github.com/docker/docker/pkg/reexec"
    "github.com/ethereum/go-ethereum/internal/cmdtest"
)

func init() {
    reexec.Register("swarm-global-store", func() {
        if err := newApp().Run(os.Args); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        os.Exit(0)
    })
}

func runGlobalStore(t *testing.T, args ...string) *cmdtest.TestCmd {
    tt := cmdtest.NewTestCmd(t, nil)
    tt.Run("swarm-global-store", args...)
    return tt
}

func TestMain(m *testing.M) {
    if reexec.Init() {
        return
    }
    os.Exit(m.Run())
}
vendor/github.com/ethereum/go-ethereum/cmd/swarm/main.go (generated, vendored, 57 lines changed)
@ -39,13 +39,16 @@ import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
|
||||
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock"
|
||||
mockrpc "github.com/ethereum/go-ethereum/swarm/storage/mock/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm/tracing"
|
||||
sv "github.com/ethereum/go-ethereum/swarm/version"
|
||||
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
const clientIdentifier = "swarm"
|
||||
@ -66,9 +69,10 @@ OPTIONS:
|
||||
{{end}}{{end}}
|
||||
`
|
||||
|
||||
var (
|
||||
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
|
||||
)
|
||||
// Git SHA1 commit hash of the release (set via linker flags)
|
||||
// this variable will be assigned if corresponding parameter is passed with install, but not with test
|
||||
// e.g.: go install -ldflags "-X main.gitCommit=ed1312d01b19e04ef578946226e5d8069d5dfd5a" ./cmd/swarm
|
||||
var gitCommit string
|
||||
|
||||
//declare a few constant error messages, useful for later error check comparisons in test
|
||||
var (
|
||||
@ -89,6 +93,7 @@ var defaultNodeConfig = node.DefaultConfig
|
||||
|
||||
// This init function sets defaults so cmd/swarm can run alongside geth.
|
||||
func init() {
|
||||
sv.GitCommit = gitCommit
|
||||
defaultNodeConfig.Name = clientIdentifier
|
||||
defaultNodeConfig.Version = sv.VersionWithCommit(gitCommit)
|
||||
defaultNodeConfig.P2P.ListenAddr = ":30399"
|
||||
@ -140,6 +145,8 @@ func init() {
|
||||
dbCommand,
|
||||
// See config.go
|
||||
DumpConfigCommand,
|
||||
// hashesCommand
|
||||
hashesCommand,
|
||||
}
|
||||
|
||||
// append a hidden help subcommand to all commands that have subcommands
|
||||
@ -154,7 +161,6 @@ func init() {
|
||||
utils.BootnodesFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.ListenPortFlag,
|
||||
utils.NoDiscoverFlag,
|
||||
utils.DiscoveryV5Flag,
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
@ -187,10 +193,13 @@ func init() {
|
||||
SwarmUploadDefaultPath,
|
||||
SwarmUpFromStdinFlag,
|
||||
SwarmUploadMimeType,
|
||||
// bootnode mode
|
||||
SwarmBootnodeModeFlag,
|
||||
// storage flags
|
||||
SwarmStorePath,
|
||||
SwarmStoreCapacity,
|
||||
SwarmStoreCacheCapacity,
|
||||
SwarmGlobalStoreAPIFlag,
|
||||
}
|
||||
rpcFlags := []cli.Flag{
|
||||
utils.WSEnabledFlag,
|
||||
@ -227,12 +236,17 @@ func main() {
|
||||
|
||||
func keys(ctx *cli.Context) error {
|
||||
privateKey := getPrivKey(ctx)
|
||||
pub := hex.EncodeToString(crypto.FromECDSAPub(&privateKey.PublicKey))
|
||||
pubkey := crypto.FromECDSAPub(&privateKey.PublicKey)
|
||||
pubkeyhex := hex.EncodeToString(pubkey)
|
||||
pubCompressed := hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey))
|
||||
bzzkey := crypto.Keccak256Hash(pubkey).Hex()
|
||||
|
||||
if !ctx.Bool(SwarmCompressedFlag.Name) {
|
||||
fmt.Println(fmt.Sprintf("publicKey=%s", pub))
|
||||
fmt.Println(fmt.Sprintf("bzzkey=%s", bzzkey[2:]))
|
||||
fmt.Println(fmt.Sprintf("publicKey=%s", pubkeyhex))
|
||||
}
|
||||
fmt.Println(fmt.Sprintf("publicKeyCompressed=%s", pubCompressed))
|
||||
|
||||
return nil
|
||||
}
@ -272,6 +286,10 @@ func bzzd(ctx *cli.Context) error {
    setSwarmBootstrapNodes(ctx, &cfg)
    //setup the ethereum node
    utils.SetNodeConfig(ctx, &cfg)

    //always disable discovery from p2p package - swarm discovery is done with the `hive` protocol
    cfg.P2P.NoDiscovery = true

    stack, err := node.New(&cfg)
    if err != nil {
        utils.Fatalf("can't create node: %v", err)

@ -294,6 +312,15 @@ func bzzd(ctx *cli.Context) error {
        stack.Stop()
    }()

    // add swarm bootnodes, because swarm doesn't use p2p package's discovery discv5
    go func() {
        s := stack.Server()

        for _, n := range cfg.P2P.BootstrapNodes {
            s.AddPeer(n)
        }
    }()

    stack.Wait()
    return nil
}

@ -301,8 +328,18 @@ func bzzd(ctx *cli.Context) error {
func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) {
    //define the swarm service boot function
    boot := func(_ *node.ServiceContext) (node.Service, error) {
        // In production, mockStore must be always nil.
        return swarm.NewSwarm(bzzconfig, nil)
        var nodeStore *mock.NodeStore
        if bzzconfig.GlobalStoreAPI != "" {
            // connect to global store
            client, err := rpc.Dial(bzzconfig.GlobalStoreAPI)
            if err != nil {
                return nil, fmt.Errorf("global store: %v", err)
            }
            globalStore := mockrpc.NewGlobalStore(client)
            // create a node store for this swarm key on global store
            nodeStore = globalStore.NewNodeStore(common.HexToAddress(bzzconfig.BzzKey))
        }
        return swarm.NewSwarm(bzzconfig, nodeStore)
    }
    //register within the ethereum node
    if err := stack.Register(boot); err != nil {
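The other notable change here: when the new SwarmGlobalStoreAPIFlag is set, the node's chunk storage is backed by a shared "global" mock store reached over RPC (used by simulations and smoke tests); with the flag empty, nodeStore stays nil and Swarm uses its normal local storage. A rough sketch of the same dial-and-bind sequence outside the boot function; the import paths, endpoint, and address below are assumptions for illustration, mirroring the calls in the diff:

package main

import (
    "log"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rpc"
    mockrpc "github.com/ethereum/go-ethereum/swarm/storage/mock/rpc" // assumed path
)

func main() {
    // assumes a global store server listening on this websocket endpoint
    client, err := rpc.Dial("ws://127.0.0.1:8546")
    if err != nil {
        log.Fatal(err)
    }
    globalStore := mockrpc.NewGlobalStore(client)

    // one NodeStore per swarm key, as in registerBzzService above;
    // the address is a placeholder for the node's bzzkey
    nodeStore := globalStore.NewNodeStore(common.HexToAddress("0x1234"))
    _ = nodeStore
}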
@ -428,5 +465,5 @@ func setSwarmBootstrapNodes(ctx *cli.Context, cfg *node.Config) {
        }
        cfg.P2P.BootstrapNodes = append(cfg.P2P.BootstrapNodes, node)
    }
    log.Debug("added default swarm bootnodes", "length", len(cfg.P2P.BootstrapNodes))

}
2
vendor/github.com/ethereum/go-ethereum/cmd/swarm/run_test.go
generated
vendored
@ -254,7 +254,6 @@ func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
    node.Cmd = runSwarm(t,
        "--port", p2pPort,
        "--nat", "extip:127.0.0.1",
        "--nodiscover",
        "--datadir", dir,
        "--ipcpath", conf.IPCPath,
        "--ens-api", "",

@ -330,7 +329,6 @@ func newTestNode(t *testing.T, dir string) *testNode {
    node.Cmd = runSwarm(t,
        "--port", p2pPort,
        "--nat", "extip:127.0.0.1",
        "--nodiscover",
        "--datadir", dir,
        "--ipcpath", conf.IPCPath,
        "--ens-api", "",
127
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/feed_upload_and_sync.go
generated
vendored
@ -2,13 +2,10 @@ package main

import (
    "bytes"
    "context"
    "crypto/md5"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "net/http/httptrace"
    "os"
    "os/exec"
    "strings"

@ -19,12 +16,8 @@ import (
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/api/client"
    "github.com/ethereum/go-ethereum/swarm/spancontext"
    "github.com/ethereum/go-ethereum/swarm/storage/feed"
    "github.com/ethereum/go-ethereum/swarm/testutil"
    colorable "github.com/mattn/go-colorable"
    opentracing "github.com/opentracing/opentracing-go"
    "github.com/pborman/uuid"
    cli "gopkg.in/urfave/cli.v1"
)

@ -33,34 +26,28 @@ const (
    feedRandomDataLength = 8
)

func cliFeedUploadAndSync(c *cli.Context) error {
    metrics.GetOrRegisterCounter("feed-and-sync", nil).Inc(1)
    log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))))

func feedUploadAndSyncCmd(ctx *cli.Context, tuid string) error {
    errc := make(chan error)

    go func() {
        errc <- feedUploadAndSync(c)
        errc <- feedUploadAndSync(ctx, tuid)
    }()

    select {
    case err := <-errc:
        if err != nil {
            metrics.GetOrRegisterCounter("feed-and-sync.fail", nil).Inc(1)
            metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
        }
        return err
    case <-time.After(time.Duration(timeout) * time.Second):
        metrics.GetOrRegisterCounter("feed-and-sync.timeout", nil).Inc(1)
        metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1)

        return fmt.Errorf("timeout after %v sec", timeout)
    }
}
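This errc/select shape is the template every swarm-smoke command now follows: the actual work runs in a goroutine reporting into an error channel, while the select races it against time.After and bumps a <commandName>.fail or <commandName>.timeout counter accordingly. Reduced to a self-contained sketch (doWork and the 10-second budget are placeholders):

package main

import (
    "errors"
    "fmt"
    "time"
)

func doWork() error { // stands in for feedUploadAndSync et al.
    time.Sleep(2 * time.Second)
    return errors.New("sync failed")
}

func run(timeout time.Duration) error {
    errc := make(chan error)
    go func() {
        errc <- doWork()
    }()
    select {
    case err := <-errc:
        return err // nil or the worker's error
    case <-time.After(timeout):
        return fmt.Errorf("timeout after %v", timeout)
    }
}

func main() {
    fmt.Println(run(10 * time.Second))
}

Since errc is unbuffered, a worker that loses the race stays blocked on its send; the smoke binary exits right after the command returns, so that leaked goroutine is harmless in this context.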
// TODO: retrieve with manifest + extract repeating code
func feedUploadAndSync(c *cli.Context) error {
    defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size (kb)", filesize) }(time.Now())

    generateEndpoints(scheme, cluster, appName, from, to)

    log.Info("generating and uploading feeds to " + endpoints[0] + " and syncing")
func feedUploadAndSync(c *cli.Context, tuid string) error {
    log.Info("generating and uploading feeds to " + httpEndpoint(hosts[0]) + " and syncing")

    // create a random private key to sign updates with and derive the address
    pkFile, err := ioutil.TempFile("", "swarm-feed-smoke-test")

@ -114,7 +101,7 @@ func feedUploadAndSync(c *cli.Context) error {

    // create feed manifest, topic only
    var out bytes.Buffer
    cmd := exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--topic", topicHex, "--user", userHex)
    cmd := exec.Command("swarm", "--bzzapi", httpEndpoint(hosts[0]), "feed", "create", "--topic", topicHex, "--user", userHex)
    cmd.Stdout = &out
    log.Debug("create feed manifest topic cmd", "cmd", cmd)
    err = cmd.Run()

@ -129,7 +116,7 @@ func feedUploadAndSync(c *cli.Context) error {
    out.Reset()

    // create feed manifest, subtopic only
    cmd = exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--name", subTopicHex, "--user", userHex)
    cmd = exec.Command("swarm", "--bzzapi", httpEndpoint(hosts[0]), "feed", "create", "--name", subTopicHex, "--user", userHex)
    cmd.Stdout = &out
    log.Debug("create feed manifest subtopic cmd", "cmd", cmd)
    err = cmd.Run()

@ -144,7 +131,7 @@ func feedUploadAndSync(c *cli.Context) error {
    out.Reset()

    // create feed manifest, merged topic
    cmd = exec.Command("swarm", "--bzzapi", endpoints[0], "feed", "create", "--topic", topicHex, "--name", subTopicHex, "--user", userHex)
    cmd = exec.Command("swarm", "--bzzapi", httpEndpoint(hosts[0]), "feed", "create", "--topic", topicHex, "--name", subTopicHex, "--user", userHex)
    cmd.Stdout = &out
    log.Debug("create feed manifest mergetopic cmd", "cmd", cmd)
    err = cmd.Run()

@ -170,7 +157,7 @@ func feedUploadAndSync(c *cli.Context) error {
    dataHex := hexutil.Encode(data)

    // update with topic
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, dataHex)
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--topic", topicHex, dataHex)
    cmd.Stdout = &out
    log.Debug("update feed manifest topic cmd", "cmd", cmd)
    err = cmd.Run()

@ -181,7 +168,7 @@ func feedUploadAndSync(c *cli.Context) error {
    out.Reset()

    // update with subtopic
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--name", subTopicHex, dataHex)
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--name", subTopicHex, dataHex)
    cmd.Stdout = &out
    log.Debug("update feed manifest subtopic cmd", "cmd", cmd)
    err = cmd.Run()

@ -192,7 +179,7 @@ func feedUploadAndSync(c *cli.Context) error {
    out.Reset()

    // update with merged topic
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, "--name", subTopicHex, dataHex)
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--topic", topicHex, "--name", subTopicHex, dataHex)
    cmd.Stdout = &out
    log.Debug("update feed manifest merged topic cmd", "cmd", cmd)
    err = cmd.Run()

@ -206,14 +193,14 @@ func feedUploadAndSync(c *cli.Context) error {

    // retrieve the data
    wg := sync.WaitGroup{}
    for _, endpoint := range endpoints {
    for _, host := range hosts {
        // raw retrieve, topic only
        for _, hex := range []string{topicHex, subTopicOnlyHex, mergedSubTopicHex} {
            wg.Add(1)
            ruid := uuid.New()[:8]
            go func(hex string, endpoint string, ruid string) {
                for {
                    err := fetchFeed(hex, userHex, endpoint, dataHash, ruid)
                    err := fetchFeed(hex, userHex, httpEndpoint(host), dataHash, ruid)
                    if err != nil {
                        continue
                    }

@ -221,20 +208,18 @@ func feedUploadAndSync(c *cli.Context) error {
                    wg.Done()
                    return
                }
            }(hex, endpoint, ruid)

            }(hex, httpEndpoint(host), ruid)
        }
    }
    wg.Wait()
    log.Info("all endpoints synced random data successfully")

    // upload test file
    seed := int(time.Now().UnixNano() / 1e6)
    log.Info("feed uploading to "+endpoints[0]+" and syncing", "seed", seed)
    log.Info("feed uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed)

    randomBytes := testutil.RandomBytes(seed, filesize*1000)

    hash, err := upload(&randomBytes, endpoints[0])
    hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
    if err != nil {
        return err
    }

@ -243,15 +228,12 @@ func feedUploadAndSync(c *cli.Context) error {
        return err
    }
    multihashHex := hexutil.Encode(hashBytes)
    fileHash, err := digest(bytes.NewReader(randomBytes))
    if err != nil {
        return err
    }
    fileHash := h.Sum(nil)

    log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fileHash))

    // update file with topic
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, multihashHex)
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--topic", topicHex, multihashHex)
    cmd.Stdout = &out
    err = cmd.Run()
    if err != nil {

@ -261,7 +243,7 @@ func feedUploadAndSync(c *cli.Context) error {
    out.Reset()

    // update file with subtopic
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--name", subTopicHex, multihashHex)
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--name", subTopicHex, multihashHex)
    cmd.Stdout = &out
    err = cmd.Run()
    if err != nil {

@ -271,7 +253,7 @@ func feedUploadAndSync(c *cli.Context) error {
    out.Reset()

    // update file with merged topic
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", endpoints[0], "feed", "update", "--topic", topicHex, "--name", subTopicHex, multihashHex)
    cmd = exec.Command("swarm", "--bzzaccount", pkFile.Name(), "--bzzapi", httpEndpoint(hosts[0]), "feed", "update", "--topic", topicHex, "--name", subTopicHex, multihashHex)
    cmd.Stdout = &out
    err = cmd.Run()
    if err != nil {

@ -282,7 +264,7 @@ func feedUploadAndSync(c *cli.Context) error {

    time.Sleep(3 * time.Second)

    for _, endpoint := range endpoints {
    for _, host := range hosts {

        // manifest retrieve, topic only
        for _, url := range []string{manifestWithTopic, manifestWithSubTopic, manifestWithMergedTopic} {

@ -290,7 +272,7 @@ func feedUploadAndSync(c *cli.Context) error {
            ruid := uuid.New()[:8]
            go func(url string, endpoint string, ruid string) {
                for {
                    err := fetch(url, endpoint, fileHash, ruid)
                    err := fetch(url, endpoint, fileHash, ruid, "")
                    if err != nil {
                        continue
                    }

@ -298,7 +280,7 @@ func feedUploadAndSync(c *cli.Context) error {
                    wg.Done()
                    return
                }
            }(url, endpoint, ruid)
            }(url, httpEndpoint(host), ruid)
        }

    }

@ -307,60 +289,3 @@ func feedUploadAndSync(c *cli.Context) error {

    return nil
}

func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error {
    ctx, sp := spancontext.StartSpan(context.Background(), "feed-and-sync.fetch")
    defer sp.Finish()

    log.Trace("sleeping", "ruid", ruid)
    time.Sleep(3 * time.Second)

    log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user)

    var tn time.Time
    reqUri := endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user
    req, _ := http.NewRequest("GET", reqUri, nil)

    opentracing.GlobalTracer().Inject(
        sp.Context(),
        opentracing.HTTPHeaders,
        opentracing.HTTPHeadersCarrier(req.Header))

    trace := client.GetClientTrace("feed-and-sync - http get", "feed-and-sync", ruid, &tn)

    req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
    transport := http.DefaultTransport

    //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}

    tn = time.Now()
    res, err := transport.RoundTrip(req)
    if err != nil {
        log.Error(err.Error(), "ruid", ruid)
        return err
    }

    log.Trace("http get response (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength)

    if res.StatusCode != 200 {
        return fmt.Errorf("expected status code %d, got %v (ruid %v)", 200, res.StatusCode, ruid)
    }

    defer res.Body.Close()

    rdigest, err := digest(res.Body)
    if err != nil {
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    if !bytes.Equal(rdigest, original) {
        err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)

    return nil
}
100
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/main.go
generated
vendored
@ -37,18 +37,16 @@ var (
)

var (
    endpoints        []string
    includeLocalhost bool
    cluster          string
    appName          string
    scheme           string
    filesize         int
    syncDelay        int
    from             int
    to               int
    verbosity        int
    timeout          int
    single           bool
    allhosts     string
    hosts        []string
    filesize     int
    syncDelay    int
    httpPort     int
    wsPort       int
    verbosity    int
    timeout      int
    single       bool
    trackTimeout int
)

func main() {

@ -59,39 +57,22 @@ func main() {

    app.Flags = []cli.Flag{
        cli.StringFlag{
            Name:        "cluster-endpoint",
            Value:       "prod",
            Usage:       "cluster to point to (prod or a given namespace)",
            Destination: &cluster,
        },
        cli.StringFlag{
            Name:        "app",
            Value:       "swarm",
            Usage:       "application to point to (swarm or swarm-private)",
            Destination: &appName,
            Name:        "hosts",
            Value:       "",
            Usage:       "comma-separated list of swarm hosts",
            Destination: &allhosts,
        },
        cli.IntFlag{
            Name:        "cluster-from",
            Value:       8501,
            Usage:       "swarm node (from)",
            Destination: &from,
            Name:        "http-port",
            Value:       80,
            Usage:       "http port",
            Destination: &httpPort,
        },
        cli.IntFlag{
            Name:        "cluster-to",
            Value:       8512,
            Usage:       "swarm node (to)",
            Destination: &to,
        },
        cli.StringFlag{
            Name:        "cluster-scheme",
            Value:       "http",
            Usage:       "http or https",
            Destination: &scheme,
        },
        cli.BoolFlag{
            Name:        "include-localhost",
            Usage:       "whether to include localhost:8500 as an endpoint",
            Destination: &includeLocalhost,
            Name:        "ws-port",
            Value:       8546,
            Usage:       "ws port",
            Destination: &wsPort,
        },
        cli.IntFlag{
            Name: "filesize",

@ -122,6 +103,12 @@ func main() {
            Usage:       "whether to fetch content from a single node or from all nodes",
            Destination: &single,
        },
        cli.IntFlag{
            Name:        "track-timeout",
            Value:       5,
            Usage:       "timeout in seconds to wait for GetAllReferences to return",
            Destination: &trackTimeout,
        },
    }

    app.Flags = append(app.Flags, []cli.Flag{

@ -130,7 +117,7 @@ func main() {
        swarmmetrics.MetricsInfluxDBDatabaseFlag,
        swarmmetrics.MetricsInfluxDBUsernameFlag,
        swarmmetrics.MetricsInfluxDBPasswordFlag,
        swarmmetrics.MetricsInfluxDBHostTagFlag,
        swarmmetrics.MetricsInfluxDBTagsFlag,
    }...)

    app.Flags = append(app.Flags, tracing.Flags...)

@ -140,13 +127,25 @@ func main() {
            Name:    "upload_and_sync",
            Aliases: []string{"c"},
            Usage:   "upload and sync",
            Action:  cliUploadAndSync,
            Action:  wrapCliCommand("upload-and-sync", uploadAndSyncCmd),
        },
        {
            Name:    "feed_sync",
            Aliases: []string{"f"},
            Usage:   "feed update generate, upload and sync",
            Action:  cliFeedUploadAndSync,
            Action:  wrapCliCommand("feed-and-sync", feedUploadAndSyncCmd),
        },
        {
            Name:    "upload_speed",
            Aliases: []string{"u"},
            Usage:   "measure upload speed",
            Action:  wrapCliCommand("upload-speed", uploadSpeedCmd),
        },
        {
            Name:    "sliding_window",
            Aliases: []string{"s"},
            Usage:   "measure network aggregate capacity",
            Action:  wrapCliCommand("sliding-window", slidingWindowCmd),
        },
    }
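All four actions above are produced by wrapCliCommand (defined in the new util.go later in this diff), which factors the shared setup out of each command body. A minimal sketch of the same wrapping idea, with the setup reduced to a print and a fixed test uuid standing in for uuid.New()[:8]; names here are illustrative, not the vendored code:

package main

import (
    "fmt"
    "os"

    cli "gopkg.in/urfave/cli.v1"
)

// wrap mirrors the shape of wrapCliCommand: shared setup happens once,
// and each command body receives the per-run test uuid.
func wrap(name string, cmd func(*cli.Context, string) error) func(*cli.Context) error {
    return func(ctx *cli.Context) error {
        tuid := "deadbeef" // util.go derives this from uuid.New()[:8]
        fmt.Println("starting", name, "tuid", tuid)
        return cmd(ctx, tuid)
    }
}

func main() {
    app := cli.NewApp()
    app.Commands = []cli.Command{{
        Name: "ping",
        Action: wrap("ping", func(ctx *cli.Context, tuid string) error {
            fmt.Println("pong", tuid)
            return nil
        }),
    }}
    _ = app.Run(os.Args)
}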
@ -177,13 +176,14 @@ func emitMetrics(ctx *cli.Context) error {
            database = ctx.GlobalString(swarmmetrics.MetricsInfluxDBDatabaseFlag.Name)
            username = ctx.GlobalString(swarmmetrics.MetricsInfluxDBUsernameFlag.Name)
            password = ctx.GlobalString(swarmmetrics.MetricsInfluxDBPasswordFlag.Name)
            hosttag  = ctx.GlobalString(swarmmetrics.MetricsInfluxDBHostTagFlag.Name)
            tags     = ctx.GlobalString(swarmmetrics.MetricsInfluxDBTagsFlag.Name)
        )
        return influxdb.InfluxDBWithTagsOnce(gethmetrics.DefaultRegistry, endpoint, database, username, password, "swarm-smoke.", map[string]string{
            "host":     hosttag,
            "version":  gitCommit,
            "filesize": fmt.Sprintf("%v", filesize),
        })

        tagsMap := utils.SplitTagsFlag(tags)
        tagsMap["version"] = gitCommit
        tagsMap["filesize"] = fmt.Sprintf("%v", filesize)

        return influxdb.InfluxDBWithTagsOnce(gethmetrics.DefaultRegistry, endpoint, database, username, password, "swarm-smoke.", tagsMap)
    }

    return nil
131
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/sliding_window.go
generated
vendored
Normal file
@ -0,0 +1,131 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "bytes"
    "fmt"
    "math/rand"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/testutil"
    "github.com/pborman/uuid"

    cli "gopkg.in/urfave/cli.v1"
)

type uploadResult struct {
    hash   string
    digest []byte
}

func slidingWindowCmd(ctx *cli.Context, tuid string) error {
    errc := make(chan error)

    go func() {
        errc <- slidingWindow(ctx, tuid)
    }()

    select {
    case err := <-errc:
        if err != nil {
            metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
        }
        return err
    case <-time.After(time.Duration(timeout) * time.Second):
        metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1)

        return fmt.Errorf("timeout after %v sec", timeout)
    }
}

func slidingWindow(ctx *cli.Context, tuid string) error {
    hashes := []uploadResult{} //swarm hashes of the uploads
    nodes := len(hosts)
    const iterationTimeout = 30 * time.Second
    log.Info("sliding window test started", "tuid", tuid, "nodes", nodes, "filesize(kb)", filesize, "timeout", timeout)
    uploadedBytes := 0
    networkDepth := 0
    errored := false

outer:
    for {
        log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed)

        t1 := time.Now()

        randomBytes := testutil.RandomBytes(seed, filesize*1000)

        hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
        if err != nil {
            log.Error(err.Error())
            return err
        }

        metrics.GetOrRegisterResettingTimer("sliding-window.upload-time", nil).UpdateSince(t1)

        fhash, err := digest(bytes.NewReader(randomBytes))
        if err != nil {
            log.Error(err.Error())
            return err
        }

        log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash), "sleeping", syncDelay)
        hashes = append(hashes, uploadResult{hash: hash, digest: fhash})
        time.Sleep(time.Duration(syncDelay) * time.Second)
        uploadedBytes += filesize * 1000

        for i, v := range hashes {
            timeout := time.After(time.Duration(timeout) * time.Second)
            errored = false

        inner:
            for {
                select {
                case <-timeout:
                    errored = true
                    log.Error("error retrieving hash. timeout", "hash idx", i, "err", err)
                    metrics.GetOrRegisterCounter("sliding-window.single.error", nil).Inc(1)
                    break inner
                default:
                    idx := 1 + rand.Intn(len(hosts)-1)
                    ruid := uuid.New()[:8]
                    start := time.Now()
                    err := fetch(v.hash, httpEndpoint(hosts[idx]), v.digest, ruid, "")
                    if err != nil {
                        continue inner
                    }
                    metrics.GetOrRegisterResettingTimer("sliding-window.single.fetch-time", nil).UpdateSince(start)
                    break inner
                }
            }

            if errored {
                break outer
            }
            networkDepth = i
            metrics.GetOrRegisterGauge("sliding-window.network-depth", nil).Update(int64(networkDepth))
        }
    }

    log.Info("sliding window test finished", "errored?", errored, "networkDepth", networkDepth, "networkDepth(kb)", networkDepth*filesize)
    log.Info("stats", "uploadedFiles", len(hashes), "uploadedKb", uploadedBytes/1000, "filesizeKb", filesize)

    return nil
}
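Reading slidingWindow as an algorithm: each outer round uploads one more filesize-KB random file to hosts[0], waits syncDelay seconds for syncing, then re-fetches every hash uploaded so far from a randomly picked other host, giving each hash a timeout-second budget. networkDepth ends up as the index of the deepest upload still retrievable when a fetch finally times out. For example, with filesize=100, a run that still serves the first seven uploads (indices 0 through 6) but times out on the eighth finishes with networkDepth 6 and reports 6*100 = 600 as networkDepth(kb).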
245
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_and_sync.go
generated
vendored
@ -19,91 +19,122 @@ package main

import (
    "bytes"
    "context"
    "crypto/md5"
    crand "crypto/rand"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "net/http"
    "net/http/httptrace"
    "os"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/swarm/api"
    "github.com/ethereum/go-ethereum/swarm/api/client"
    "github.com/ethereum/go-ethereum/swarm/spancontext"
    "github.com/ethereum/go-ethereum/swarm/storage"
    "github.com/ethereum/go-ethereum/swarm/testutil"
    opentracing "github.com/opentracing/opentracing-go"
    "github.com/pborman/uuid"

    cli "gopkg.in/urfave/cli.v1"
)

func generateEndpoints(scheme string, cluster string, app string, from int, to int) {
    if cluster == "prod" {
        for port := from; port < to; port++ {
            endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net", scheme, port))
        }
    } else {
        for port := from; port < to; port++ {
            endpoints = append(endpoints, fmt.Sprintf("%s://%s-%v-%s.stg.swarm-gateways.net", scheme, app, port, cluster))
        }
    }

    if includeLocalhost {
        endpoints = append(endpoints, "http://localhost:8500")
    }
}

func cliUploadAndSync(c *cli.Context) error {
    log.PrintOrigins(true)
    log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))

    metrics.GetOrRegisterCounter("upload-and-sync", nil).Inc(1)
func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
    randomBytes := testutil.RandomBytes(seed, filesize*1000)

    errc := make(chan error)

    go func() {
        errc <- uploadAndSync(c)
        errc <- uploadAndSync(ctx, randomBytes, tuid)
    }()

    select {
    case err := <-errc:
        if err != nil {
            metrics.GetOrRegisterCounter("upload-and-sync.fail", nil).Inc(1)
            metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
        }
        return err
    case <-time.After(time.Duration(timeout) * time.Second):
        metrics.GetOrRegisterCounter("upload-and-sync.timeout", nil).Inc(1)
        return fmt.Errorf("timeout after %v sec", timeout)
        metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1)

        e := fmt.Errorf("timeout after %v sec", timeout)
        // trigger debug functionality on randomBytes
        err := trackChunks(randomBytes[:])
        if err != nil {
            e = fmt.Errorf("%v; triggerChunkDebug failed: %v", e, err)
        }

        return e
    }
}

func uploadAndSync(c *cli.Context) error {
    defer func(now time.Time) {
        totalTime := time.Since(now)
func trackChunks(testData []byte) error {
    log.Warn("Test timed out; running chunk debug sequence")

        log.Info("total time", "time", totalTime, "kb", filesize)
        metrics.GetOrRegisterCounter("upload-and-sync.total-time", nil).Inc(int64(totalTime))
    }(time.Now())
    addrs, err := getAllRefs(testData)
    if err != nil {
        return err
    }
    log.Trace("All references retrieved")

    generateEndpoints(scheme, cluster, appName, from, to)
    seed := int(time.Now().UnixNano() / 1e6)
    log.Info("uploading to "+endpoints[0]+" and syncing", "seed", seed)
    // has-chunks
    for _, host := range hosts {
        httpHost := fmt.Sprintf("ws://%s:%d", host, 8546)
        log.Trace("Calling `Has` on host", "httpHost", httpHost)
        rpcClient, err := rpc.Dial(httpHost)
        if err != nil {
            log.Trace("Error dialing host", "err", err)
            return err
        }
        log.Trace("rpc dial ok")
        var hasInfo []api.HasInfo
        err = rpcClient.Call(&hasInfo, "bzz_has", addrs)
        if err != nil {
            log.Trace("Error calling host", "err", err)
            return err
        }
        log.Trace("rpc call ok")
        count := 0
        for _, info := range hasInfo {
            if !info.Has {
                count++
                log.Error("Host does not have chunk", "host", httpHost, "chunk", info.Addr)
            }
        }
        if count == 0 {
            log.Info("Host reported to have all chunks", "host", httpHost)
        }
    }
    return nil
}
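trackChunks above is the new timeout post-mortem: it recomputes every chunk reference for the test payload locally (getAllRefs, just below), then asks each node over its websocket RPC which of those chunks it actually holds. A stripped-down sketch of the same debug query against a single node; the endpoint and the chunk addresses here are placeholders, and passing them as hex strings is an assumption for illustration (the vendored code passes the storage.AddressCollection directly):

package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
    // placeholder endpoint; trackChunks builds ws://<host>:8546 per host
    client, err := rpc.Dial("ws://127.0.0.1:8546")
    if err != nil {
        log.Fatal(err)
    }
    // hypothetical chunk addresses; real ones come from GetAllReferences
    addrs := []string{"00aa", "00bb"}
    var hasInfo []api.HasInfo
    if err := client.Call(&hasInfo, "bzz_has", addrs); err != nil {
        log.Fatal(err)
    }
    for _, info := range hasInfo {
        fmt.Println(info.Addr, "present:", info.Has)
    }
}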
    randomBytes := testutil.RandomBytes(seed, filesize*1000)
func getAllRefs(testData []byte) (storage.AddressCollection, error) {
    log.Trace("Getting all references for given root hash")
    datadir, err := ioutil.TempDir("", "chunk-debug")
    if err != nil {
        return nil, fmt.Errorf("unable to create temp dir: %v", err)
    }
    defer os.RemoveAll(datadir)
    fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
    if err != nil {
        return nil, err
    }
    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(trackTimeout)*time.Second)
    defer cancel()

    reader := bytes.NewReader(testData)
    return fileStore.GetAllReferences(ctx, reader, false)
}

func uploadAndSync(c *cli.Context, randomBytes []byte, tuid string) error {
    log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "tuid", tuid, "seed", seed)

    t1 := time.Now()
    hash, err := upload(&randomBytes, endpoints[0])
    hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
    if err != nil {
        log.Error(err.Error())
        return err
    }
    metrics.GetOrRegisterCounter("upload-and-sync.upload-time", nil).Inc(int64(time.Since(t1)))
    t2 := time.Since(t1)
    metrics.GetOrRegisterResettingTimer("upload-and-sync.upload-time", nil).Update(t2)

    fhash, err := digest(bytes.NewReader(randomBytes))
    if err != nil {

@ -111,147 +142,53 @@ func uploadAndSync(c *cli.Context) error {
        return err
    }

    log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
    log.Info("uploaded successfully", "tuid", tuid, "hash", hash, "took", t2, "digest", fmt.Sprintf("%x", fhash))

    time.Sleep(time.Duration(syncDelay) * time.Second)

    wg := sync.WaitGroup{}
    if single {
        rand.Seed(time.Now().UTC().UnixNano())
        randIndex := 1 + rand.Intn(len(endpoints)-1)
        randIndex := 1 + rand.Intn(len(hosts)-1)
        ruid := uuid.New()[:8]
        wg.Add(1)
        go func(endpoint string, ruid string) {
            for {
                start := time.Now()
                err := fetch(hash, endpoint, fhash, ruid)
                fetchTime := time.Since(start)
                err := fetch(hash, endpoint, fhash, ruid, tuid)
                if err != nil {
                    continue
                }
                ended := time.Since(start)

                metrics.GetOrRegisterMeter("upload-and-sync.single.fetch-time", nil).Mark(int64(fetchTime))
                metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
                log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
                wg.Done()
                return
            }
        }(endpoints[randIndex], ruid)
        }(httpEndpoint(hosts[randIndex]), ruid)
    } else {
        for _, endpoint := range endpoints {
        for _, endpoint := range hosts[1:] {
            ruid := uuid.New()[:8]
            wg.Add(1)
            go func(endpoint string, ruid string) {
                for {
                    start := time.Now()
                    err := fetch(hash, endpoint, fhash, ruid)
                    fetchTime := time.Since(start)
                    err := fetch(hash, endpoint, fhash, ruid, tuid)
                    if err != nil {
                        continue
                    }
                    ended := time.Since(start)

                    metrics.GetOrRegisterMeter("upload-and-sync.each.fetch-time", nil).Mark(int64(fetchTime))
                    metrics.GetOrRegisterResettingTimer("upload-and-sync.each.fetch-time", nil).Update(ended)
                    log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
                    wg.Done()
                    return
                }
            }(endpoint, ruid)
            }(httpEndpoint(endpoint), ruid)
        }
    }
    wg.Wait()
    log.Info("all endpoints synced random file successfully")
    log.Info("all hosts synced random file successfully")

    return nil
}

// fetch gets the requested `hash` from `endpoint` and compares it with the `original` file
func fetch(hash string, endpoint string, original []byte, ruid string) error {
    ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
    defer sp.Finish()

    log.Trace("sleeping", "ruid", ruid)
    time.Sleep(3 * time.Second)
    log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)

    var tn time.Time
    reqUri := endpoint + "/bzz:/" + hash + "/"
    req, _ := http.NewRequest("GET", reqUri, nil)

    opentracing.GlobalTracer().Inject(
        sp.Context(),
        opentracing.HTTPHeaders,
        opentracing.HTTPHeadersCarrier(req.Header))

    trace := client.GetClientTrace("upload-and-sync - http get", "upload-and-sync", ruid, &tn)

    req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
    transport := http.DefaultTransport

    //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}

    tn = time.Now()
    res, err := transport.RoundTrip(req)
    if err != nil {
        log.Error(err.Error(), "ruid", ruid)
        return err
    }
    log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)

    if res.StatusCode != 200 {
        err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    defer res.Body.Close()

    rdigest, err := digest(res.Body)
    if err != nil {
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    if !bytes.Equal(rdigest, original) {
        err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)

    return nil
}

// upload is uploading a file `f` to `endpoint` via the `swarm up` cmd
func upload(dataBytes *[]byte, endpoint string) (string, error) {
    swarm := client.NewClient(endpoint)
    f := &client.File{
        ReadCloser: ioutil.NopCloser(bytes.NewReader(*dataBytes)),
        ManifestEntry: api.ManifestEntry{
            ContentType: "text/plain",
            Mode:        0660,
            Size:        int64(len(*dataBytes)),
        },
    }

    // upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
    return swarm.Upload(f, "", false)
}

func digest(r io.Reader) ([]byte, error) {
    h := md5.New()
    _, err := io.Copy(h, r)
    if err != nil {
        return nil, err
    }
    return h.Sum(nil), nil
}

// generates random data in heap buffer
func generateRandomData(datasize int) ([]byte, error) {
    b := make([]byte, datasize)
    c, err := crand.Read(b)
    if err != nil {
        return nil, err
    } else if c != datasize {
        return nil, errors.New("short read")
    }
    return b, nil
}
73
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/upload_speed.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "bytes"
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/testutil"

    cli "gopkg.in/urfave/cli.v1"
)

func uploadSpeedCmd(ctx *cli.Context, tuid string) error {
    log.Info("uploading to "+hosts[0], "tuid", tuid, "seed", seed)
    randomBytes := testutil.RandomBytes(seed, filesize*1000)

    errc := make(chan error)

    go func() {
        errc <- uploadSpeed(ctx, tuid, randomBytes)
    }()

    select {
    case err := <-errc:
        if err != nil {
            metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
        }
        return err
    case <-time.After(time.Duration(timeout) * time.Second):
        metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1)

        // trigger debug functionality on randomBytes

        return fmt.Errorf("timeout after %v sec", timeout)
    }
}

func uploadSpeed(c *cli.Context, tuid string, data []byte) error {
    t1 := time.Now()
    hash, err := upload(data, hosts[0])
    if err != nil {
        log.Error(err.Error())
        return err
    }
    metrics.GetOrRegisterCounter("upload-speed.upload-time", nil).Inc(int64(time.Since(t1)))

    fhash, err := digest(bytes.NewReader(data))
    if err != nil {
        log.Error(err.Error())
        return err
    }

    log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
    return nil
}
235
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-smoke/util.go
generated
vendored
Normal file
@ -0,0 +1,235 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "bytes"
    "context"
    "crypto/md5"
    crand "crypto/rand"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "net/http"
    "net/http/httptrace"
    "os"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/api"
    "github.com/ethereum/go-ethereum/swarm/api/client"
    "github.com/ethereum/go-ethereum/swarm/spancontext"
    opentracing "github.com/opentracing/opentracing-go"
    "github.com/pborman/uuid"
    cli "gopkg.in/urfave/cli.v1"
)

var (
    commandName = ""
    seed        = int(time.Now().UTC().UnixNano())
)

func init() {
    rand.Seed(int64(seed))
}

func httpEndpoint(host string) string {
    return fmt.Sprintf("http://%s:%d", host, httpPort)
}

func wsEndpoint(host string) string {
    return fmt.Sprintf("ws://%s:%d", host, wsPort)
}

func wrapCliCommand(name string, command func(*cli.Context, string) error) func(*cli.Context) error {
    return func(ctx *cli.Context) error {
        log.PrintOrigins(true)
        log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(false))))

        // test uuid
        tuid := uuid.New()[:8]

        commandName = name

        hosts = strings.Split(allhosts, ",")

        defer func(now time.Time) {
            totalTime := time.Since(now)
            log.Info("total time", "tuid", tuid, "time", totalTime, "kb", filesize)
            metrics.GetOrRegisterResettingTimer(name+".total-time", nil).Update(totalTime)
        }(time.Now())

        log.Info("smoke test starting", "tuid", tuid, "task", name, "timeout", timeout)
        metrics.GetOrRegisterCounter(name, nil).Inc(1)

        return command(ctx, tuid)
    }
}
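A small idiom worth noting in wrapCliCommand: defer func(now time.Time){...}(time.Now()) evaluates time.Now() immediately when the defer statement runs, so the deferred body measures the elapsed time when the wrapped command returns. A minimal illustration:

package main

import (
    "fmt"
    "time"
)

func work() {
    // time.Now() is captured here; the deferred body runs at return
    defer func(start time.Time) {
        fmt.Println("total time", time.Since(start))
    }(time.Now())

    time.Sleep(50 * time.Millisecond)
}

func main() { work() }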
func fetchFeed(topic string, user string, endpoint string, original []byte, ruid string) error {
    ctx, sp := spancontext.StartSpan(context.Background(), "feed-and-sync.fetch")
    defer sp.Finish()

    log.Trace("sleeping", "ruid", ruid)
    time.Sleep(3 * time.Second)

    log.Trace("http get request (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user)

    var tn time.Time
    reqUri := endpoint + "/bzz-feed:/?topic=" + topic + "&user=" + user
    req, _ := http.NewRequest("GET", reqUri, nil)

    opentracing.GlobalTracer().Inject(
        sp.Context(),
        opentracing.HTTPHeaders,
        opentracing.HTTPHeadersCarrier(req.Header))

    trace := client.GetClientTrace("feed-and-sync - http get", "feed-and-sync", ruid, &tn)

    req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
    transport := http.DefaultTransport

    //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}

    tn = time.Now()
    res, err := transport.RoundTrip(req)
    if err != nil {
        log.Error(err.Error(), "ruid", ruid)
        return err
    }

    log.Trace("http get response (feed)", "ruid", ruid, "api", endpoint, "topic", topic, "user", user, "code", res.StatusCode, "len", res.ContentLength)

    if res.StatusCode != 200 {
        return fmt.Errorf("expected status code %d, got %v (ruid %v)", 200, res.StatusCode, ruid)
    }

    defer res.Body.Close()

    rdigest, err := digest(res.Body)
    if err != nil {
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    if !bytes.Equal(rdigest, original) {
        err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)

    return nil
}

// fetch gets the requested `hash` from `endpoint` and compares it with the `original` file
func fetch(hash string, endpoint string, original []byte, ruid string, tuid string) error {
    ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
    defer sp.Finish()

    log.Info("http get request", "tuid", tuid, "ruid", ruid, "endpoint", endpoint, "hash", hash)

    var tn time.Time
    reqUri := endpoint + "/bzz:/" + hash + "/"
    req, _ := http.NewRequest("GET", reqUri, nil)

    opentracing.GlobalTracer().Inject(
        sp.Context(),
        opentracing.HTTPHeaders,
        opentracing.HTTPHeadersCarrier(req.Header))

    trace := client.GetClientTrace(commandName+" - http get", commandName, ruid, &tn)

    req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
    transport := http.DefaultTransport

    //transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}

    tn = time.Now()
    res, err := transport.RoundTrip(req)
    if err != nil {
        log.Error(err.Error(), "ruid", ruid)
        return err
    }
    log.Info("http get response", "tuid", tuid, "ruid", ruid, "endpoint", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)

    if res.StatusCode != 200 {
        err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    defer res.Body.Close()

    rdigest, err := digest(res.Body)
    if err != nil {
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    if !bytes.Equal(rdigest, original) {
        err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
        log.Warn(err.Error(), "ruid", ruid)
        return err
    }

    log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)

    return nil
}

// upload sends arbitrary bytes as a plaintext file to `endpoint` using the api client
func upload(data []byte, endpoint string) (string, error) {
    swarm := client.NewClient(endpoint)
    f := &client.File{
        ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
        ManifestEntry: api.ManifestEntry{
            ContentType: "text/plain",
            Mode:        0660,
            Size:        int64(len(data)),
        },
    }

    // upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
    return swarm.Upload(f, "", false)
}

func digest(r io.Reader) ([]byte, error) {
    h := md5.New()
    _, err := io.Copy(h, r)
    if err != nil {
        return nil, err
    }
    return h.Sum(nil), nil
}

// generates random data in heap buffer
func generateRandomData(datasize int) ([]byte, error) {
    b := make([]byte, datasize)
    c, err := crand.Read(b)
    if err != nil {
        return nil, err
    } else if c != datasize {
        return nil, errors.New("short read")
    }
    return b, nil
}
157
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-snapshot/create.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
    "strings"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p/simulations"
    "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    "github.com/ethereum/go-ethereum/swarm/network"
    "github.com/ethereum/go-ethereum/swarm/network/simulation"
    cli "gopkg.in/urfave/cli.v1"
)

// create is used as the entry function for "create" app command.
func create(ctx *cli.Context) error {
    log.PrintOrigins(true)
    log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(ctx.Int("verbosity")), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))

    if len(ctx.Args()) < 1 {
        return errors.New("argument should be the filename to verify or write-to")
    }
    filename, err := touchPath(ctx.Args()[0])
    if err != nil {
        return err
    }
    return createSnapshot(filename, ctx.Int("nodes"), strings.Split(ctx.String("services"), ","))
}

// createSnapshot creates a new snapshot on filesystem with provided filename,
// number of nodes and service names.
func createSnapshot(filename string, nodes int, services []string) (err error) {
    log.Debug("create snapshot", "filename", filename, "nodes", nodes, "services", services)

    sim := simulation.New(map[string]simulation.ServiceFunc{
        "bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
            addr := network.NewAddr(ctx.Config.Node())
            kad := network.NewKademlia(addr.Over(), network.NewKadParams())
            hp := network.NewHiveParams()
            hp.KeepAliveInterval = time.Duration(200) * time.Millisecond
            hp.Discovery = true // discovery must be enabled when creating a snapshot

            config := &network.BzzConfig{
                OverlayAddr:  addr.Over(),
                UnderlayAddr: addr.Under(),
                HiveParams:   hp,
            }
            return network.NewBzz(config, kad, nil, nil, nil), nil, nil
        },
    })
    defer sim.Close()

    _, err = sim.AddNodes(nodes)
    if err != nil {
        return fmt.Errorf("add nodes: %v", err)
    }

    err = sim.Net.ConnectNodesRing(nil)
    if err != nil {
        return fmt.Errorf("connect nodes: %v", err)
    }

    ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
    defer cancelSimRun()
    if _, err := sim.WaitTillHealthy(ctx); err != nil {
        return fmt.Errorf("wait for healthy kademlia: %v", err)
    }

    var snap *simulations.Snapshot
    if len(services) > 0 {
        // If service names are provided, include them in the snapshot.
        // But, check if "bzz" service is not among them to remove it
        // from the snapshot as it exists on snapshot creation.
        var removeServices []string
        var wantBzz bool
        for _, s := range services {
            if s == "bzz" {
                wantBzz = true
                break
            }
        }
        if !wantBzz {
            removeServices = []string{"bzz"}
        }
        snap, err = sim.Net.SnapshotWithServices(services, removeServices)
    } else {
        snap, err = sim.Net.Snapshot()
    }
    if err != nil {
        return fmt.Errorf("create snapshot: %v", err)
    }
    jsonsnapshot, err := json.Marshal(snap)
    if err != nil {
        return fmt.Errorf("json encode snapshot: %v", err)
    }
    return ioutil.WriteFile(filename, jsonsnapshot, 0666)
}

// touchPath creates an empty file and all subdirectories
// that are missing.
func touchPath(filename string) (string, error) {
    if path.IsAbs(filename) {
        if _, err := os.Stat(filename); err == nil {
            // path exists, overwrite
            return filename, nil
        }
    }

    d, f := path.Split(filename)
    dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
    if err != nil {
        return "", err
    }

    _, err = os.Stat(path.Join(dir, filename))
    if err == nil {
        // path exists, overwrite
        return filename, nil
    }

    dirPath := path.Join(dir, d)
    filePath := path.Join(dirPath, f)
    if d != "" {
        err = os.MkdirAll(dirPath, os.ModeDir)
        if err != nil {
            return "", err
        }
    }

    return filePath, nil
}
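Putting create.go together with main.go below: the utility spins up an in-memory simulation of --nodes bzz nodes, connects them in a ring, waits for a healthy kademlia, and serializes the resulting network as JSON to the single filename argument. An invocation consistent with the command and flags exercised by create_test.go below (filename illustrative):

swarm-snapshot create --nodes 15 --services bzz,pss snapshot.json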
143
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-snapshot/create_test.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "runtime"
    "sort"
    "strconv"
    "strings"
    "testing"

    "github.com/ethereum/go-ethereum/p2p/simulations"
)

// TestSnapshotCreate is a high level e2e test that tests for snapshot generation.
// It runs a few "create" commands with different flag values and loads generated
// snapshot files to validate their content.
func TestSnapshotCreate(t *testing.T) {
    if runtime.GOOS == "windows" {
        t.Skip()
    }

    for _, v := range []struct {
        name     string
        nodes    int
        services string
    }{
        {
            name: "defaults",
        },
        {
            name:  "more nodes",
            nodes: defaultNodes + 5,
        },
        {
            name:     "services",
            services: "stream,pss,zorglub",
        },
        {
            name:     "services with bzz",
            services: "bzz,pss",
        },
    } {
        t.Run(v.name, func(t *testing.T) {
            t.Parallel()

            file, err := ioutil.TempFile("", "swarm-snapshot")
            if err != nil {
                t.Fatal(err)
            }
            defer os.Remove(file.Name())

            if err = file.Close(); err != nil {
                t.Error(err)
            }

            args := []string{"create"}
            if v.nodes > 0 {
                args = append(args, "--nodes", strconv.Itoa(v.nodes))
            }
            if v.services != "" {
                args = append(args, "--services", v.services)
            }
            testCmd := runSnapshot(t, append(args, file.Name())...)

            testCmd.ExpectExit()
            if code := testCmd.ExitStatus(); code != 0 {
                t.Fatalf("command exit code %v, expected 0", code)
            }

            f, err := os.Open(file.Name())
            if err != nil {
                t.Fatal(err)
            }
            defer func() {
                err := f.Close()
                if err != nil {
                    t.Error("closing snapshot file", "err", err)
                }
            }()

            b, err := ioutil.ReadAll(f)
            if err != nil {
                t.Fatal(err)
            }
            var snap simulations.Snapshot
            err = json.Unmarshal(b, &snap)
            if err != nil {
                t.Fatal(err)
            }

            wantNodes := v.nodes
            if wantNodes == 0 {
                wantNodes = defaultNodes
            }
            gotNodes := len(snap.Nodes)
            if gotNodes != wantNodes {
                t.Errorf("got %v nodes, want %v", gotNodes, wantNodes)
            }

            if len(snap.Conns) == 0 {
                t.Error("no connections in a snapshot")
            }

            var wantServices []string
            if v.services != "" {
                wantServices = strings.Split(v.services, ",")
            } else {
                wantServices = []string{"bzz"}
            }
            // sort service names so they can be comparable
            // as strings to every node sorted services
            sort.Strings(wantServices)

            for i, n := range snap.Nodes {
                gotServices := n.Node.Config.Services
                sort.Strings(gotServices)
                if fmt.Sprint(gotServices) != fmt.Sprint(wantServices) {
                    t.Errorf("got services %v for node %v, want %v", gotServices, i, wantServices)
                }
            }

        })
    }
}
82
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-snapshot/main.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "os"

    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/log"
    cli "gopkg.in/urfave/cli.v1"
)

var gitCommit string // Git SHA1 commit hash of the release (set via linker flags)

// default value for the "create" command --nodes flag
const defaultNodes = 10

func main() {
    err := newApp().Run(os.Args)
    if err != nil {
        log.Error(err.Error())
        os.Exit(1)
    }
}

// newApp constructs a new instance of the Swarm Snapshot Utility.
// Its Run method is called on it in the main function and in tests.
func newApp() (app *cli.App) {
    app = utils.NewApp(gitCommit, "Swarm Snapshot Utility")

    app.Name = "swarm-snapshot"
    app.Usage = ""

    // app flags (for all commands)
    app.Flags = []cli.Flag{
        cli.IntFlag{
            Name:  "verbosity",
            Value: 1,
            Usage: "verbosity level",
        },
    }

    app.Commands = []cli.Command{
        {
            Name:    "create",
            Aliases: []string{"c"},
            Usage:   "create a swarm snapshot",
            Action:  create,
            // Flags only for "create" command.
            // Allow app flags to be specified after the
            // command argument.
            Flags: append(app.Flags,
                cli.IntFlag{
                    Name:  "nodes",
                    Value: defaultNodes,
                    Usage: "number of nodes",
                },
                cli.StringFlag{
                    Name:  "services",
                    Value: "bzz",
                    Usage: "comma separated list of services to boot the nodes with",
                },
            ),
        },
    }

    return app
}
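As a usage note: the new utility is a plain cli.App, so it can be driven from Go as well as from a shell. A hedged sketch, not part of the diff: the binary name, the create command, and its --nodes/--services flags come from the file above, and the trailing output-file argument follows the convention used by create_test.go; the node count and the path snapshot.json are illustrative.

package main

import (
    "log"
    "os/exec"
)

func main() {
    // Assumes a built swarm-snapshot binary on PATH. The last argument is
    // the file the snapshot JSON is written to.
    cmd := exec.Command("swarm-snapshot", "create", "--nodes", "8", "--services", "bzz,pss", "snapshot.json")
    if out, err := cmd.CombinedOutput(); err != nil {
        log.Fatalf("snapshot failed: %v\n%s", err, out)
    }
}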
49
vendor/github.com/ethereum/go-ethereum/cmd/swarm/swarm-snapshot/run_test.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "fmt"
    "os"
    "testing"

    "github.com/docker/docker/pkg/reexec"
    "github.com/ethereum/go-ethereum/internal/cmdtest"
)

func init() {
    reexec.Register("swarm-snapshot", func() {
        if err := newApp().Run(os.Args); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        os.Exit(0)
    })
}

func runSnapshot(t *testing.T, args ...string) *cmdtest.TestCmd {
    tt := cmdtest.NewTestCmd(t, nil)
    tt.Run("swarm-snapshot", args...)
    return tt
}

func TestMain(m *testing.M) {
    if reexec.Init() {
        return
    }
    os.Exit(m.Run())
}
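The harness above relies on docker's reexec trick: the test binary registers itself under the command name, re-executes itself as that command, and cmdtest drives it as a subprocess. A minimal standalone sketch of the same pattern, with an illustrative command name and output:

package main

import (
    "fmt"
    "os"

    "github.com/docker/docker/pkg/reexec"
)

func init() {
    // Register the function to run when this binary is re-executed
    // under the name "my-tool".
    reexec.Register("my-tool", func() {
        fmt.Println("running as my-tool")
        os.Exit(0)
    })
}

func main() {
    // reexec.Init returns true when the process was started under a
    // registered name; the registered function has then already run,
    // so the normal entry point must not fall through.
    if reexec.Init() {
        return
    }
    fmt.Println("running as the parent binary")
}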
69
vendor/github.com/ethereum/go-ethereum/cmd/utils/flags.go
generated
vendored
@ -57,7 +57,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -140,6 +140,10 @@ var (
|
||||
Name: "rinkeby",
|
||||
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
|
||||
}
|
||||
GoerliFlag = cli.BoolFlag{
|
||||
Name: "goerli",
|
||||
Usage: "Görli network: pre-configured proof-of-authority test network",
|
||||
}
|
||||
ConstantinopleOverrideFlag = cli.Uint64Flag{
|
||||
Name: "override.constantinople",
|
||||
Usage: "Manually specify constantinople fork-block, overriding the bundled setting",
|
||||
@ -614,14 +618,14 @@ var (
|
||||
Usage: "Password to authorize access to the database",
|
||||
Value: "test",
|
||||
}
|
||||
// The `host` tag is part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB.
|
||||
// It is used so that we can group all nodes and average a measurement across all of them, but also so
|
||||
// that we can select a specific node and inspect its measurements.
|
||||
// Tags are part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB.
|
||||
// For example `host` tag could be used so that we can group all nodes and average a measurement
|
||||
// across all of them, but also so that we can select a specific node and inspect its measurements.
|
||||
// https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key
|
||||
MetricsInfluxDBHostTagFlag = cli.StringFlag{
|
||||
Name: "metrics.influxdb.host.tag",
|
||||
Usage: "InfluxDB `host` tag attached to all measurements",
|
||||
Value: "localhost",
|
||||
MetricsInfluxDBTagsFlag = cli.StringFlag{
|
||||
Name: "metrics.influxdb.tags",
|
||||
Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements",
|
||||
Value: "host=localhost",
|
||||
}
|
||||
|
||||
EWASMInterpreterFlag = cli.StringFlag{
|
||||
@ -647,6 +651,9 @@ func MakeDataDir(ctx *cli.Context) string {
|
||||
if ctx.GlobalBool(RinkebyFlag.Name) {
|
||||
return filepath.Join(path, "rinkeby")
|
||||
}
|
||||
if ctx.GlobalBool(GoerliFlag.Name) {
|
||||
return filepath.Join(path, "goerli")
|
||||
}
|
||||
return path
|
||||
}
|
||||
Fatalf("Cannot determine default data directory, please set manually (--datadir)")
|
||||
@ -701,6 +708,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls = params.TestnetBootnodes
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
urls = params.RinkebyBootnodes
|
||||
case ctx.GlobalBool(GoerliFlag.Name):
|
||||
urls = params.GoerliBootnodes
|
||||
case cfg.BootstrapNodes != nil:
|
||||
return // already set, don't apply defaults.
|
||||
}
|
||||
@ -728,6 +737,8 @@ func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
|
||||
}
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
urls = params.RinkebyBootnodes
|
||||
case ctx.GlobalBool(GoerliFlag.Name):
|
||||
urls = params.GoerliBootnodes
|
||||
case cfg.BootstrapNodesV5 != nil:
|
||||
return // already set, don't apply defaults.
|
||||
}
|
||||
@ -836,10 +847,11 @@ func makeDatabaseHandles() int {
|
||||
if err != nil {
|
||||
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
|
||||
}
|
||||
if err := fdlimit.Raise(uint64(limit)); err != nil {
|
||||
raised, err := fdlimit.Raise(uint64(limit))
|
||||
if err != nil {
|
||||
Fatalf("Failed to raise file descriptor allowance: %v", err)
|
||||
}
|
||||
return limit / 2 // Leave half for networking and other stuff
|
||||
return int(raised / 2) // Leave half for networking and other stuff
|
||||
}
|
||||
|
||||
// MakeAddress converts an account specified directly as a hex encoded string or
|
||||
@ -980,7 +992,6 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
|
||||
setHTTP(ctx, cfg)
|
||||
setWS(ctx, cfg)
|
||||
setNodeUserIdent(ctx, cfg)
|
||||
|
||||
setDataDir(ctx, cfg)
|
||||
|
||||
if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
|
||||
@ -1004,6 +1015,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
|
||||
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
|
||||
case ctx.GlobalBool(GoerliFlag.Name):
|
||||
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
|
||||
}
|
||||
}
|
||||
|
||||
@ -1160,7 +1173,7 @@ func SetShhConfig(ctx *cli.Context, stack *node.Node, cfg *whisper.Config) {
|
||||
// SetEthConfig applies eth-related command line flags to the config.
|
||||
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
// Avoid conflicting network flags
|
||||
checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag)
|
||||
checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag, GoerliFlag)
|
||||
checkExclusive(ctx, LightServFlag, SyncModeFlag, "light")
|
||||
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
@ -1256,6 +1269,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.NetworkId = 4
|
||||
}
|
||||
cfg.Genesis = core.DefaultRinkebyGenesisBlock()
|
||||
case ctx.GlobalBool(GoerliFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 5
|
||||
}
|
||||
cfg.Genesis = core.DefaultGoerliGenesisBlock()
|
||||
case ctx.GlobalBool(DeveloperFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 1337
|
||||
@ -1360,18 +1378,35 @@ func SetupMetrics(ctx *cli.Context) {
|
||||
database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
|
||||
username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
|
||||
password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
|
||||
hosttag = ctx.GlobalString(MetricsInfluxDBHostTagFlag.Name)
|
||||
)
|
||||
|
||||
if enableExport {
|
||||
tagsMap := SplitTagsFlag(ctx.GlobalString(MetricsInfluxDBTagsFlag.Name))
|
||||
|
||||
log.Info("Enabling metrics export to InfluxDB")
|
||||
go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", map[string]string{
|
||||
"host": hosttag,
|
||||
})
|
||||
|
||||
go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", tagsMap)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func SplitTagsFlag(tagsFlag string) map[string]string {
|
||||
tags := strings.Split(tagsFlag, ",")
|
||||
tagsMap := map[string]string{}
|
||||
|
||||
for _, t := range tags {
|
||||
if t != "" {
|
||||
kv := strings.Split(t, "=")
|
||||
|
||||
if len(kv) == 2 {
|
||||
tagsMap[kv[0]] = kv[1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return tagsMap
|
||||
}
|
||||
|
||||
// MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails.
|
||||
func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
|
||||
var (
|
||||
@ -1396,6 +1431,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
|
||||
genesis = core.DefaultTestnetGenesisBlock()
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
genesis = core.DefaultRinkebyGenesisBlock()
|
||||
case ctx.GlobalBool(GoerliFlag.Name):
|
||||
genesis = core.DefaultGoerliGenesisBlock()
|
||||
case ctx.GlobalBool(DeveloperFlag.Name):
|
||||
Fatalf("Developer chains are ephemeral")
|
||||
}
|
||||
|
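Since SplitTagsFlag is exported, the new --metrics.influxdb.tags plumbing is easy to sanity-check in isolation. A minimal sketch using only the function and flag-value format shown above; malformed entries such as "smth=smthelse=123" are silently dropped, as the new test below confirms:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/cmd/utils"
)

func main() {
    // Equivalent to passing --metrics.influxdb.tags "host=localhost,bzzkey=123";
    // the resulting map is what SetupMetrics hands to InfluxDBWithTags.
    tags := utils.SplitTagsFlag("host=localhost,bzzkey=123")
    fmt.Println(tags) // map[bzzkey:123 host:localhost]
}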
64
vendor/github.com/ethereum/go-ethereum/cmd/utils/flags_test.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
    "reflect"
    "testing"
)

func Test_SplitTagsFlag(t *testing.T) {
    tests := []struct {
        name string
        args string
        want map[string]string
    }{
        {
            "2 tags case",
            "host=localhost,bzzkey=123",
            map[string]string{
                "host":   "localhost",
                "bzzkey": "123",
            },
        },
        {
            "1 tag case",
            "host=localhost123",
            map[string]string{
                "host": "localhost123",
            },
        },
        {
            "empty case",
            "",
            map[string]string{},
        },
        {
            "garbage",
            "smth=smthelse=123",
            map[string]string{},
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := SplitTagsFlag(tt.args); !reflect.DeepEqual(got, tt.want) {
                t.Errorf("splitTagsFlag() = %v, want %v", got, tt.want)
            }
        })
    }
}
11
vendor/github.com/ethereum/go-ethereum/common/fdlimit/fdlimit_freebsd.go
generated
vendored
@ -26,11 +26,11 @@ import "syscall"

// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func Raise(max uint64) error {
func Raise(max uint64) (uint64, error) {
    // Get the current limit
    var limit syscall.Rlimit
    if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
        return err
        return 0, err
    }
    // Try to update the limit to the max allowance
    limit.Cur = limit.Max
@ -38,9 +38,12 @@ func Raise(max uint64) error {
        limit.Cur = int64(max)
    }
    if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
        return err
        return 0, err
    }
    return nil
    if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
        return 0, err
    }
    return limit.Cur, nil
}

// Current retrieves the number of file descriptors allowed to be opened by this
2
vendor/github.com/ethereum/go-ethereum/common/fdlimit/fdlimit_test.go
generated
vendored
@ -36,7 +36,7 @@ func TestFileDescriptorLimits(t *testing.T) {
    if limit, err := Current(); err != nil || limit <= 0 {
        t.Fatalf("failed to retrieve file descriptor limit (%d): %v", limit, err)
    }
    if err := Raise(uint64(target)); err != nil {
    if _, err := Raise(uint64(target)); err != nil {
        t.Fatalf("failed to raise file allowance")
    }
    if limit, err := Current(); err != nil || limit < target {
13
vendor/github.com/ethereum/go-ethereum/common/fdlimit/fdlimit_unix.go
generated
vendored
@ -22,11 +22,12 @@ import "syscall"

// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func Raise(max uint64) error {
// Returns the size it was set to (may differ from the desired 'max')
func Raise(max uint64) (uint64, error) {
    // Get the current limit
    var limit syscall.Rlimit
    if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
        return err
        return 0, err
    }
    // Try to update the limit to the max allowance
    limit.Cur = limit.Max
@ -34,9 +35,13 @@ func Raise(max uint64) error {
        limit.Cur = max
    }
    if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
        return err
        return 0, err
    }
    return nil
    // MacOS can silently apply further caps, so retrieve the actually set limit
    if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
        return 0, err
    }
    return limit.Cur, nil
}

// Current retrieves the number of file descriptors allowed to be opened by this
14
vendor/github.com/ethereum/go-ethereum/common/fdlimit/fdlimit_windows.go
generated
vendored
@ -16,28 +16,30 @@

package fdlimit

import "errors"
import "fmt"

const hardlimit = 16384

// Raise tries to maximize the file descriptor allowance of this process
// to the maximum hard-limit allowed by the OS.
func Raise(max uint64) error {
func Raise(max uint64) (uint64, error) {
    // This method is NOP by design:
    //  * Linux/Darwin counterparts need to manually increase per process limits
    //  * On Windows Go uses the CreateFile API, which is limited to 16K files, non
    //    changeable from within a running process
    // This way we can always "request" raising the limits, which will either have
    // or not have effect based on the platform we're running on.
    if max > 16384 {
        return errors.New("file descriptor limit (16384) reached")
    if max > hardlimit {
        return hardlimit, fmt.Errorf("file descriptor limit (%d) reached", hardlimit)
    }
    return nil
    return max, nil
}

// Current retrieves the number of file descriptors allowed to be opened by this
// process.
func Current() (int, error) {
    // Please see Raise for the reason why we use hard coded 16K as the limit
    return 16384, nil
    return hardlimit, nil
}

// Maximum retrieves the maximum number of file descriptors this process is
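Taken together, the three platform variants change Raise from error-only to (uint64, error), with the first value being the limit that actually took effect. A caller-side sketch mirroring the makeDatabaseHandles change in flags.go above; fdlimit.Maximum reporting the hard limit is an assumption based on the trailing context of this file:

package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/common/fdlimit"
)

func main() {
    hard, err := fdlimit.Maximum()
    if err != nil {
        log.Fatalf("failed to retrieve file descriptor allowance: %v", err)
    }
    // Raise now reports what was actually granted, which may be lower than
    // requested: macOS can silently cap it, Windows always pins 16384.
    raised, err := fdlimit.Raise(uint64(hard))
    if err != nil {
        log.Fatalf("failed to raise file descriptor allowance: %v", err)
    }
    fmt.Println("usable handles:", int(raised/2)) // leave half for networking
}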
22
vendor/github.com/ethereum/go-ethereum/contracts/chequebook/api.go
generated
vendored
@ -27,40 +27,40 @@ const Version = "1.0"

var errNoChequebook = errors.New("no chequebook")

type Api struct {
type API struct {
    chequebookf func() *Chequebook
}

func NewApi(ch func() *Chequebook) *Api {
    return &Api{ch}
func NewAPI(ch func() *Chequebook) *API {
    return &API{ch}
}

func (self *Api) Balance() (string, error) {
    ch := self.chequebookf()
func (a *API) Balance() (string, error) {
    ch := a.chequebookf()
    if ch == nil {
        return "", errNoChequebook
    }
    return ch.Balance().String(), nil
}

func (self *Api) Issue(beneficiary common.Address, amount *big.Int) (cheque *Cheque, err error) {
    ch := self.chequebookf()
func (a *API) Issue(beneficiary common.Address, amount *big.Int) (cheque *Cheque, err error) {
    ch := a.chequebookf()
    if ch == nil {
        return nil, errNoChequebook
    }
    return ch.Issue(beneficiary, amount)
}

func (self *Api) Cash(cheque *Cheque) (txhash string, err error) {
    ch := self.chequebookf()
func (a *API) Cash(cheque *Cheque) (txhash string, err error) {
    ch := a.chequebookf()
    if ch == nil {
        return "", errNoChequebook
    }
    return ch.Cash(cheque)
}

func (self *Api) Deposit(amount *big.Int) (txhash string, err error) {
    ch := self.chequebookf()
func (a *API) Deposit(amount *big.Int) (txhash string, err error) {
    ch := a.chequebookf()
    if ch == nil {
        return "", errNoChequebook
    }
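For downstream callers the only change in this file is the Go-idiomatic rename Api -> API (and NewApi -> NewAPI); behaviour is unchanged. A migration sketch under that assumption; the package name and the helper are hypothetical, and book stands in for an already-constructed chequebook:

package chequebookdemo

import (
    "fmt"

    "github.com/ethereum/go-ethereum/contracts/chequebook"
)

// printBalance is a hypothetical helper, not part of the diff.
func printBalance(book *chequebook.Chequebook) error {
    // Before this change the constructor was chequebook.NewApi.
    api := chequebook.NewAPI(func() *chequebook.Chequebook { return book })

    balance, err := api.Balance() // balance as a decimal string
    if err != nil {
        return err
    }
    fmt.Println("chequebook balance:", balance)
    return nil
}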
280
vendor/github.com/ethereum/go-ethereum/contracts/chequebook/cheque.go
generated
vendored
@ -75,8 +75,8 @@ type Cheque struct {
    Sig         []byte // signature Sign(Keccak256(contract, beneficiary, amount), prvKey)
}

func (self *Cheque) String() string {
    return fmt.Sprintf("contract: %s, beneficiary: %s, amount: %v, signature: %x", self.Contract.Hex(), self.Beneficiary.Hex(), self.Amount, self.Sig)
func (ch *Cheque) String() string {
    return fmt.Sprintf("contract: %s, beneficiary: %s, amount: %v, signature: %x", ch.Contract.Hex(), ch.Beneficiary.Hex(), ch.Amount, ch.Sig)
}

type Params struct {
@ -109,8 +109,8 @@ type Chequebook struct {
    log log.Logger // contextual logger with the contract address embedded
}

func (self *Chequebook) String() string {
    return fmt.Sprintf("contract: %s, owner: %s, balance: %v, signer: %x", self.contractAddr.Hex(), self.owner.Hex(), self.balance, self.prvKey.PublicKey)
func (chbook *Chequebook) String() string {
    return fmt.Sprintf("contract: %s, owner: %s, balance: %v, signer: %x", chbook.contractAddr.Hex(), chbook.owner.Hex(), chbook.balance, chbook.prvKey.PublicKey)
}

// NewChequebook creates a new Chequebook.
@ -148,12 +148,12 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
    return
}

func (self *Chequebook) setBalanceFromBlockChain() {
    balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil)
func (chbook *Chequebook) setBalanceFromBlockChain() {
    balance, err := chbook.backend.BalanceAt(context.TODO(), chbook.contractAddr, nil)
    if err != nil {
        log.Error("Failed to retrieve chequebook balance", "err", err)
    } else {
        self.balance.Set(balance)
        chbook.balance.Set(balance)
    }
}

@ -187,19 +187,19 @@ type chequebookFile struct {
}

// UnmarshalJSON deserialises a chequebook.
func (self *Chequebook) UnmarshalJSON(data []byte) error {
func (chbook *Chequebook) UnmarshalJSON(data []byte) error {
    var file chequebookFile
    err := json.Unmarshal(data, &file)
    if err != nil {
        return err
    }
    _, ok := self.balance.SetString(file.Balance, 10)
    _, ok := chbook.balance.SetString(file.Balance, 10)
    if !ok {
        return fmt.Errorf("cumulative amount sent: unable to convert string to big integer: %v", file.Balance)
    }
    self.contractAddr = common.HexToAddress(file.Contract)
    chbook.contractAddr = common.HexToAddress(file.Contract)
    for addr, sent := range file.Sent {
        self.sent[common.HexToAddress(addr)], ok = new(big.Int).SetString(sent, 10)
        chbook.sent[common.HexToAddress(addr)], ok = new(big.Int).SetString(sent, 10)
        if !ok {
            return fmt.Errorf("beneficiary %v cumulative amount sent: unable to convert string to big integer: %v", addr, sent)
        }
@ -208,14 +208,14 @@ func (self *Chequebook) UnmarshalJSON(data []byte) error {
}

// MarshalJSON serialises a chequebook.
func (self *Chequebook) MarshalJSON() ([]byte, error) {
func (chbook *Chequebook) MarshalJSON() ([]byte, error) {
    var file = &chequebookFile{
        Balance:  self.balance.String(),
        Contract: self.contractAddr.Hex(),
        Owner:    self.owner.Hex(),
        Balance:  chbook.balance.String(),
        Contract: chbook.contractAddr.Hex(),
        Owner:    chbook.owner.Hex(),
        Sent:     make(map[string]string),
    }
    for addr, sent := range self.sent {
    for addr, sent := range chbook.sent {
        file.Sent[addr.Hex()] = sent.String()
    }
    return json.Marshal(file)
@ -223,67 +223,67 @@ func (self *Chequebook) MarshalJSON() ([]byte, error) {

// Save persists the chequebook on disk, remembering balance, contract address and
// cumulative amount of funds sent for each beneficiary.
func (self *Chequebook) Save() (err error) {
    data, err := json.MarshalIndent(self, "", " ")
func (chbook *Chequebook) Save() (err error) {
    data, err := json.MarshalIndent(chbook, "", " ")
    if err != nil {
        return err
    }
    self.log.Trace("Saving chequebook to disk", self.path)
    chbook.log.Trace("Saving chequebook to disk", chbook.path)

    return ioutil.WriteFile(self.path, data, os.ModePerm)
    return ioutil.WriteFile(chbook.path, data, os.ModePerm)
}

// Stop quits the autodeposit goroutine.
func (self *Chequebook) Stop() {
    defer self.lock.Unlock()
    self.lock.Lock()
    if self.quit != nil {
        close(self.quit)
        self.quit = nil
func (chbook *Chequebook) Stop() {
    defer chbook.lock.Unlock()
    chbook.lock.Lock()
    if chbook.quit != nil {
        close(chbook.quit)
        chbook.quit = nil
    }
}

// Issue creates a cheque signed by the chequebook owner's private key. The
// signer commits to a contract (one that they own), a beneficiary and amount.
func (self *Chequebook) Issue(beneficiary common.Address, amount *big.Int) (ch *Cheque, err error) {
    defer self.lock.Unlock()
    self.lock.Lock()
func (chbook *Chequebook) Issue(beneficiary common.Address, amount *big.Int) (ch *Cheque, err error) {
    defer chbook.lock.Unlock()
    chbook.lock.Lock()

    if amount.Sign() <= 0 {
        return nil, fmt.Errorf("amount must be greater than zero (%v)", amount)
    }
    if self.balance.Cmp(amount) < 0 {
        err = fmt.Errorf("insufficient funds to issue cheque for amount: %v. balance: %v", amount, self.balance)
    if chbook.balance.Cmp(amount) < 0 {
        err = fmt.Errorf("insufficient funds to issue cheque for amount: %v. balance: %v", amount, chbook.balance)
    } else {
        var sig []byte
        sent, found := self.sent[beneficiary]
        sent, found := chbook.sent[beneficiary]
        if !found {
            sent = new(big.Int)
            self.sent[beneficiary] = sent
            chbook.sent[beneficiary] = sent
        }
        sum := new(big.Int).Set(sent)
        sum.Add(sum, amount)

        sig, err = crypto.Sign(sigHash(self.contractAddr, beneficiary, sum), self.prvKey)
        sig, err = crypto.Sign(sigHash(chbook.contractAddr, beneficiary, sum), chbook.prvKey)
        if err == nil {
            ch = &Cheque{
                Contract:    self.contractAddr,
                Contract:    chbook.contractAddr,
                Beneficiary: beneficiary,
                Amount:      sum,
                Sig:         sig,
            }
            sent.Set(sum)
            self.balance.Sub(self.balance, amount) // subtract amount from balance
            chbook.balance.Sub(chbook.balance, amount) // subtract amount from balance
        }
    }

    // auto deposit if threshold is set and balance is less than threshold
    // note this is called even if issuing cheque fails
    // so we reattempt depositing
    if self.threshold != nil {
        if self.balance.Cmp(self.threshold) < 0 {
            send := new(big.Int).Sub(self.buffer, self.balance)
            self.deposit(send)
    if chbook.threshold != nil {
        if chbook.balance.Cmp(chbook.threshold) < 0 {
            send := new(big.Int).Sub(chbook.buffer, chbook.balance)
            chbook.deposit(send)
        }
    }

@ -291,8 +291,8 @@ func (self *Chequebook) Issue(beneficiary common.Address, amount *big.Int) (ch *
}

// Cash is a convenience method to cash any cheque.
func (self *Chequebook) Cash(ch *Cheque) (txhash string, err error) {
    return ch.Cash(self.session)
func (chbook *Chequebook) Cash(ch *Cheque) (txhash string, err error) {
    return ch.Cash(chbook.session)
}

// data to sign: contract address, beneficiary, cumulative amount of funds ever sent
@ -309,73 +309,73 @@ func sigHash(contract, beneficiary common.Address, sum *big.Int) []byte {
}

// Balance returns the current balance of the chequebook.
func (self *Chequebook) Balance() *big.Int {
    defer self.lock.Unlock()
    self.lock.Lock()
    return new(big.Int).Set(self.balance)
func (chbook *Chequebook) Balance() *big.Int {
    defer chbook.lock.Unlock()
    chbook.lock.Lock()
    return new(big.Int).Set(chbook.balance)
}

// Owner returns the owner account of the chequebook.
func (self *Chequebook) Owner() common.Address {
    return self.owner
func (chbook *Chequebook) Owner() common.Address {
    return chbook.owner
}

// Address returns the on-chain contract address of the chequebook.
func (self *Chequebook) Address() common.Address {
    return self.contractAddr
func (chbook *Chequebook) Address() common.Address {
    return chbook.contractAddr
}

// Deposit deposits money to the chequebook account.
func (self *Chequebook) Deposit(amount *big.Int) (string, error) {
    defer self.lock.Unlock()
    self.lock.Lock()
    return self.deposit(amount)
func (chbook *Chequebook) Deposit(amount *big.Int) (string, error) {
    defer chbook.lock.Unlock()
    chbook.lock.Lock()
    return chbook.deposit(amount)
}

// deposit deposits amount to the chequebook account.
// The caller must hold self.lock.
func (self *Chequebook) deposit(amount *big.Int) (string, error) {
func (chbook *Chequebook) deposit(amount *big.Int) (string, error) {
    // since the amount is variable here, we do not use sessions
    depositTransactor := bind.NewKeyedTransactor(self.prvKey)
    depositTransactor := bind.NewKeyedTransactor(chbook.prvKey)
    depositTransactor.Value = amount
    chbookRaw := &contract.ChequebookRaw{Contract: self.contract}
    chbookRaw := &contract.ChequebookRaw{Contract: chbook.contract}
    tx, err := chbookRaw.Transfer(depositTransactor)
    if err != nil {
        self.log.Warn("Failed to fund chequebook", "amount", amount, "balance", self.balance, "target", self.buffer, "err", err)
        chbook.log.Warn("Failed to fund chequebook", "amount", amount, "balance", chbook.balance, "target", chbook.buffer, "err", err)
        return "", err
    }
    // assume that transaction is actually successful, we add the amount to balance right away
    self.balance.Add(self.balance, amount)
    self.log.Trace("Deposited funds to chequebook", "amount", amount, "balance", self.balance, "target", self.buffer)
    chbook.balance.Add(chbook.balance, amount)
    chbook.log.Trace("Deposited funds to chequebook", "amount", amount, "balance", chbook.balance, "target", chbook.buffer)
    return tx.Hash().Hex(), nil
}

// AutoDeposit (re)sets interval time and amount which triggers sending funds to the
// chequebook. Contract backend needs to be set; if threshold is not less than buffer, then
// deposit will be triggered on every new cheque issued.
func (self *Chequebook) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) {
    defer self.lock.Unlock()
    self.lock.Lock()
    self.threshold = threshold
    self.buffer = buffer
    self.autoDeposit(interval)
func (chbook *Chequebook) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) {
    defer chbook.lock.Unlock()
    chbook.lock.Lock()
    chbook.threshold = threshold
    chbook.buffer = buffer
    chbook.autoDeposit(interval)
}

// autoDeposit starts a goroutine that periodically sends funds to the chequebook
// contract. The caller holds the lock; the goroutine terminates if Chequebook.quit is closed.
func (self *Chequebook) autoDeposit(interval time.Duration) {
    if self.quit != nil {
        close(self.quit)
        self.quit = nil
func (chbook *Chequebook) autoDeposit(interval time.Duration) {
    if chbook.quit != nil {
        close(chbook.quit)
        chbook.quit = nil
    }
    // if threshold >= balance autodeposit after every cheque issued
    if interval == time.Duration(0) || self.threshold != nil && self.buffer != nil && self.threshold.Cmp(self.buffer) >= 0 {
    if interval == time.Duration(0) || chbook.threshold != nil && chbook.buffer != nil && chbook.threshold.Cmp(chbook.buffer) >= 0 {
        return
    }

    ticker := time.NewTicker(interval)
    self.quit = make(chan bool)
    quit := self.quit
    chbook.quit = make(chan bool)
    quit := chbook.quit

    go func() {
        for {
@ -383,15 +383,15 @@ func (self *Chequebook) autoDeposit(interval time.Duration) {
            case <-quit:
                return
            case <-ticker.C:
                self.lock.Lock()
                if self.balance.Cmp(self.buffer) < 0 {
                    amount := new(big.Int).Sub(self.buffer, self.balance)
                    txhash, err := self.deposit(amount)
                chbook.lock.Lock()
                if chbook.balance.Cmp(chbook.buffer) < 0 {
                    amount := new(big.Int).Sub(chbook.buffer, chbook.balance)
                    txhash, err := chbook.deposit(amount)
                    if err == nil {
                        self.txhash = txhash
                        chbook.txhash = txhash
                    }
                }
                self.lock.Unlock()
                chbook.lock.Unlock()
            }
        }
    }()
@ -409,21 +409,21 @@ func NewOutbox(chbook *Chequebook, beneficiary common.Address) *Outbox {
}

// Issue creates a cheque.
func (self *Outbox) Issue(amount *big.Int) (swap.Promise, error) {
    return self.chequeBook.Issue(self.beneficiary, amount)
func (o *Outbox) Issue(amount *big.Int) (swap.Promise, error) {
    return o.chequeBook.Issue(o.beneficiary, amount)
}

// AutoDeposit enables auto-deposits on the underlying chequebook.
func (self *Outbox) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) {
    self.chequeBook.AutoDeposit(interval, threshold, buffer)
func (o *Outbox) AutoDeposit(interval time.Duration, threshold, buffer *big.Int) {
    o.chequeBook.AutoDeposit(interval, threshold, buffer)
}

// Stop helps satisfy the swap.OutPayment interface.
func (self *Outbox) Stop() {}
func (o *Outbox) Stop() {}

// String implements fmt.Stringer.
func (self *Outbox) String() string {
    return fmt.Sprintf("chequebook: %v, beneficiary: %s, balance: %v", self.chequeBook.Address().Hex(), self.beneficiary.Hex(), self.chequeBook.Balance())
func (o *Outbox) String() string {
    return fmt.Sprintf("chequebook: %v, beneficiary: %s, balance: %v", o.chequeBook.Address().Hex(), o.beneficiary.Hex(), o.chequeBook.Balance())
}

// Inbox can deposit, verify and cash cheques from a single contract to a single
@ -474,55 +474,55 @@ func NewInbox(prvKey *ecdsa.PrivateKey, contractAddr, beneficiary common.Address
    return
}

func (self *Inbox) String() string {
    return fmt.Sprintf("chequebook: %v, beneficiary: %s, balance: %v", self.contract.Hex(), self.beneficiary.Hex(), self.cheque.Amount)
func (i *Inbox) String() string {
    return fmt.Sprintf("chequebook: %v, beneficiary: %s, balance: %v", i.contract.Hex(), i.beneficiary.Hex(), i.cheque.Amount)
}

// Stop quits the autocash goroutine.
func (self *Inbox) Stop() {
    defer self.lock.Unlock()
    self.lock.Lock()
    if self.quit != nil {
        close(self.quit)
        self.quit = nil
func (i *Inbox) Stop() {
    defer i.lock.Unlock()
    i.lock.Lock()
    if i.quit != nil {
        close(i.quit)
        i.quit = nil
    }
}

// Cash attempts to cash the current cheque.
func (self *Inbox) Cash() (txhash string, err error) {
    if self.cheque != nil {
        txhash, err = self.cheque.Cash(self.session)
        self.log.Trace("Cashing in chequebook cheque", "amount", self.cheque.Amount, "beneficiary", self.beneficiary)
        self.cashed = self.cheque.Amount
func (i *Inbox) Cash() (txhash string, err error) {
    if i.cheque != nil {
        txhash, err = i.cheque.Cash(i.session)
        i.log.Trace("Cashing in chequebook cheque", "amount", i.cheque.Amount, "beneficiary", i.beneficiary)
        i.cashed = i.cheque.Amount
    }
    return
}

// AutoCash (re)sets maximum time and amount which triggers cashing of the last uncashed
// cheque. If maxUncashed is set to 0, then autocash on receipt.
func (self *Inbox) AutoCash(cashInterval time.Duration, maxUncashed *big.Int) {
    defer self.lock.Unlock()
    self.lock.Lock()
    self.maxUncashed = maxUncashed
    self.autoCash(cashInterval)
func (i *Inbox) AutoCash(cashInterval time.Duration, maxUncashed *big.Int) {
    defer i.lock.Unlock()
    i.lock.Lock()
    i.maxUncashed = maxUncashed
    i.autoCash(cashInterval)
}

// autoCash starts a loop that periodically clears the last cheque
// if the peer is trusted. Clearing period could be 24h or a week.
// The caller must hold self.lock.
func (self *Inbox) autoCash(cashInterval time.Duration) {
    if self.quit != nil {
        close(self.quit)
        self.quit = nil
func (i *Inbox) autoCash(cashInterval time.Duration) {
    if i.quit != nil {
        close(i.quit)
        i.quit = nil
    }
    // if maxUncashed is set to 0, then autocash on receipt
    if cashInterval == time.Duration(0) || self.maxUncashed != nil && self.maxUncashed.Sign() == 0 {
    if cashInterval == time.Duration(0) || i.maxUncashed != nil && i.maxUncashed.Sign() == 0 {
        return
    }

    ticker := time.NewTicker(cashInterval)
    self.quit = make(chan bool)
    quit := self.quit
    i.quit = make(chan bool)
    quit := i.quit

    go func() {
        for {
@ -530,14 +530,14 @@ func (self *Inbox) autoCash(cashInterval time.Duration) {
            case <-quit:
                return
            case <-ticker.C:
                self.lock.Lock()
                if self.cheque != nil && self.cheque.Amount.Cmp(self.cashed) != 0 {
                    txhash, err := self.Cash()
                i.lock.Lock()
                if i.cheque != nil && i.cheque.Amount.Cmp(i.cashed) != 0 {
                    txhash, err := i.Cash()
                    if err == nil {
                        self.txhash = txhash
                        i.txhash = txhash
                    }
                }
                self.lock.Unlock()
                i.lock.Unlock()
            }
        }
    }()
@ -545,56 +545,56 @@ func (self *Inbox) autoCash(cashInterval time.Duration) {

// Receive is called to deposit the latest cheque to the incoming Inbox.
// The given promise must be a *Cheque.
func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
func (i *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
    ch := promise.(*Cheque)

    defer self.lock.Unlock()
    self.lock.Lock()
    defer i.lock.Unlock()
    i.lock.Lock()

    var sum *big.Int
    if self.cheque == nil {
    if i.cheque == nil {
        // the sum is checked against the blockchain once a cheque is received
        tally, err := self.session.Sent(self.beneficiary)
        tally, err := i.session.Sent(i.beneficiary)
        if err != nil {
            return nil, fmt.Errorf("inbox: error calling backend to set amount: %v", err)
        }
        sum = tally
    } else {
        sum = self.cheque.Amount
        sum = i.cheque.Amount
    }

    amount, err := ch.Verify(self.signer, self.contract, self.beneficiary, sum)
    amount, err := ch.Verify(i.signer, i.contract, i.beneficiary, sum)
    var uncashed *big.Int
    if err == nil {
        self.cheque = ch
        i.cheque = ch

        if self.maxUncashed != nil {
            uncashed = new(big.Int).Sub(ch.Amount, self.cashed)
            if self.maxUncashed.Cmp(uncashed) < 0 {
                self.Cash()
        if i.maxUncashed != nil {
            uncashed = new(big.Int).Sub(ch.Amount, i.cashed)
            if i.maxUncashed.Cmp(uncashed) < 0 {
                i.Cash()
            }
        }
        self.log.Trace("Received cheque in chequebook inbox", "amount", amount, "uncashed", uncashed)
        i.log.Trace("Received cheque in chequebook inbox", "amount", amount, "uncashed", uncashed)
    }

    return amount, err
}

// Verify verifies cheque for signer, contract, beneficiary, amount, valid signature.
func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) {
    log.Trace("Verifying chequebook cheque", "cheque", self, "sum", sum)
func (ch *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) {
    log.Trace("Verifying chequebook cheque", "cheque", ch, "sum", sum)
    if sum == nil {
        return nil, fmt.Errorf("invalid amount")
    }

    if self.Beneficiary != beneficiary {
        return nil, fmt.Errorf("beneficiary mismatch: %v != %v", self.Beneficiary.Hex(), beneficiary.Hex())
    if ch.Beneficiary != beneficiary {
        return nil, fmt.Errorf("beneficiary mismatch: %v != %v", ch.Beneficiary.Hex(), beneficiary.Hex())
    }
    if self.Contract != contract {
        return nil, fmt.Errorf("contract mismatch: %v != %v", self.Contract.Hex(), contract.Hex())
    if ch.Contract != contract {
        return nil, fmt.Errorf("contract mismatch: %v != %v", ch.Contract.Hex(), contract.Hex())
    }

    amount := new(big.Int).Set(self.Amount)
    amount := new(big.Int).Set(ch.Amount)
    if sum != nil {
        amount.Sub(amount, sum)
        if amount.Sign() <= 0 {
@ -602,7 +602,7 @@ func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary com
        }
    }

    pubKey, err := crypto.SigToPub(sigHash(self.Contract, beneficiary, self.Amount), self.Sig)
    pubKey, err := crypto.SigToPub(sigHash(ch.Contract, beneficiary, ch.Amount), ch.Sig)
    if err != nil {
        return nil, fmt.Errorf("invalid signature: %v", err)
    }
@ -621,9 +621,9 @@ func sig2vrs(sig []byte) (v byte, r, s [32]byte) {
}

// Cash cashes the cheque by sending an Ethereum transaction.
func (self *Cheque) Cash(session *contract.ChequebookSession) (string, error) {
    v, r, s := sig2vrs(self.Sig)
    tx, err := session.Cash(self.Beneficiary, self.Amount, v, r, s)
func (ch *Cheque) Cash(session *contract.ChequebookSession) (string, error) {
    v, r, s := sig2vrs(ch.Sig)
    tx, err := session.Cash(ch.Beneficiary, ch.Amount, v, r, s)
    if err != nil {
        return "", err
    }
12
vendor/github.com/ethereum/go-ethereum/contracts/chequebook/contract/chequebook.go
generated
vendored
@ -205,22 +205,22 @@ func (_Chequebook *ChequebookCallerSession) Sent(arg0 common.Address) (*big.Int,
// Cash is a paid mutator transaction binding the contract method 0xfbf788d6.
//
// Solidity: function cash(beneficiary address, amount uint256, sig_v uint8, sig_r bytes32, sig_s bytes32) returns()
func (_Chequebook *ChequebookTransactor) Cash(opts *bind.TransactOpts, beneficiary common.Address, amount *big.Int, sig_v uint8, sig_r [32]byte, sig_s [32]byte) (*types.Transaction, error) {
    return _Chequebook.contract.Transact(opts, "cash", beneficiary, amount, sig_v, sig_r, sig_s)
func (_Chequebook *ChequebookTransactor) Cash(opts *bind.TransactOpts, beneficiary common.Address, amount *big.Int, sigV uint8, sigR [32]byte, sigS [32]byte) (*types.Transaction, error) {
    return _Chequebook.contract.Transact(opts, "cash", beneficiary, amount, sigV, sigR, sigS)
}

// Cash is a paid mutator transaction binding the contract method 0xfbf788d6.
//
// Solidity: function cash(beneficiary address, amount uint256, sig_v uint8, sig_r bytes32, sig_s bytes32) returns()
func (_Chequebook *ChequebookSession) Cash(beneficiary common.Address, amount *big.Int, sig_v uint8, sig_r [32]byte, sig_s [32]byte) (*types.Transaction, error) {
    return _Chequebook.Contract.Cash(&_Chequebook.TransactOpts, beneficiary, amount, sig_v, sig_r, sig_s)
func (_Chequebook *ChequebookSession) Cash(beneficiary common.Address, amount *big.Int, sigV uint8, sigR [32]byte, sigS [32]byte) (*types.Transaction, error) {
    return _Chequebook.Contract.Cash(&_Chequebook.TransactOpts, beneficiary, amount, sigV, sigR, sigS)
}

// Cash is a paid mutator transaction binding the contract method 0xfbf788d6.
//
// Solidity: function cash(beneficiary address, amount uint256, sig_v uint8, sig_r bytes32, sig_s bytes32) returns()
func (_Chequebook *ChequebookTransactorSession) Cash(beneficiary common.Address, amount *big.Int, sig_v uint8, sig_r [32]byte, sig_s [32]byte) (*types.Transaction, error) {
    return _Chequebook.Contract.Cash(&_Chequebook.TransactOpts, beneficiary, amount, sig_v, sig_r, sig_s)
func (_Chequebook *ChequebookTransactorSession) Cash(beneficiary common.Address, amount *big.Int, sigV uint8, sigR [32]byte, sigS [32]byte) (*types.Transaction, error) {
    return _Chequebook.Contract.Cash(&_Chequebook.TransactOpts, beneficiary, amount, sigV, sigR, sigS)
}

// Kill is a paid mutator transaction binding the contract method 0x41c0e1b5.
18
vendor/github.com/ethereum/go-ethereum/contracts/ens/contract/ens.go
generated
vendored
@ -227,10 +227,10 @@ func (_ENS *ENSCallerSession) Resolver(node [32]byte) (common.Address, error) {
    return _ENS.Contract.Resolver(&_ENS.CallOpts, node)
}

// Ttl is a free data retrieval call binding the contract method 0x16a25cbd.
// TTL is a free data retrieval call binding the contract method 0x16a25cbd.
//
// Solidity: function ttl(node bytes32) constant returns(uint64)
func (_ENS *ENSCaller) Ttl(opts *bind.CallOpts, node [32]byte) (uint64, error) {
func (_ENS *ENSCaller) TTL(opts *bind.CallOpts, node [32]byte) (uint64, error) {
    var (
        ret0 = new(uint64)
    )
@ -239,18 +239,18 @@ func (_ENS *ENSCaller) Ttl(opts *bind.CallOpts, node [32]byte) (uint64, error) {
    return *ret0, err
}

// Ttl is a free data retrieval call binding the contract method 0x16a25cbd.
// TTL is a free data retrieval call binding the contract method 0x16a25cbd.
//
// Solidity: function ttl(node bytes32) constant returns(uint64)
func (_ENS *ENSSession) Ttl(node [32]byte) (uint64, error) {
    return _ENS.Contract.Ttl(&_ENS.CallOpts, node)
func (_ENS *ENSSession) TTL(node [32]byte) (uint64, error) {
    return _ENS.Contract.TTL(&_ENS.CallOpts, node)
}

// Ttl is a free data retrieval call binding the contract method 0x16a25cbd.
// TTL is a free data retrieval call binding the contract method 0x16a25cbd.
//
// Solidity: function ttl(node bytes32) constant returns(uint64)
func (_ENS *ENSCallerSession) Ttl(node [32]byte) (uint64, error) {
    return _ENS.Contract.Ttl(&_ENS.CallOpts, node)
func (_ENS *ENSCallerSession) TTL(node [32]byte) (uint64, error) {
    return _ENS.Contract.TTL(&_ENS.CallOpts, node)
}

// SetOwner is a paid mutator transaction binding the contract method 0x5b0fc9c3.
@ -682,7 +682,7 @@ func (it *ENSNewTTLIterator) Close() error {
// ENSNewTTL represents a NewTTL event raised by the ENS contract.
type ENSNewTTL struct {
    Node [32]byte
    Ttl  uint64
    TTL  uint64
    Raw  types.Log // Blockchain specific contextual infos
}

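The generated binding's Ttl accessors become TTL (matching Go initialism style), and the ENSNewTTL event field follows suit; callers only need the rename. A sketch against an existing session, with the package alias and helper name being illustrative:

package ensdemo

import (
    enscontract "github.com/ethereum/go-ethereum/contracts/ens/contract"
)

// nodeTTL is a hypothetical helper showing the renamed accessor.
func nodeTTL(session *enscontract.ENSSession, node [32]byte) (uint64, error) {
    // Before this change the call was session.Ttl(node).
    return session.TTL(node)
}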
69
vendor/github.com/ethereum/go-ethereum/contracts/ens/ens.go
generated
vendored
@ -35,7 +35,7 @@ var (
TestNetAddress = common.HexToAddress("0x112234455c3a32fd11230c42e7bccd4a84e02010")
|
||||
)
|
||||
|
||||
// swarm domain name registry and resolver
|
||||
// ENS is the swarm domain name registry and resolver
|
||||
type ENS struct {
|
||||
*contract.ENSSession
|
||||
contractBackend bind.ContractBackend
|
||||
@ -48,7 +48,6 @@ func NewENS(transactOpts *bind.TransactOpts, contractAddr common.Address, contra
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ENS{
|
||||
&contract.ENSSession{
|
||||
Contract: ens,
|
||||
@ -60,27 +59,24 @@ func NewENS(transactOpts *bind.TransactOpts, contractAddr common.Address, contra
|
||||
|
||||
// DeployENS deploys an instance of the ENS nameservice, with a 'first-in, first-served' root registrar.
|
||||
func DeployENS(transactOpts *bind.TransactOpts, contractBackend bind.ContractBackend) (common.Address, *ENS, error) {
|
||||
// Deploy the ENS registry.
|
||||
// Deploy the ENS registry
|
||||
ensAddr, _, _, err := contract.DeployENS(transactOpts, contractBackend)
|
||||
if err != nil {
|
||||
return ensAddr, nil, err
|
||||
}
|
||||
|
||||
ens, err := NewENS(transactOpts, ensAddr, contractBackend)
|
||||
if err != nil {
|
||||
return ensAddr, nil, err
|
||||
}
|
||||
|
||||
// Deploy the registrar.
|
||||
// Deploy the registrar
|
||||
regAddr, _, _, err := contract.DeployFIFSRegistrar(transactOpts, contractBackend, ensAddr, [32]byte{})
|
||||
if err != nil {
|
||||
return ensAddr, nil, err
|
||||
}
|
||||
// Set the registrar as owner of the ENS root.
|
||||
// Set the registrar as owner of the ENS root
|
||||
if _, err = ens.SetOwner([32]byte{}, regAddr); err != nil {
|
||||
return ensAddr, nil, err
|
||||
}
|
||||
|
||||
return ensAddr, ens, nil
|
||||
}
|
||||
|
||||
@ -89,10 +85,9 @@ func ensParentNode(name string) (common.Hash, common.Hash) {
|
||||
label := crypto.Keccak256Hash([]byte(parts[0]))
|
||||
if len(parts) == 1 {
|
||||
return [32]byte{}, label
|
||||
} else {
|
||||
parentNode, parentLabel := ensParentNode(parts[1])
|
||||
return crypto.Keccak256Hash(parentNode[:], parentLabel[:]), label
|
||||
}
|
||||
parentNode, parentLabel := ensParentNode(parts[1])
|
||||
return crypto.Keccak256Hash(parentNode[:], parentLabel[:]), label
|
||||
}
|
||||
|
||||
func EnsNode(name string) common.Hash {
|
||||
@ -100,111 +95,101 @@ func EnsNode(name string) common.Hash {
|
||||
return crypto.Keccak256Hash(parentNode[:], parentLabel[:])
|
||||
}
|
||||
|
||||
func (self *ENS) getResolver(node [32]byte) (*contract.PublicResolverSession, error) {
|
||||
resolverAddr, err := self.Resolver(node)
|
||||
func (ens *ENS) getResolver(node [32]byte) (*contract.PublicResolverSession, error) {
|
||||
resolverAddr, err := ens.Resolver(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resolver, err := contract.NewPublicResolver(resolverAddr, self.contractBackend)
|
||||
resolver, err := contract.NewPublicResolver(resolverAddr, ens.contractBackend)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &contract.PublicResolverSession{
|
||||
Contract: resolver,
|
||||
TransactOpts: self.TransactOpts,
|
||||
TransactOpts: ens.TransactOpts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (self *ENS) getRegistrar(node [32]byte) (*contract.FIFSRegistrarSession, error) {
|
||||
registrarAddr, err := self.Owner(node)
|
||||
func (ens *ENS) getRegistrar(node [32]byte) (*contract.FIFSRegistrarSession, error) {
|
||||
registrarAddr, err := ens.Owner(node)
|
||||
if err != nil {
|
 		return nil, err
 	}
 
-	registrar, err := contract.NewFIFSRegistrar(registrarAddr, self.contractBackend)
+	registrar, err := contract.NewFIFSRegistrar(registrarAddr, ens.contractBackend)
 	if err != nil {
 		return nil, err
 	}
 
 	return &contract.FIFSRegistrarSession{
 		Contract:     registrar,
-		TransactOpts: self.TransactOpts,
+		TransactOpts: ens.TransactOpts,
 	}, nil
 }
 
 // Resolve is a non-transactional call that returns the content hash associated with a name.
-func (self *ENS) Resolve(name string) (common.Hash, error) {
+func (ens *ENS) Resolve(name string) (common.Hash, error) {
 	node := EnsNode(name)
 
-	resolver, err := self.getResolver(node)
+	resolver, err := ens.getResolver(node)
 	if err != nil {
 		return common.Hash{}, err
 	}
 
 	ret, err := resolver.Content(node)
 	if err != nil {
 		return common.Hash{}, err
 	}
 
 	return common.BytesToHash(ret[:]), nil
 }
 
 // Addr is a non-transactional call that returns the address associated with a name.
-func (self *ENS) Addr(name string) (common.Address, error) {
+func (ens *ENS) Addr(name string) (common.Address, error) {
 	node := EnsNode(name)
 
-	resolver, err := self.getResolver(node)
+	resolver, err := ens.getResolver(node)
 	if err != nil {
 		return common.Address{}, err
 	}
 
 	ret, err := resolver.Addr(node)
 	if err != nil {
 		return common.Address{}, err
 	}
 
 	return common.BytesToAddress(ret[:]), nil
 }
 
 // SetAddress sets the address associated with a name. Only works if the caller
 // owns the name, and the associated resolver implements a `setAddress` function.
-func (self *ENS) SetAddr(name string, addr common.Address) (*types.Transaction, error) {
+func (ens *ENS) SetAddr(name string, addr common.Address) (*types.Transaction, error) {
 	node := EnsNode(name)
 
-	resolver, err := self.getResolver(node)
+	resolver, err := ens.getResolver(node)
 	if err != nil {
 		return nil, err
 	}
 
-	opts := self.TransactOpts
+	opts := ens.TransactOpts
 	opts.GasLimit = 200000
 	return resolver.Contract.SetAddr(&opts, node, addr)
 }
 
 // Register registers a new domain name for the caller, making them the owner of the new name.
 // Only works if the registrar for the parent domain implements the FIFS registrar protocol.
-func (self *ENS) Register(name string) (*types.Transaction, error) {
+func (ens *ENS) Register(name string) (*types.Transaction, error) {
 	parentNode, label := ensParentNode(name)
-	registrar, err := self.getRegistrar(parentNode)
+	registrar, err := ens.getRegistrar(parentNode)
 	if err != nil {
 		return nil, err
 	}
-	return registrar.Contract.Register(&self.TransactOpts, label, self.TransactOpts.From)
+	return registrar.Contract.Register(&ens.TransactOpts, label, ens.TransactOpts.From)
 }
 
 // SetContentHash sets the content hash associated with a name. Only works if the caller
 // owns the name, and the associated resolver implements a `setContent` function.
-func (self *ENS) SetContentHash(name string, hash common.Hash) (*types.Transaction, error) {
+func (ens *ENS) SetContentHash(name string, hash common.Hash) (*types.Transaction, error) {
 	node := EnsNode(name)
 
-	resolver, err := self.getResolver(node)
+	resolver, err := ens.getResolver(node)
 	if err != nil {
 		return nil, err
 	}
 
-	opts := self.TransactOpts
+	opts := ens.TransactOpts
 	opts.GasLimit = 200000
 	return resolver.Contract.SetContent(&opts, node, hash)
 }
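The hunk above is a mechanical rename of the method receiver from `self` to `ens` (modern Go style discourages `self`). As a usage note, a minimal sketch of driving the renamed wrapper follows; it is not part of the diff, and the RPC endpoint and registry address (the mainnet ENS registry of this era) are assumptions for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/contracts/ens"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Assumed local node endpoint; substitute your own.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// 0x314159... was the mainnet ENS registry address at the time.
	registry, err := ens.NewENS(&bind.TransactOpts{}, common.HexToAddress("0x314159265dD8dbb310642f98f50C066173C1259b"), client)
	if err != nil {
		log.Fatal(err)
	}
	// Resolve is one of the renamed-receiver methods touched by this hunk.
	hash, err := registry.Resolve("example.eth")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hash.Hex())
}
```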
34
vendor/github.com/ethereum/go-ethereum/core/blockchain.go
generated
vendored
@@ -972,20 +972,26 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
 			triedb.Cap(limit - ethdb.IdealBatchSize)
 		}
 		// Find the next state trie we need to commit
-		header := bc.GetHeaderByNumber(current - triesInMemory)
-		chosen := header.Number.Uint64()
+		chosen := current - triesInMemory
 
 		// If we exceeded out time allowance, flush an entire trie to disk
 		if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
-			// If we're exceeding limits but haven't reached a large enough memory gap,
-			// warn the user that the system is becoming unstable.
-			if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
-				log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
+			// If the header is missing (canonical chain behind), we're reorging a low
+			// diff sidechain. Suspend committing until this operation is completed.
+			header := bc.GetHeaderByNumber(chosen)
+			if header == nil {
+				log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
+			} else {
+				// If we're exceeding limits but haven't reached a large enough memory gap,
+				// warn the user that the system is becoming unstable.
+				if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
+					log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory)
+				}
+				// Flush an entire trie and restart the counters
+				triedb.Commit(header.Root, true)
+				lastWrite = chosen
+				bc.gcproc = 0
 			}
-			// Flush an entire trie and restart the counters
-			triedb.Commit(header.Root, true)
-			lastWrite = chosen
-			bc.gcproc = 0
 		}
 		// Garbage collect anything below our required write retention
 		for !bc.triegc.Empty() {
@@ -1136,7 +1142,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
 	switch {
 	// First block is pruned, insert as sidechain and reorg only if TD grows enough
 	case err == consensus.ErrPrunedAncestor:
-		return bc.insertSidechain(it)
+		return bc.insertSidechain(block, it)
 
 	// First block is future, shove it (and all children) to the future queue (unknown ancestor)
 	case err == consensus.ErrFutureBlock || (err == consensus.ErrUnknownAncestor && bc.futureBlocks.Contains(it.first().ParentHash())):
@@ -1278,7 +1284,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
 //
 // The method writes all (header-and-body-valid) blocks to disk, then tries to
 // switch over to the new chain if the TD exceeded the current chain.
-func (bc *BlockChain) insertSidechain(it *insertIterator) (int, []interface{}, []*types.Log, error) {
+func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
 	var (
 		externTd *big.Int
 		current  = bc.CurrentBlock().NumberU64()
@@ -1287,7 +1293,7 @@ func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
 	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
 	// ones. Any other errors means that the block is invalid, and should not be written
 	// to disk.
-	block, err := it.current(), consensus.ErrPrunedAncestor
+	err := consensus.ErrPrunedAncestor
 	for ; block != nil && (err == consensus.ErrPrunedAncestor); block, err = it.next() {
 		// Check the canonical state root for that number
 		if number := block.NumberU64(); current >= number {
@@ -1317,7 +1323,7 @@ func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
 			if err := bc.WriteBlockWithoutState(block, externTd); err != nil {
 				return it.index, nil, nil, err
 			}
-			log.Debug("Inserted sidechain block", "number", block.Number(), "hash", block.Hash(),
+			log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
 				"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
 				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
 				"root", block.Root())
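The first hunk fixes a crash during low-difficulty sidechain reorgs: `GetHeaderByNumber(current - triesInMemory)` can return nil while the canonical chain is behind, so the new code derives `chosen` arithmetically and postpones the trie commit whenever the header is missing. A self-contained sketch of that gating logic follows (not part of the diff; the parameters are hypothetical stand-ins for `bc.gcproc`, `bc.cacheConfig.TrieTimeLimit` and the in-memory trie window):

```go
package main

import "fmt"

// Hypothetical stand-in for the real constant in core/blockchain.go.
const triesInMemory = 128

// commitDecision mirrors the gating introduced above: only flush a trie once
// the accumulated processing time exceeds the allowance, postpone while a
// sidechain reorg leaves the canonical header unavailable, and warn when the
// gap since the last write is still smaller than the in-memory window.
func commitDecision(current, lastWrite uint64, gcproc, timeLimit float64, headerKnown bool) string {
	chosen := current - triesInMemory
	if gcproc <= timeLimit {
		return "keep accumulating"
	}
	if !headerKnown {
		return "reorg in progress, postpone commit"
	}
	if chosen < lastWrite+triesInMemory && gcproc >= 2*timeLimit {
		return "commit now (warn: state in memory too long)"
	}
	return "commit now"
}

func main() {
	fmt.Println(commitDecision(1000, 800, 7.0, 5.0, false)) // reorg in progress, postpone commit
	fmt.Println(commitDecision(1000, 800, 12.0, 5.0, true)) // commit now (warn: state in memory too long)
}
```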
8
vendor/github.com/ethereum/go-ethereum/core/blockchain_insert.go
generated
vendored
@@ -111,14 +111,6 @@ func (it *insertIterator) next() (*types.Block, error) {
 	return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
 }
 
-// current returns the current block that's being processed.
-func (it *insertIterator) current() *types.Block {
-	if it.index < 0 || it.index+1 >= len(it.chain) {
-		return nil
-	}
-	return it.chain[it.index]
-}
-
 // previous returns the previous block was being processed, or nil
 func (it *insertIterator) previous() *types.Block {
 	if it.index < 1 {
55
vendor/github.com/ethereum/go-ethereum/core/blockchain_test.go
generated
vendored
@@ -1483,3 +1483,58 @@ func BenchmarkBlockChain_1x1000Executions(b *testing.B) {
 
 	benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
 }
+
+// Tests that importing a very large side fork, which is larger than the canon chain,
+// but where the difficulty per block is kept low: this means that it will not
+// overtake the 'canon' chain until after it's passed canon by about 200 blocks.
+//
+// Details at:
+//  - https://github.com/ethereum/go-ethereum/issues/18977
+//  - https://github.com/ethereum/go-ethereum/pull/18988
+func TestLowDiffLongChain(t *testing.T) {
+	// Generate a canonical chain to act as the main dataset
+	engine := ethash.NewFaker()
+	db := ethdb.NewMemDatabase()
+	genesis := new(Genesis).MustCommit(db)
+
+	// We must use a pretty long chain to ensure that the fork doesn't overtake us
+	// until after at least 128 blocks post tip
+	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 6*triesInMemory, func(i int, b *BlockGen) {
+		b.SetCoinbase(common.Address{1})
+		b.OffsetTime(-9)
+	})
+
+	// Import the canonical chain
+	diskdb := ethdb.NewMemDatabase()
+	new(Genesis).MustCommit(diskdb)
+
+	chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil)
+	if err != nil {
+		t.Fatalf("failed to create tester chain: %v", err)
+	}
+	if n, err := chain.InsertChain(blocks); err != nil {
+		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
+	}
+	// Generate fork chain, starting from an early block
+	parent := blocks[10]
+	fork, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 8*triesInMemory, func(i int, b *BlockGen) {
+		b.SetCoinbase(common.Address{2})
+	})
+
+	// And now import the fork
+	if i, err := chain.InsertChain(fork); err != nil {
+		t.Fatalf("block %d: failed to insert into chain: %v", i, err)
+	}
+	head := chain.CurrentBlock()
+	if got := fork[len(fork)-1].Hash(); got != head.Hash() {
+		t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
+	}
+	// Sanity check that all the canonical numbers are present
+	header := chain.CurrentHeader()
+	for number := head.NumberU64(); number > 0; number-- {
+		if hash := chain.GetHeaderByNumber(number).Hash(); hash != header.Hash() {
+			t.Fatalf("header %d: canonical hash mismatch: have %x, want %x", number, hash, header.Hash())
+		}
+		header = chain.GetHeader(header.ParentHash, number-1)
+	}
+}
2
vendor/github.com/ethereum/go-ethereum/core/chain_makers.go
generated
vendored
@@ -149,7 +149,7 @@ func (b *BlockGen) PrevBlock(index int) *types.Block {
 // associated difficulty. It's useful to test scenarios where forking is not
 // tied to chain length directly.
 func (b *BlockGen) OffsetTime(seconds int64) {
-	b.header.Time.Add(b.header.Time, new(big.Int).SetInt64(seconds))
+	b.header.Time.Add(b.header.Time, big.NewInt(seconds))
 	if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
 		panic("block time out of range")
 	}
14
vendor/github.com/ethereum/go-ethereum/core/genesis.go
generated
vendored
@@ -157,7 +157,6 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constantinopleOverride *big.Int) (*params.ChainConfig, common.Hash, error) {
 	if genesis != nil && genesis.Config == nil {
 		return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
 	}
-
 	// Just commit the new block if there is no stored genesis block.
 	stored := rawdb.ReadCanonicalHash(db, 0)
 	if (stored == common.Hash{}) {
@@ -183,6 +182,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, constantinopleOverride *big.Int) (*params.ChainConfig, common.Hash, error) {
 	newcfg := genesis.configOrDefault(stored)
 	if constantinopleOverride != nil {
 		newcfg.ConstantinopleBlock = constantinopleOverride
+		newcfg.PetersburgBlock = constantinopleOverride
 	}
 	storedcfg := rawdb.ReadChainConfig(db, stored)
 	if storedcfg == nil {
@@ -339,6 +339,18 @@ func DefaultRinkebyGenesisBlock() *Genesis {
 	}
 }
 
+// DefaultGoerliGenesisBlock returns the Görli network genesis block.
+func DefaultGoerliGenesisBlock() *Genesis {
+	return &Genesis{
+		Config:     params.GoerliChainConfig,
+		Timestamp:  1548854791,
+		ExtraData:  hexutil.MustDecode("0x22466c6578692069732061207468696e6722202d204166726900000000000000e0a2bd4258d2768837baa26a28fe71dc079f84c70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
+		GasLimit:   10485760,
+		Difficulty: big.NewInt(1),
+		Alloc:      decodePrealloc(goerliAllocData),
+	}
+}
+
 // DeveloperGenesisBlock returns the 'geth --dev' genesis block. Note, this must
 // be seeded with the
 func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
1
vendor/github.com/ethereum/go-ethereum/core/genesis_alloc.go
generated
vendored
File diff suppressed because one or more lines are too long
4
vendor/github.com/ethereum/go-ethereum/core/vm/gas_table.go
generated
vendored
@@ -121,7 +121,9 @@ func gasSStore(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
 		current = evm.StateDB.GetState(contract.Address(), common.BigToHash(x))
 	)
 	// The legacy gas metering only takes into consideration the current state
-	if !evm.chainRules.IsConstantinople {
+	// Legacy rules should be applied if we are in Petersburg (removal of EIP-1283)
+	// OR Constantinople is not active
+	if evm.chainRules.IsPetersburg || !evm.chainRules.IsConstantinople {
 		// This checks for 3 scenario's and calculates gas accordingly:
 		//
 		// 1. From a zero-value address to a non-zero value (NEW VALUE)
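The new predicate encodes that Petersburg removes EIP-1283 net gas metering while the rest of Constantinople stays active, so legacy SSTORE pricing applies whenever Petersburg is in effect or Constantinople is not. A small illustrative sketch follows (not part of the diff; `rules` is a hypothetical mirror of `params.Rules`):

```go
package main

import "fmt"

// Hypothetical mirror of the relevant params.Rules fields.
type rules struct {
	IsConstantinople, IsPetersburg bool
}

// useLegacySStoreMetering reproduces the predicate from gasSStore above.
func useLegacySStoreMetering(r rules) bool {
	return r.IsPetersburg || !r.IsConstantinople
}

func main() {
	fmt.Println(useLegacySStoreMetering(rules{IsConstantinople: true, IsPetersburg: true}))  // true: legacy metering
	fmt.Println(useLegacySStoreMetering(rules{IsConstantinople: true, IsPetersburg: false})) // false: EIP-1283 metering
}
```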
6
vendor/github.com/ethereum/go-ethereum/core/vm/logger_json.go
generated
vendored
@@ -34,7 +34,11 @@ type JSONLogger struct {
 // NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
 // into the provided stream.
 func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger {
-	return &JSONLogger{json.NewEncoder(writer), cfg}
+	l := &JSONLogger{json.NewEncoder(writer), cfg}
+	if l.cfg == nil {
+		l.cfg = &LogConfig{}
+	}
+	return l
 }
 
 func (l *JSONLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {
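The constructor now normalizes a nil `*LogConfig` to an empty config, so callers no longer risk a nil dereference during tracing. A usage sketch (the surrounding program is illustrative, not part of the diff):

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	// A nil *LogConfig is now normalized to &LogConfig{} inside the logger.
	logger := vm.NewJSONLogger(nil, os.Stderr)
	// Hook the tracer into an EVM run; Debug must be set for tracers to fire.
	_ = vm.Config{Debug: true, Tracer: logger}
}
```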
12
vendor/github.com/ethereum/go-ethereum/eth/api_tracer.go
generated
vendored
@@ -214,7 +214,8 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) {
 					log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err)
 					break
 				}
-				task.statedb.Finalise(true)
+				// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
+				task.statedb.Finalise(api.eth.blockchain.Config().IsEIP158(task.block.Number()))
 				task.results[i] = &txTraceResult{Result: res}
 			}
 			// Stream the result back to the user or abort on teardown
@@ -506,7 +507,8 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) {
 				break
 			}
 			// Finalize the state so any modifications are written to the trie
-			statedb.Finalise(true)
+			// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
+			statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
 		}
 		close(jobs)
 		pend.Wait()
@@ -608,7 +610,8 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) {
 			return dumps, err
 		}
 		// Finalize the state so any modifications are written to the trie
-		statedb.Finalise(true)
+		// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
+		statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
 
 		// If we've traced the transaction we were looking for, abort
 		if tx.Hash() == txHash {
@@ -799,7 +802,8 @@ func (api *PrivateDebugAPI) computeTxEnv(blockHash common.Hash, txIndex int, reexec uint64) (core.Message, vm.Context, *state.StateDB, error) {
 			return nil, vm.Context{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
 		}
 		// Ensure any modifications are committed to the state
-		statedb.Finalise(true)
+		// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
+		statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
 	}
 	return nil, vm.Context{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, blockHash)
 }
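All four tracer paths now pass the fork-dependent flag to `Finalise`, so empty-account deletion only happens for blocks where EIP-158/161 (Spurious Dragon) is active, matching consensus behaviour for pre-fork blocks. A sketch of how the flag varies by block number on mainnet (illustrative, not from the diff):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Empty-object deletion is only correct where EIP-158/161 is in effect;
	// on mainnet that is block 2,675,000 (Spurious Dragon) onwards.
	cfg := params.MainnetChainConfig
	for _, n := range []int64{1000000, 3000000} {
		fmt.Println(n, "delete empty objects:", cfg.IsEIP158(big.NewInt(n)))
	}
}
```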
31
vendor/github.com/ethereum/go-ethereum/internal/build/util.go
generated
vendored
@@ -177,3 +177,34 @@ func ExpandPackagesNoVendor(patterns []string) []string {
 	}
 	return patterns
 }
+
+// UploadSFTP uploads files to a remote host using the sftp command line tool.
+// The destination host may be specified either as [user@]host[: or as a URI in
+// the form sftp://[user@]host[:port].
+func UploadSFTP(identityFile, host, dir string, files []string) error {
+	sftp := exec.Command("sftp")
+	sftp.Stdout = nil
+	sftp.Stderr = os.Stderr
+	if identityFile != "" {
+		sftp.Args = append(sftp.Args, "-i", identityFile)
+	}
+	sftp.Args = append(sftp.Args, host)
+	fmt.Println(">>>", strings.Join(sftp.Args, " "))
+	if *DryRunFlag {
+		return nil
+	}
+
+	stdin, err := sftp.StdinPipe()
+	if err != nil {
+		return fmt.Errorf("can't create stdin pipe for sftp: %v", err)
+	}
+	if err := sftp.Start(); err != nil {
+		return err
+	}
+	in := io.MultiWriter(stdin, os.Stdout)
+	for _, f := range files {
+		fmt.Fprintln(in, "put", f, path.Join(dir, filepath.Base(f)))
+	}
+	stdin.Close()
+	return sftp.Wait()
+}
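This helper backs the new SFTP upload path for launchpad builds (see the `-sftp-user` change to .travis.yml in this commit). It shells out to the `sftp` binary, so that tool must be on PATH, and because the package lives under `internal/` it is only importable from inside the go-ethereum tree (build/ci.go is the real caller). A hedged call sketch with entirely hypothetical file names, key and host:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/internal/build"
)

func main() {
	// All values here are hypothetical; build/ci.go derives the real ones
	// from the -sftp-user flag and the decrypted CI identity file.
	files := []string{"geth_1.8.23.dsc", "geth_1.8.23.tar.gz"}
	err := build.UploadSFTP("ci-key.pem", "geth-ci@ppa.launchpad.net", "~geth-ci/ubuntu", files)
	if err != nil {
		log.Fatal(err)
	}
}
```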
5
vendor/github.com/ethereum/go-ethereum/metrics/registry.go
generated
vendored
@@ -312,8 +312,9 @@ func (r *PrefixedRegistry) UnregisterAll() {
 }
 
 var (
-	DefaultRegistry   = NewRegistry()
-	EphemeralRegistry = NewRegistry()
+	DefaultRegistry    = NewRegistry()
+	EphemeralRegistry  = NewRegistry()
+	AccountingRegistry = NewRegistry() // registry used in swarm
 )
 
 // Call the given function for each registered metric.
3
vendor/github.com/ethereum/go-ethereum/p2p/discover/node.go
generated
vendored
@@ -33,7 +33,8 @@ import (
 // The fields of Node may not be modified.
 type node struct {
 	enode.Node
-	addedAt time.Time // time when the node was added to the table
+	addedAt        time.Time // time when the node was added to the table
+	livenessChecks uint      // how often liveness was checked
 }
 
 type encPubkey [64]byte
193
vendor/github.com/ethereum/go-ethereum/p2p/discover/table.go
generated
vendored
@@ -75,8 +75,10 @@ type Table struct {
 	net        transport
 	refreshReq chan chan struct{}
 	initDone   chan struct{}
-	closeReq   chan struct{}
-	closed     chan struct{}
+
+	closeOnce sync.Once
+	closeReq  chan struct{}
+	closed    chan struct{}
 
 	nodeAddedHook func(*node) // for testing
 }
@@ -180,16 +182,14 @@ func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
 
 // Close terminates the network listener and flushes the node database.
 func (tab *Table) Close() {
-	if tab.net != nil {
-		tab.net.close()
-	}
-
-	select {
-	case <-tab.closed:
-		// already closed.
-	case tab.closeReq <- struct{}{}:
-		<-tab.closed // wait for refreshLoop to end.
-	}
+	tab.closeOnce.Do(func() {
+		if tab.net != nil {
+			tab.net.close()
+		}
+		// Wait for loop to end.
+		close(tab.closeReq)
+		<-tab.closed
+	})
 }
 
 // setFallbackNodes sets the initial points of contact. These nodes
@@ -290,12 +290,16 @@ func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
 			// we have asked all closest nodes, stop the search
 			break
 		}
-		// wait for the next reply
-		for _, n := range <-reply {
-			if n != nil && !seen[n.ID()] {
-				seen[n.ID()] = true
-				result.push(n, bucketSize)
+		select {
+		case nodes := <-reply:
+			for _, n := range nodes {
+				if n != nil && !seen[n.ID()] {
+					seen[n.ID()] = true
+					result.push(n, bucketSize)
+				}
 			}
+		case <-tab.closeReq:
+			return nil // shutdown, no need to continue.
 		}
 		pendingQueries--
 	}
@@ -303,24 +307,28 @@ func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
 }
 
 func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
-	fails := tab.db.FindFails(n.ID())
+	fails := tab.db.FindFails(n.ID(), n.IP())
 	r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
-	if err != nil || len(r) == 0 {
+	if err == errClosed {
+		// Avoid recording failures on shutdown.
+		reply <- nil
+		return
+	} else if err != nil || len(r) == 0 {
 		fails++
-		tab.db.UpdateFindFails(n.ID(), fails)
+		tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
 		log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
 		if fails >= maxFindnodeFailures {
 			log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
 			tab.delete(n)
 		}
 	} else if fails > 0 {
-		tab.db.UpdateFindFails(n.ID(), fails-1)
+		tab.db.UpdateFindFails(n.ID(), n.IP(), fails-1)
 	}
 
 	// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
 	// just remove those again during revalidation.
 	for _, n := range r {
-		tab.add(n)
+		tab.addSeenNode(n)
 	}
 	reply <- r
 }
@@ -329,7 +337,7 @@ func (tab *Table) refresh() <-chan struct{} {
 	done := make(chan struct{})
 	select {
 	case tab.refreshReq <- done:
-	case <-tab.closed:
+	case <-tab.closeReq:
 		close(done)
 	}
 	return done
@@ -433,9 +441,9 @@ func (tab *Table) loadSeedNodes() {
 	seeds = append(seeds, tab.nursery...)
 	for i := range seeds {
 		seed := seeds[i]
-		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
+		age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID(), seed.IP())) }}
 		log.Trace("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
-		tab.add(seed)
+		tab.addSeenNode(seed)
 	}
 }
 
@@ -458,16 +466,17 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
 	b := tab.buckets[bi]
 	if err == nil {
 		// The node responded, move it to the front.
-		log.Debug("Revalidated node", "b", bi, "id", last.ID())
-		b.bump(last)
+		last.livenessChecks++
+		log.Debug("Revalidated node", "b", bi, "id", last.ID(), "checks", last.livenessChecks)
+		tab.bumpInBucket(b, last)
 		return
 	}
 	// No reply received, pick a replacement or delete the node if there aren't
 	// any replacements.
 	if r := tab.replace(b, last); r != nil {
-		log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "r", r.ID(), "rip", r.IP())
+		log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks, "r", r.ID(), "rip", r.IP())
 	} else {
-		log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP())
+		log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "checks", last.livenessChecks)
 	}
 }
 
@@ -502,7 +511,7 @@ func (tab *Table) copyLiveNodes() {
 	now := time.Now()
 	for _, b := range &tab.buckets {
 		for _, n := range b.entries {
-			if now.Sub(n.addedAt) >= seedMinTableTime {
+			if n.livenessChecks > 0 && now.Sub(n.addedAt) >= seedMinTableTime {
 				tab.db.UpdateNode(unwrapNode(n))
 			}
 		}
@@ -518,7 +527,9 @@ func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
 	close := &nodesByDistance{target: target}
 	for _, b := range &tab.buckets {
 		for _, n := range b.entries {
-			close.push(n, nresults)
+			if n.livenessChecks > 0 {
+				close.push(n, nresults)
+			}
 		}
 	}
 	return close
@@ -540,12 +551,12 @@ func (tab *Table) bucket(id enode.ID) *bucket {
 	return tab.buckets[d-bucketMinDistance-1]
 }
 
-// add attempts to add the given node to its corresponding bucket. If the bucket has space
-// available, adding the node succeeds immediately. Otherwise, the node is added if the
-// least recently active node in the bucket does not respond to a ping packet.
+// addSeenNode adds a node which may or may not be live to the end of a bucket. If the
+// bucket has space available, adding the node succeeds immediately. Otherwise, the node is
+// added to the replacements list.
 //
 // The caller must not hold tab.mutex.
-func (tab *Table) add(n *node) {
+func (tab *Table) addSeenNode(n *node) {
 	if n.ID() == tab.self().ID() {
 		return
 	}
@@ -553,39 +564,67 @@ func (tab *Table) addSeenNode(n *node) {
 	tab.mutex.Lock()
 	defer tab.mutex.Unlock()
 	b := tab.bucket(n.ID())
-	if !tab.bumpOrAdd(b, n) {
-		// Node is not in table. Add it to the replacement list.
+	if contains(b.entries, n.ID()) {
+		// Already in bucket, don't add.
+		return
+	}
+	if len(b.entries) >= bucketSize {
+		// Bucket full, maybe add as replacement.
 		tab.addReplacement(b, n)
+		return
+	}
+	if !tab.addIP(b, n.IP()) {
+		// Can't add: IP limit reached.
+		return
+	}
+	// Add to end of bucket:
+	b.entries = append(b.entries, n)
+	b.replacements = deleteNode(b.replacements, n)
+	n.addedAt = time.Now()
+	if tab.nodeAddedHook != nil {
+		tab.nodeAddedHook(n)
 	}
 }
 
-// addThroughPing adds the given node to the table. Compared to plain
-// 'add' there is an additional safety measure: if the table is still
-// initializing the node is not added. This prevents an attack where the
-// table could be filled by just sending ping repeatedly.
+// addVerifiedNode adds a node whose existence has been verified recently to the front of a
+// bucket. If the node is already in the bucket, it is moved to the front. If the bucket
+// has no space, the node is added to the replacements list.
+//
+// There is an additional safety measure: if the table is still initializing the node
+// is not added. This prevents an attack where the table could be filled by just sending
+// ping repeatedly.
 //
 // The caller must not hold tab.mutex.
-func (tab *Table) addThroughPing(n *node) {
+func (tab *Table) addVerifiedNode(n *node) {
 	if !tab.isInitDone() {
 		return
 	}
-	tab.add(n)
-}
+	if n.ID() == tab.self().ID() {
+		return
+	}
 
-// stuff adds nodes the table to the end of their corresponding bucket
-// if the bucket is not full. The caller must not hold tab.mutex.
-func (tab *Table) stuff(nodes []*node) {
 	tab.mutex.Lock()
 	defer tab.mutex.Unlock()
-
-	for _, n := range nodes {
-		if n.ID() == tab.self().ID() {
-			continue // don't add self
-		}
-		b := tab.bucket(n.ID())
-		if len(b.entries) < bucketSize {
-			tab.bumpOrAdd(b, n)
-		}
+	b := tab.bucket(n.ID())
+	if tab.bumpInBucket(b, n) {
+		// Already in bucket, moved to front.
+		return
+	}
+	if len(b.entries) >= bucketSize {
+		// Bucket full, maybe add as replacement.
+		tab.addReplacement(b, n)
+		return
+	}
+	if !tab.addIP(b, n.IP()) {
+		// Can't add: IP limit reached.
+		return
+	}
+	// Add to front of bucket.
+	b.entries, _ = pushNode(b.entries, n, bucketSize)
+	b.replacements = deleteNode(b.replacements, n)
+	n.addedAt = time.Now()
+	if tab.nodeAddedHook != nil {
+		tab.nodeAddedHook(n)
 	}
 }
 
@@ -657,12 +696,21 @@ func (tab *Table) replace(b *bucket, last *node) *node {
 	return r
 }
 
-// bump moves the given node to the front of the bucket entry list
+// bumpInBucket moves the given node to the front of the bucket entry list
 // if it is contained in that list.
-func (b *bucket) bump(n *node) bool {
+func (tab *Table) bumpInBucket(b *bucket, n *node) bool {
 	for i := range b.entries {
 		if b.entries[i].ID() == n.ID() {
-			// move it to the front
+			if !n.IP().Equal(b.entries[i].IP()) {
+				// Endpoint has changed, ensure that the new IP fits into table limits.
+				tab.removeIP(b, b.entries[i].IP())
+				if !tab.addIP(b, n.IP()) {
+					// It doesn't, put the previous one back.
+					tab.addIP(b, b.entries[i].IP())
+					return false
+				}
+			}
+			// Move it to the front.
 			copy(b.entries[1:], b.entries[:i])
 			b.entries[0] = n
 			return true
@@ -671,29 +719,20 @@ func (tab *Table) bumpInBucket(b *bucket, n *node) bool {
 	return false
 }
 
-// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
-// full. The return value is true if n is in the bucket.
-func (tab *Table) bumpOrAdd(b *bucket, n *node) bool {
-	if b.bump(n) {
-		return true
-	}
-	if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP()) {
-		return false
-	}
-	b.entries, _ = pushNode(b.entries, n, bucketSize)
-	b.replacements = deleteNode(b.replacements, n)
-	n.addedAt = time.Now()
-	if tab.nodeAddedHook != nil {
-		tab.nodeAddedHook(n)
-	}
-	return true
-}
-
 func (tab *Table) deleteInBucket(b *bucket, n *node) {
 	b.entries = deleteNode(b.entries, n)
 	tab.removeIP(b, n.IP())
 }
 
+func contains(ns []*node, id enode.ID) bool {
+	for _, n := range ns {
+		if n.ID() == id {
+			return true
+		}
+	}
+	return false
+}
+
 // pushNode adds n to the front of list, keeping at most max items.
 func pushNode(list []*node, n *node, max int) ([]*node, *node) {
 	if len(list) < max {
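The table now separates nodes that were merely observed (`addSeenNode`, appended to the bucket tail) from nodes with a fresh endpoint proof (`addVerifiedNode`, moved to the front), and `closest` only serves entries with `livenessChecks > 0`, so unverified addresses are never handed out to other peers. A toy sketch of that split follows (illustrative only; `entry` and `bucket` are hypothetical stand-ins for the real discover types):

```go
package main

import "fmt"

// entry and bucket are hypothetical minimal stand-ins for the real node and
// bucket types in p2p/discover.
type entry struct {
	id             string
	livenessChecks uint
}

type bucket struct{ entries []entry }

// addSeen appends an unverified node to the tail (cf. addSeenNode above).
func (b *bucket) addSeen(id string) {
	b.entries = append(b.entries, entry{id: id})
}

// addVerified promotes a node with a fresh endpoint proof to the front
// (cf. addVerifiedNode above).
func (b *bucket) addVerified(id string) {
	b.entries = append([]entry{{id: id, livenessChecks: 1}}, b.entries...)
}

// served mirrors Table.closest: only nodes with at least one successful
// liveness check are ever returned to other peers.
func (b *bucket) served() []string {
	var out []string
	for _, e := range b.entries {
		if e.livenessChecks > 0 {
			out = append(out, e.id)
		}
	}
	return out
}

func main() {
	var b bucket
	b.addSeen("unverified-node")
	b.addVerified("verified-node")
	fmt.Println(b.served()) // [verified-node]
}
```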
130
vendor/github.com/ethereum/go-ethereum/p2p/discover/table_test.go
generated
vendored
@@ -30,6 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/p2p/enr"
+	"github.com/ethereum/go-ethereum/p2p/netutil"
 )
 
 func TestTable_pingReplace(t *testing.T) {
@@ -50,8 +51,8 @@ func TestTable_pingReplace(t *testing.T) {
 func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
 	transport := newPingRecorder()
 	tab, db := newTestTable(transport)
-	defer tab.Close()
 	defer db.Close()
+	defer tab.Close()
 
 	<-tab.initDone
 
@@ -64,7 +65,7 @@ func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
 	// its bucket if it is unresponsive. Revalidate again to ensure that
 	transport.dead[last.ID()] = !lastInBucketIsResponding
 	transport.dead[pingSender.ID()] = !newNodeIsResponding
-	tab.add(pingSender)
+	tab.addSeenNode(pingSender)
 	tab.doRevalidate(make(chan struct{}, 1))
 	tab.doRevalidate(make(chan struct{}, 1))
 
@@ -114,10 +115,14 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
 	}
 
 	prop := func(nodes []*node, bumps []int) (ok bool) {
+		tab, db := newTestTable(newPingRecorder())
+		defer db.Close()
+		defer tab.Close()
+
 		b := &bucket{entries: make([]*node, len(nodes))}
 		copy(b.entries, nodes)
 		for i, pos := range bumps {
-			b.bump(b.entries[pos])
+			tab.bumpInBucket(b, b.entries[pos])
 			if hasDuplicates(b.entries) {
 				t.Logf("bucket has duplicates after %d/%d bumps:", i+1, len(bumps))
 				for _, n := range b.entries {
@@ -126,6 +131,7 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
 				return false
 			}
 		}
+		checkIPLimitInvariant(t, tab)
 		return true
 	}
 	if err := quick.Check(prop, cfg); err != nil {
@@ -137,33 +143,51 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
 func TestTable_IPLimit(t *testing.T) {
 	transport := newPingRecorder()
 	tab, db := newTestTable(transport)
-	defer tab.Close()
 	defer db.Close()
+	defer tab.Close()
 
 	for i := 0; i < tableIPLimit+1; i++ {
 		n := nodeAtDistance(tab.self().ID(), i, net.IP{172, 0, 1, byte(i)})
-		tab.add(n)
+		tab.addSeenNode(n)
 	}
 	if tab.len() > tableIPLimit {
 		t.Errorf("too many nodes in table")
 	}
+	checkIPLimitInvariant(t, tab)
 }
 
 // This checks that the per-bucket IP limit is applied correctly.
 func TestTable_BucketIPLimit(t *testing.T) {
 	transport := newPingRecorder()
 	tab, db := newTestTable(transport)
-	defer tab.Close()
 	defer db.Close()
+	defer tab.Close()
 
 	d := 3
 	for i := 0; i < bucketIPLimit+1; i++ {
 		n := nodeAtDistance(tab.self().ID(), d, net.IP{172, 0, 1, byte(i)})
-		tab.add(n)
+		tab.addSeenNode(n)
 	}
 	if tab.len() > bucketIPLimit {
 		t.Errorf("too many nodes in table")
 	}
+	checkIPLimitInvariant(t, tab)
 }
 
+// checkIPLimitInvariant checks that ip limit sets contain an entry for every
+// node in the table and no extra entries.
+func checkIPLimitInvariant(t *testing.T, tab *Table) {
+	t.Helper()
+
+	tabset := netutil.DistinctNetSet{Subnet: tableSubnet, Limit: tableIPLimit}
+	for _, b := range tab.buckets {
+		for _, n := range b.entries {
+			tabset.Add(n.IP())
+		}
+	}
+	if tabset.String() != tab.ips.String() {
+		t.Errorf("table IP set is incorrect:\nhave: %v\nwant: %v", tab.ips, tabset)
+	}
+}
+
 func TestTable_closest(t *testing.T) {
@@ -173,9 +197,9 @@ func TestTable_closest(t *testing.T) {
 	// for any node table, Target and N
 	transport := newPingRecorder()
 	tab, db := newTestTable(transport)
-	defer tab.Close()
 	defer db.Close()
-	tab.stuff(test.All)
+	defer tab.Close()
+	fillTable(tab, test.All)
 
 	// check that closest(Target, N) returns nodes
 	result := tab.closest(test.Target, test.N).entries
@@ -234,13 +258,13 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) {
 	test := func(buf []*enode.Node) bool {
 		transport := newPingRecorder()
 		tab, db := newTestTable(transport)
-		defer tab.Close()
 		defer db.Close()
+		defer tab.Close()
 		<-tab.initDone
 
 		for i := 0; i < len(buf); i++ {
 			ld := cfg.Rand.Intn(len(tab.buckets))
-			tab.stuff([]*node{nodeAtDistance(tab.self().ID(), ld, intIP(ld))})
+			fillTable(tab, []*node{nodeAtDistance(tab.self().ID(), ld, intIP(ld))})
 		}
 		gotN := tab.ReadRandomNodes(buf)
 		if gotN != tab.len() {
@@ -272,16 +296,82 @@ func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {
 		N:       rand.Intn(bucketSize),
 	}
 	for _, id := range gen([]enode.ID{}, rand).([]enode.ID) {
-		n := enode.SignNull(new(enr.Record), id)
-		t.All = append(t.All, wrapNode(n))
+		r := new(enr.Record)
+		r.Set(enr.IP(genIP(rand)))
+		n := wrapNode(enode.SignNull(r, id))
+		n.livenessChecks = 1
+		t.All = append(t.All, n)
 	}
 	return reflect.ValueOf(t)
 }
 
+func TestTable_addVerifiedNode(t *testing.T) {
+	tab, db := newTestTable(newPingRecorder())
+	<-tab.initDone
+	defer db.Close()
+	defer tab.Close()
+
+	// Insert two nodes.
+	n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1})
+	n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2})
+	tab.addSeenNode(n1)
+	tab.addSeenNode(n2)
+
+	// Verify bucket content:
+	bcontent := []*node{n1, n2}
+	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
+		t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries)
+	}
+
+	// Add a changed version of n2.
+	newrec := n2.Record()
+	newrec.Set(enr.IP{99, 99, 99, 99})
+	newn2 := wrapNode(enode.SignNull(newrec, n2.ID()))
+	tab.addVerifiedNode(newn2)
+
+	// Check that bucket is updated correctly.
+	newBcontent := []*node{newn2, n1}
+	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, newBcontent) {
+		t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries)
+	}
+	checkIPLimitInvariant(t, tab)
+}
+
+func TestTable_addSeenNode(t *testing.T) {
+	tab, db := newTestTable(newPingRecorder())
+	<-tab.initDone
+	defer db.Close()
+	defer tab.Close()
+
+	// Insert two nodes.
+	n1 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 1})
+	n2 := nodeAtDistance(tab.self().ID(), 256, net.IP{88, 77, 66, 2})
+	tab.addSeenNode(n1)
+	tab.addSeenNode(n2)
+
+	// Verify bucket content:
+	bcontent := []*node{n1, n2}
+	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
+		t.Fatalf("wrong bucket content: %v", tab.bucket(n1.ID()).entries)
+	}
+
+	// Add a changed version of n2.
+	newrec := n2.Record()
+	newrec.Set(enr.IP{99, 99, 99, 99})
+	newn2 := wrapNode(enode.SignNull(newrec, n2.ID()))
+	tab.addSeenNode(newn2)
+
+	// Check that bucket content is unchanged.
+	if !reflect.DeepEqual(tab.bucket(n1.ID()).entries, bcontent) {
+		t.Fatalf("wrong bucket content after update: %v", tab.bucket(n1.ID()).entries)
+	}
+	checkIPLimitInvariant(t, tab)
+}
+
 func TestTable_Lookup(t *testing.T) {
 	tab, db := newTestTable(lookupTestnet)
-	defer tab.Close()
 	defer db.Close()
+	defer tab.Close()
 
 	// lookup on empty table returns no nodes
 	if results := tab.lookup(lookupTestnet.target, false); len(results) > 0 {
@@ -289,8 +379,9 @@ func TestTable_Lookup(t *testing.T) {
 	}
 	// seed table with initial node (otherwise lookup will terminate immediately)
 	seedKey, _ := decodePubkey(lookupTestnet.dists[256][0])
-	seed := wrapNode(enode.NewV4(seedKey, net.IP{}, 0, 256))
-	tab.stuff([]*node{seed})
+	seed := wrapNode(enode.NewV4(seedKey, net.IP{127, 0, 0, 1}, 0, 256))
+	seed.livenessChecks = 1
+	fillTable(tab, []*node{seed})
 
 	results := tab.lookup(lookupTestnet.target, true)
 	t.Logf("results:")
@@ -531,7 +622,6 @@ func (tn *preminedTestnet) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
 }
 
 func (*preminedTestnet) close() {}
-func (*preminedTestnet) waitping(from enode.ID) error { return nil }
 func (*preminedTestnet) ping(toid enode.ID, toaddr *net.UDPAddr) error { return nil }
 
 // mine generates a testnet struct literal with nodes at
@@ -578,6 +668,12 @@ func gen(typ interface{}, rand *rand.Rand) interface{} {
 	return v.Interface()
 }
 
+func genIP(rand *rand.Rand) net.IP {
+	ip := make(net.IP, 4)
+	rand.Read(ip)
+	return ip
+}
+
 func quickcfg() *quick.Config {
 	return &quick.Config{
 		MaxCount: 5000,
21
vendor/github.com/ethereum/go-ethereum/p2p/discover/table_util_test.go
generated
vendored
@@ -83,6 +83,14 @@ func fillBucket(tab *Table, n *node) (last *node) {
 	return b.entries[bucketSize-1]
 }
 
+// fillTable adds nodes the table to the end of their corresponding bucket
+// if the bucket is not full. The caller must not hold tab.mutex.
+func fillTable(tab *Table, nodes []*node) {
+	for _, n := range nodes {
+		tab.addSeenNode(n)
+	}
+}
+
 type pingRecorder struct {
 	mu           sync.Mutex
 	dead, pinged map[enode.ID]bool
@@ -109,10 +117,6 @@ func (t *pingRecorder) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
 	return nil, nil
 }
 
-func (t *pingRecorder) waitping(from enode.ID) error {
-	return nil // remote always pings
-}
-
 func (t *pingRecorder) ping(toid enode.ID, toaddr *net.UDPAddr) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
@@ -141,15 +145,6 @@ func hasDuplicates(slice []*node) bool {
 	return false
 }
 
-func contains(ns []*node, id enode.ID) bool {
-	for _, n := range ns {
-		if n.ID() == id {
-			return true
-		}
-	}
-	return false
-}
-
 func sortedByDistanceTo(distbase enode.ID, slice []*node) bool {
 	var last enode.ID
 	for i, e := range slice {
224
vendor/github.com/ethereum/go-ethereum/p2p/discover/udp.go
generated
vendored
224
vendor/github.com/ethereum/go-ethereum/p2p/discover/udp.go
generated
vendored
@ -67,6 +67,8 @@ const (
|
||||
// RPC request structures
|
||||
type (
|
||||
ping struct {
|
||||
senderKey *ecdsa.PublicKey // filled in by preverify
|
||||
|
||||
Version uint
|
||||
From, To rpcEndpoint
|
||||
Expiration uint64
|
||||
@ -155,8 +157,13 @@ func nodeToRPC(n *node) rpcNode {
|
||||
return rpcNode{ID: ekey, IP: n.IP(), UDP: uint16(n.UDP()), TCP: uint16(n.TCP())}
|
||||
}
|
||||
|
||||
// packet is implemented by all protocol messages.
|
||||
type packet interface {
|
||||
handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error
|
||||
// preverify checks whether the packet is valid and should be handled at all.
|
||||
preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error
|
||||
// handle handles the packet.
|
||||
handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte)
|
||||
// name returns the name of the packet for logging purposes.
|
||||
name() string
|
||||
}
|
||||
|
||||
@ -177,43 +184,48 @@ type udp struct {
|
||||
tab *Table
|
||||
wg sync.WaitGroup
|
||||
|
||||
addpending chan *pending
|
||||
gotreply chan reply
|
||||
closing chan struct{}
|
||||
addReplyMatcher chan *replyMatcher
|
||||
gotreply chan reply
|
||||
closing chan struct{}
|
||||
}
|
||||
|
||||
// pending represents a pending reply.
|
||||
//
|
||||
// some implementations of the protocol wish to send more than one
|
||||
// reply packet to findnode. in general, any neighbors packet cannot
|
||||
// Some implementations of the protocol wish to send more than one
|
||||
// reply packet to findnode. In general, any neighbors packet cannot
|
||||
// be matched up with a specific findnode packet.
|
||||
//
|
||||
// our implementation handles this by storing a callback function for
|
||||
// each pending reply. incoming packets from a node are dispatched
|
||||
// to all the callback functions for that node.
|
||||
type pending struct {
|
||||
// Our implementation handles this by storing a callback function for
|
||||
// each pending reply. Incoming packets from a node are dispatched
|
||||
// to all callback functions for that node.
|
||||
type replyMatcher struct {
|
||||
// these fields must match in the reply.
|
||||
from enode.ID
|
||||
ip net.IP
|
||||
ptype byte
|
||||
|
||||
// time when the request must complete
|
||||
deadline time.Time
|
||||
|
||||
// callback is called when a matching reply arrives. if it returns
|
||||
// true, the callback is removed from the pending reply queue.
|
||||
// if it returns false, the reply is considered incomplete and
|
||||
// the callback will be invoked again for the next matching reply.
|
||||
callback func(resp interface{}) (done bool)
|
||||
// callback is called when a matching reply arrives. If it returns matched == true, the
|
||||
// reply was acceptable. The second return value indicates whether the callback should
|
||||
// be removed from the pending reply queue. If it returns false, the reply is considered
|
||||
// incomplete and the callback will be invoked again for the next matching reply.
|
||||
callback replyMatchFunc
|
||||
|
||||
// errc receives nil when the callback indicates completion or an
|
||||
// error if no further reply is received within the timeout.
|
||||
errc chan<- error
|
||||
}
|
||||
|
||||
type replyMatchFunc func(interface{}) (matched bool, requestDone bool)
|
||||
|
||||
type reply struct {
|
||||
from enode.ID
|
||||
ip net.IP
|
||||
ptype byte
|
||||
data interface{}
|
||||
data packet
|
||||
|
||||
// loop indicates whether there was
|
||||
// a matching request by sending on this channel.
|
||||
matched chan<- bool
|
||||
@ -247,14 +259,14 @@ func ListenUDP(c conn, ln *enode.LocalNode, cfg Config) (*Table, error) {
|
||||
|
||||
func newUDP(c conn, ln *enode.LocalNode, cfg Config) (*Table, *udp, error) {
|
||||
udp := &udp{
|
||||
conn: c,
|
||||
priv: cfg.PrivateKey,
|
||||
netrestrict: cfg.NetRestrict,
|
||||
localNode: ln,
|
||||
db: ln.Database(),
|
||||
closing: make(chan struct{}),
|
||||
gotreply: make(chan reply),
|
||||
addpending: make(chan *pending),
|
||||
conn: c,
|
||||
priv: cfg.PrivateKey,
|
||||
netrestrict: cfg.NetRestrict,
|
||||
localNode: ln,
|
||||
db: ln.Database(),
|
||||
closing: make(chan struct{}),
|
||||
gotreply: make(chan reply),
|
||||
addReplyMatcher: make(chan *replyMatcher),
|
||||
}
|
||||
tab, err := newTable(udp, ln.Database(), cfg.Bootnodes)
|
||||
if err != nil {
|
||||
@ -304,35 +316,37 @@ func (t *udp) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) <-ch
|
||||
errc <- err
|
||||
return errc
|
||||
}
|
||||
errc := t.pending(toid, pongPacket, func(p interface{}) bool {
|
||||
ok := bytes.Equal(p.(*pong).ReplyTok, hash)
|
||||
if ok && callback != nil {
|
||||
// Add a matcher for the reply to the pending reply queue. Pongs are matched if they
|
||||
// reference the ping we're about to send.
|
||||
errc := t.pending(toid, toaddr.IP, pongPacket, func(p interface{}) (matched bool, requestDone bool) {
|
||||
matched = bytes.Equal(p.(*pong).ReplyTok, hash)
|
||||
if matched && callback != nil {
|
||||
callback()
|
||||
}
|
||||
return ok
|
||||
return matched, matched
|
||||
})
|
||||
// Send the packet.
|
||||
t.localNode.UDPContact(toaddr)
|
||||
t.write(toaddr, req.name(), packet)
|
||||
t.write(toaddr, toid, req.name(), packet)
|
||||
return errc
|
||||
}
|
||||
|
||||
func (t *udp) waitping(from enode.ID) error {
|
||||
return <-t.pending(from, pingPacket, func(interface{}) bool { return true })
|
||||
}
|
||||
|
||||
// findnode sends a findnode request to the given node and waits until
|
||||
// the node has sent up to k neighbors.
|
||||
func (t *udp) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
|
||||
// If we haven't seen a ping from the destination node for a while, it won't remember
|
||||
// our endpoint proof and reject findnode. Solicit a ping first.
|
||||
if time.Since(t.db.LastPingReceived(toid)) > bondExpiration {
|
||||
if time.Since(t.db.LastPingReceived(toid, toaddr.IP)) > bondExpiration {
|
||||
t.ping(toid, toaddr)
|
||||
t.waitping(toid)
|
||||
// Wait for them to ping back and process our pong.
|
||||
time.Sleep(respTimeout)
|
||||
}
|
||||
|
||||
// Add a matcher for 'neighbours' replies to the pending reply queue. The matcher is
|
||||
// active until enough nodes have been received.
|
||||
nodes := make([]*node, 0, bucketSize)
|
||||
nreceived := 0
|
||||
errc := t.pending(toid, neighborsPacket, func(r interface{}) bool {
|
||||
errc := t.pending(toid, toaddr.IP, neighborsPacket, func(r interface{}) (matched bool, requestDone bool) {
|
||||
reply := r.(*neighbors)
|
||||
for _, rn := range reply.Nodes {
|
||||
nreceived++
|
||||
@ -343,22 +357,22 @@ func (t *udp) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]
|
||||
}
|
||||
nodes = append(nodes, n)
|
||||
}
|
||||
return nreceived >= bucketSize
|
||||
return true, nreceived >= bucketSize
|
||||
})
|
||||
t.send(toaddr, findnodePacket, &findnode{
|
||||
t.send(toaddr, toid, findnodePacket, &findnode{
|
||||
Target: target,
|
||||
Expiration: uint64(time.Now().Add(expiration).Unix()),
|
||||
})
|
||||
return nodes, <-errc
|
||||
}
|
||||
|
||||
// pending adds a reply callback to the pending reply queue.
|
||||
// see the documentation of type pending for a detailed explanation.
|
||||
func (t *udp) pending(id enode.ID, ptype byte, callback func(interface{}) bool) <-chan error {
|
||||
// pending adds a reply matcher to the pending reply queue.
|
||||
// see the documentation of type replyMatcher for a detailed explanation.
|
||||
func (t *udp) pending(id enode.ID, ip net.IP, ptype byte, callback replyMatchFunc) <-chan error {
|
||||
ch := make(chan error, 1)
|
||||
p := &pending{from: id, ptype: ptype, callback: callback, errc: ch}
|
||||
p := &replyMatcher{from: id, ip: ip, ptype: ptype, callback: callback, errc: ch}
|
||||
select {
|
||||
case t.addpending <- p:
|
||||
case t.addReplyMatcher <- p:
|
||||
// loop will handle it
|
||||
case <-t.closing:
|
||||
ch <- errClosed
|
||||
@ -366,10 +380,12 @@ func (t *udp) pending(id enode.ID, ptype byte, callback func(interface{}) bool)
|
||||
return ch
|
||||
}
|
||||
|
||||
func (t *udp) handleReply(from enode.ID, ptype byte, req packet) bool {
|
||||
// handleReply dispatches a reply packet, invoking reply matchers. It returns
|
||||
// whether any matcher considered the packet acceptable.
|
||||
func (t *udp) handleReply(from enode.ID, fromIP net.IP, ptype byte, req packet) bool {
|
||||
matched := make(chan bool, 1)
|
||||
select {
|
||||
case t.gotreply <- reply{from, ptype, req, matched}:
|
||||
case t.gotreply <- reply{from, fromIP, ptype, req, matched}:
|
||||
// loop will handle it
|
||||
return <-matched
|
||||
case <-t.closing:
|
||||
@ -385,8 +401,8 @@ func (t *udp) loop() {
|
||||
var (
|
||||
plist = list.New()
|
||||
timeout = time.NewTimer(0)
|
||||
nextTimeout *pending // head of plist when timeout was last reset
|
||||
contTimeouts = 0 // number of continuous timeouts to do NTP checks
|
||||
nextTimeout *replyMatcher // head of plist when timeout was last reset
|
||||
contTimeouts = 0 // number of continuous timeouts to do NTP checks
|
||||
ntpWarnTime = time.Unix(0, 0)
|
||||
)
|
||||
<-timeout.C // ignore first timeout
|
||||
@ -399,7 +415,7 @@ func (t *udp) loop() {
|
||||
// Start the timer so it fires when the next pending reply has expired.
|
||||
now := time.Now()
|
||||
for el := plist.Front(); el != nil; el = el.Next() {
|
||||
nextTimeout = el.Value.(*pending)
|
||||
nextTimeout = el.Value.(*replyMatcher)
|
||||
if dist := nextTimeout.deadline.Sub(now); dist < 2*respTimeout {
|
||||
timeout.Reset(dist)
|
||||
return
|
||||
@ -420,25 +436,23 @@ func (t *udp) loop() {
|
||||
select {
|
||||
case <-t.closing:
|
||||
for el := plist.Front(); el != nil; el = el.Next() {
|
||||
el.Value.(*pending).errc <- errClosed
|
||||
el.Value.(*replyMatcher).errc <- errClosed
|
||||
}
|
||||
return
|
||||
|
||||
case p := <-t.addpending:
|
||||
case p := <-t.addReplyMatcher:
|
||||
p.deadline = time.Now().Add(respTimeout)
|
||||
plist.PushBack(p)
|
||||
|
||||
case r := <-t.gotreply:
|
||||
var matched bool
|
||||
var matched bool // whether any replyMatcher considered the reply acceptable.
|
||||
for el := plist.Front(); el != nil; el = el.Next() {
|
||||
p := el.Value.(*pending)
|
||||
if p.from == r.from && p.ptype == r.ptype {
|
||||
matched = true
|
||||
// Remove the matcher if its callback indicates
|
||||
// that all replies have been received. This is
|
||||
// required for packet types that expect multiple
|
||||
// reply packets.
|
||||
if p.callback(r.data) {
|
||||
p := el.Value.(*replyMatcher)
|
||||
if p.from == r.from && p.ptype == r.ptype && p.ip.Equal(r.ip) {
|
||||
ok, requestDone := p.callback(r.data)
|
||||
matched = matched || ok
|
||||
// Remove the matcher if callback indicates that all replies have been received.
|
||||
if requestDone {
|
||||
p.errc <- nil
|
||||
plist.Remove(el)
|
||||
}
|
||||
@ -453,7 +467,7 @@ func (t *udp) loop() {
|
||||
|
||||
// Notify and remove callbacks whose deadline is in the past.
|
||||
for el := plist.Front(); el != nil; el = el.Next() {
|
||||
p := el.Value.(*pending)
|
||||
p := el.Value.(*replyMatcher)
|
||||
if now.After(p.deadline) || now.Equal(p.deadline) {
|
||||
p.errc <- errTimeout
|
||||
plist.Remove(el)
|
||||
@ -504,17 +518,17 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req packet) ([]byte, error) {
|
||||
func (t *udp) send(toaddr *net.UDPAddr, toid enode.ID, ptype byte, req packet) ([]byte, error) {
|
||||
packet, hash, err := encodePacket(t.priv, ptype, req)
|
||||
if err != nil {
|
||||
return hash, err
|
||||
}
|
||||
return hash, t.write(toaddr, req.name(), packet)
|
||||
return hash, t.write(toaddr, toid, req.name(), packet)
|
||||
}
|
||||
|
||||
func (t *udp) write(toaddr *net.UDPAddr, what string, packet []byte) error {
|
||||
func (t *udp) write(toaddr *net.UDPAddr, toid enode.ID, what string, packet []byte) error {
|
||||
_, err := t.conn.WriteToUDP(packet, toaddr)
|
||||
log.Trace(">> "+what, "addr", toaddr, "err", err)
|
||||
log.Trace(">> "+what, "id", toid, "addr", toaddr, "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -573,13 +587,19 @@ func (t *udp) readLoop(unhandled chan<- ReadPacket) {
|
||||
}
|
||||
|
||||
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
|
||||
packet, fromID, hash, err := decodePacket(buf)
|
||||
packet, fromKey, hash, err := decodePacket(buf)
|
||||
if err != nil {
|
||||
log.Debug("Bad discv4 packet", "addr", from, "err", err)
|
||||
return err
|
||||
}
|
||||
err = packet.handle(t, from, fromID, hash)
|
||||
log.Trace("<< "+packet.name(), "addr", from, "err", err)
|
||||
fromID := fromKey.id()
|
||||
if err == nil {
|
||||
err = packet.preverify(t, from, fromID, fromKey)
|
||||
}
|
||||
log.Trace("<< "+packet.name(), "id", fromID, "addr", from, "err", err)
|
||||
if err == nil {
|
||||
packet.handle(t, from, fromID, hash)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@ -615,54 +635,67 @@ func decodePacket(buf []byte) (packet, encPubkey, []byte, error) {
|
||||
return req, fromKey, hash, err
|
||||
}
|
||||
|
||||
func (req *ping) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
|
||||
// Packet Handlers
|
||||
|
||||
func (req *ping) preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error {
|
||||
if expired(req.Expiration) {
|
||||
return errExpired
|
||||
}
|
||||
key, err := decodePubkey(fromKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid public key: %v", err)
|
||||
return errors.New("invalid public key")
|
||||
}
|
||||
t.send(from, pongPacket, &pong{
|
||||
req.senderKey = key
|
||||
return nil
|
||||
}
|
||||
|
||||
func (req *ping) handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte) {
|
||||
// Reply.
|
||||
t.send(from, fromID, pongPacket, &pong{
|
||||
To: makeEndpoint(from, req.From.TCP),
|
||||
ReplyTok: mac,
|
||||
Expiration: uint64(time.Now().Add(expiration).Unix()),
|
||||
})
|
||||
n := wrapNode(enode.NewV4(key, from.IP, int(req.From.TCP), from.Port))
|
||||
t.handleReply(n.ID(), pingPacket, req)
|
||||
if time.Since(t.db.LastPongReceived(n.ID())) > bondExpiration {
|
||||
t.sendPing(n.ID(), from, func() { t.tab.addThroughPing(n) })
|
||||
|
||||
// Ping back if our last pong on file is too far in the past.
|
||||
n := wrapNode(enode.NewV4(req.senderKey, from.IP, int(req.From.TCP), from.Port))
|
||||
if time.Since(t.db.LastPongReceived(n.ID(), from.IP)) > bondExpiration {
|
||||
t.sendPing(fromID, from, func() {
|
||||
t.tab.addVerifiedNode(n)
|
||||
})
|
||||
} else {
|
||||
t.tab.addThroughPing(n)
|
||||
t.tab.addVerifiedNode(n)
|
||||
}
|
||||
|
||||
// Update node database and endpoint predictor.
|
||||
t.db.UpdateLastPingReceived(n.ID(), from.IP, time.Now())
|
||||
t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)})
|
||||
t.db.UpdateLastPingReceived(n.ID(), time.Now())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (req *ping) name() string { return "PING/v4" }

func (req *pong) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
func (req *pong) preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error {
    if expired(req.Expiration) {
        return errExpired
    }
    fromID := fromKey.id()
    if !t.handleReply(fromID, pongPacket, req) {
    if !t.handleReply(fromID, from.IP, pongPacket, req) {
        return errUnsolicitedReply
    }
    t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)})
    t.db.UpdateLastPongReceived(fromID, time.Now())
    return nil
}

func (req *pong) handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte) {
    t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)})
    t.db.UpdateLastPongReceived(fromID, from.IP, time.Now())
}

func (req *pong) name() string { return "PONG/v4" }
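
// Sketch of the stricter reply matching introduced here: a pending request is
// now matched on sender IP as well as node ID and packet type, so a pong from
// a different address no longer satisfies a ping (exercised by
// TestUDP_pingMatchIP further down). Stand-in key shape, assumed from the
// handleReply(fromID, from.IP, ptype, req) call sites:
//
//	type replyKey struct {
//		id    enode.ID
//		ip    net.IP // new: reply must originate from the solicited endpoint
//		ptype byte
//	}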

func (req *findnode) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
func (req *findnode) preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error {
    if expired(req.Expiration) {
        return errExpired
    }
    fromID := fromKey.id()
    if time.Since(t.db.LastPongReceived(fromID)) > bondExpiration {
    if time.Since(t.db.LastPongReceived(fromID, from.IP)) > bondExpiration {
        // No endpoint proof pong exists, we don't process the packet. This prevents an
        // attack vector where the discovery protocol could be used to amplify traffic in a
        // DDOS attack. A malicious actor would send a findnode request with the IP address
@ -671,43 +704,50 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []
        // findnode) to the victim.
        return errUnknownNode
    }
    return nil
}
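
// Rough amplification arithmetic behind the comment above (illustrative
// figures, not from the source): a spoofed findnode datagram on the order of
// a hundred bytes can elicit up to two neighbors packets near the 1280-byte
// limit cited below, i.e. roughly 20x traffic aimed at the victim's address.
// Requiring a recent endpoint-proof pong from the claimed IP removes that
// lever.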

func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte) {
    // Determine closest nodes.
    target := enode.ID(crypto.Keccak256Hash(req.Target[:]))
    t.tab.mutex.Lock()
    closest := t.tab.closest(target, bucketSize).entries
    t.tab.mutex.Unlock()

    p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
    var sent bool
    // Send neighbors in chunks with at most maxNeighbors per packet
    // to stay below the 1280 byte limit.
    p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
    var sent bool
    for _, n := range closest {
        if netutil.CheckRelayIP(from.IP, n.IP()) == nil {
            p.Nodes = append(p.Nodes, nodeToRPC(n))
        }
        if len(p.Nodes) == maxNeighbors {
            t.send(from, neighborsPacket, &p)
            t.send(from, fromID, neighborsPacket, &p)
            p.Nodes = p.Nodes[:0]
            sent = true
        }
    }
    if len(p.Nodes) > 0 || !sent {
        t.send(from, neighborsPacket, &p)
        t.send(from, fromID, neighborsPacket, &p)
    }
    return nil
}

func (req *findnode) name() string { return "FINDNODE/v4" }
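
// Note on the final send in findnode.handle: a last, possibly empty,
// neighbors packet is always emitted (len(p.Nodes) > 0 || !sent) so the
// requester receives a definite answer instead of waiting out its reply
// timeout.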

func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
func (req *neighbors) preverify(t *udp, from *net.UDPAddr, fromID enode.ID, fromKey encPubkey) error {
    if expired(req.Expiration) {
        return errExpired
    }
    if !t.handleReply(fromKey.id(), neighborsPacket, req) {
    if !t.handleReply(fromID, from.IP, neighborsPacket, req) {
        return errUnsolicitedReply
    }
    return nil
}

func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromID enode.ID, mac []byte) {
}

func (req *neighbors) name() string { return "NEIGHBORS/v4" }

func expired(ts uint64) bool {

137
vendor/github.com/ethereum/go-ethereum/p2p/discover/udp_test.go
generated
vendored
@ -19,6 +19,7 @@ package discover
import (
    "bytes"
    "crypto/ecdsa"
    crand "crypto/rand"
    "encoding/binary"
    "encoding/hex"
    "errors"
@ -57,6 +58,7 @@ type udpTest struct {
    t                   *testing.T
    pipe                *dgramPipe
    table               *Table
    db                  *enode.DB
    udp                 *udp
    sent                [][]byte
    localkey, remotekey *ecdsa.PrivateKey
@ -71,22 +73,32 @@ func newUDPTest(t *testing.T) *udpTest {
        remotekey:  newkey(),
        remoteaddr: &net.UDPAddr{IP: net.IP{10, 0, 1, 99}, Port: 30303},
    }
    db, _ := enode.OpenDB("")
    ln := enode.NewLocalNode(db, test.localkey)
    test.db, _ = enode.OpenDB("")
    ln := enode.NewLocalNode(test.db, test.localkey)
    test.table, test.udp, _ = newUDP(test.pipe, ln, Config{PrivateKey: test.localkey})
    // Wait for initial refresh so the table doesn't send unexpected findnode.
    <-test.table.initDone
    return test
}

func (test *udpTest) close() {
    test.table.Close()
    test.db.Close()
}

// handles a packet as if it had been sent to the transport.
func (test *udpTest) packetIn(wantError error, ptype byte, data packet) error {
    enc, _, err := encodePacket(test.remotekey, ptype, data)
    return test.packetInFrom(wantError, test.remotekey, test.remoteaddr, ptype, data)
}

// handles a packet as if it had been sent to the transport by the key/endpoint.
func (test *udpTest) packetInFrom(wantError error, key *ecdsa.PrivateKey, addr *net.UDPAddr, ptype byte, data packet) error {
    enc, _, err := encodePacket(key, ptype, data)
    if err != nil {
        return test.errorf("packet (%d) encode error: %v", ptype, err)
    }
    test.sent = append(test.sent, enc)
    if err = test.udp.handlePacket(test.remoteaddr, enc); err != wantError {
    if err = test.udp.handlePacket(addr, enc); err != wantError {
        return test.errorf("error mismatch: got %q, want %q", err, wantError)
    }
    return nil
@ -94,19 +106,19 @@ func (test *udpTest) packetIn(wantError error, ptype byte, data packet) error {

// waits for a packet to be sent by the transport.
// validate should have type func(*udpTest, X) error, where X is a packet type.
func (test *udpTest) waitPacketOut(validate interface{}) ([]byte, error) {
func (test *udpTest) waitPacketOut(validate interface{}) (*net.UDPAddr, []byte, error) {
    dgram := test.pipe.waitPacketOut()
    p, _, hash, err := decodePacket(dgram)
    p, _, hash, err := decodePacket(dgram.data)
    if err != nil {
        return hash, test.errorf("sent packet decode error: %v", err)
        return &dgram.to, hash, test.errorf("sent packet decode error: %v", err)
    }
    fn := reflect.ValueOf(validate)
    exptype := fn.Type().In(0)
    if reflect.TypeOf(p) != exptype {
        return hash, test.errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype)
        return &dgram.to, hash, test.errorf("sent packet type mismatch, got: %v, want: %v", reflect.TypeOf(p), exptype)
    }
    fn.Call([]reflect.Value{reflect.ValueOf(p)})
    return hash, nil
    return &dgram.to, hash, nil
}

func (test *udpTest) errorf(format string, args ...interface{}) error {
@ -125,7 +137,7 @@ func (test *udpTest) errorf(format string, args ...interface{}) error {

func TestUDP_packetErrors(t *testing.T) {
    test := newUDPTest(t)
    defer test.table.Close()
    defer test.close()

    test.packetIn(errExpired, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: 4})
    test.packetIn(errUnsolicitedReply, pongPacket, &pong{ReplyTok: []byte{}, Expiration: futureExp})
@ -136,7 +148,7 @@ func TestUDP_packetErrors(t *testing.T) {
func TestUDP_pingTimeout(t *testing.T) {
    t.Parallel()
    test := newUDPTest(t)
    defer test.table.Close()
    defer test.close()

    toaddr := &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 2222}
    toid := enode.ID{1, 2, 3, 4}
@ -148,7 +160,7 @@ func TestUDP_pingTimeout(t *testing.T) {
func TestUDP_responseTimeouts(t *testing.T) {
    t.Parallel()
    test := newUDPTest(t)
    defer test.table.Close()
    defer test.close()

    rand.Seed(time.Now().UnixNano())
    randomDuration := func(max time.Duration) time.Duration {
@ -166,20 +178,20 @@ func TestUDP_responseTimeouts(t *testing.T) {
        // with ptype <= 128 will not get a reply and should time out.
        // For all other requests, a reply is scheduled to arrive
        // within the timeout window.
        p := &pending{
        p := &replyMatcher{
            ptype:    byte(rand.Intn(255)),
            callback: func(interface{}) bool { return true },
            callback: func(interface{}) (bool, bool) { return true, true },
        }
        binary.BigEndian.PutUint64(p.from[:], uint64(i))
        if p.ptype <= 128 {
            p.errc = timeoutErr
            test.udp.addpending <- p
            test.udp.addReplyMatcher <- p
            nTimeouts++
        } else {
            p.errc = nilErr
            test.udp.addpending <- p
            test.udp.addReplyMatcher <- p
            time.AfterFunc(randomDuration(60*time.Millisecond), func() {
                if !test.udp.handleReply(p.from, p.ptype, nil) {
                if !test.udp.handleReply(p.from, p.ip, p.ptype, nil) {
                    t.Logf("not matched: %v", p)
                }
            })
@ -220,7 +232,7 @@ func TestUDP_responseTimeouts(t *testing.T) {
func TestUDP_findnodeTimeout(t *testing.T) {
    t.Parallel()
    test := newUDPTest(t)
    defer test.table.Close()
    defer test.close()

    toaddr := &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 2222}
    toid := enode.ID{1, 2, 3, 4}
@ -236,50 +248,65 @@ func TestUDP_findnodeTimeout(t *testing.T) {

func TestUDP_findnode(t *testing.T) {
    test := newUDPTest(t)
    defer test.table.Close()
    defer test.close()

    // put a few nodes into the table. their exact
    // distribution shouldn't matter much, although we need to
    // take care not to overflow any bucket.
    nodes := &nodesByDistance{target: testTarget.id()}
    for i := 0; i < bucketSize; i++ {
    live := make(map[enode.ID]bool)
    numCandidates := 2 * bucketSize
    for i := 0; i < numCandidates; i++ {
        key := newkey()
        n := wrapNode(enode.NewV4(&key.PublicKey, net.IP{10, 13, 0, 1}, 0, i))
        nodes.push(n, bucketSize)
        ip := net.IP{10, 13, 0, byte(i)}
        n := wrapNode(enode.NewV4(&key.PublicKey, ip, 0, 2000))
        // Ensure half of table content isn't verified live yet.
        if i > numCandidates/2 {
            n.livenessChecks = 1
            live[n.ID()] = true
        }
        nodes.push(n, numCandidates)
    }
    test.table.stuff(nodes.entries)
    fillTable(test.table, nodes.entries)

    // ensure there's a bond with the test node,
    // findnode won't be accepted otherwise.
    remoteID := encodePubkey(&test.remotekey.PublicKey).id()
    test.table.db.UpdateLastPongReceived(remoteID, time.Now())
    test.table.db.UpdateLastPongReceived(remoteID, test.remoteaddr.IP, time.Now())

    // check that closest neighbors are returned.
    test.packetIn(nil, findnodePacket, &findnode{Target: testTarget, Expiration: futureExp})
    expected := test.table.closest(testTarget.id(), bucketSize)

    test.packetIn(nil, findnodePacket, &findnode{Target: testTarget, Expiration: futureExp})
    waitNeighbors := func(want []*node) {
        test.waitPacketOut(func(p *neighbors) {
            if len(p.Nodes) != len(want) {
                t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSize)
            }
            for i := range p.Nodes {
                if p.Nodes[i].ID.id() != want[i].ID() {
                    t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, p.Nodes[i], expected.entries[i])
            for i, n := range p.Nodes {
                if n.ID.id() != want[i].ID() {
                    t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, n, expected.entries[i])
                }
                if !live[n.ID.id()] {
                    t.Errorf("result includes dead node %v", n.ID.id())
                }
            }
        })
    }
    waitNeighbors(expected.entries[:maxNeighbors])
    waitNeighbors(expected.entries[maxNeighbors:])
    // Receive replies.
    want := expected.entries
    if len(want) > maxNeighbors {
        waitNeighbors(want[:maxNeighbors])
        want = want[maxNeighbors:]
    }
    waitNeighbors(want)
}

func TestUDP_findnodeMultiReply(t *testing.T) {
    test := newUDPTest(t)
    defer test.table.Close()
    defer test.close()

    rid := enode.PubkeyToIDV4(&test.remotekey.PublicKey)
    test.table.db.UpdateLastPingReceived(rid, time.Now())
    test.table.db.UpdateLastPingReceived(rid, test.remoteaddr.IP, time.Now())

    // queue a pending findnode request
    resultc, errc := make(chan []*node), make(chan error)
@ -329,11 +356,40 @@ func TestUDP_findnodeMultiReply(t *testing.T) {
    }
}

func TestUDP_pingMatch(t *testing.T) {
    test := newUDPTest(t)
    defer test.close()

    randToken := make([]byte, 32)
    crand.Read(randToken)

    test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp})
    test.waitPacketOut(func(*pong) error { return nil })
    test.waitPacketOut(func(*ping) error { return nil })
    test.packetIn(errUnsolicitedReply, pongPacket, &pong{ReplyTok: randToken, To: testLocalAnnounced, Expiration: futureExp})
}

func TestUDP_pingMatchIP(t *testing.T) {
    test := newUDPTest(t)
    defer test.close()

    test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp})
    test.waitPacketOut(func(*pong) error { return nil })

    _, hash, _ := test.waitPacketOut(func(*ping) error { return nil })
    wrongAddr := &net.UDPAddr{IP: net.IP{33, 44, 1, 2}, Port: 30000}
    test.packetInFrom(errUnsolicitedReply, test.remotekey, wrongAddr, pongPacket, &pong{
        ReplyTok:   hash,
        To:         testLocalAnnounced,
        Expiration: futureExp,
    })
}

func TestUDP_successfulPing(t *testing.T) {
    test := newUDPTest(t)
    added := make(chan *node, 1)
    test.table.nodeAddedHook = func(n *node) { added <- n }
    defer test.table.Close()
    defer test.close()

    // The remote side sends a ping packet to initiate the exchange.
    go test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: 4, Expiration: futureExp})
@ -356,7 +412,7 @@ func TestUDP_successfulPing(t *testing.T) {
    })

    // remote is unknown, the table pings back.
    hash, _ := test.waitPacketOut(func(p *ping) error {
    _, hash, _ := test.waitPacketOut(func(p *ping) error {
        if !reflect.DeepEqual(p.From, test.udp.ourEndpoint()) {
            t.Errorf("got ping.From %#v, want %#v", p.From, test.udp.ourEndpoint())
        }
@ -510,7 +566,12 @@ type dgramPipe struct {
    cond    *sync.Cond
    closing chan struct{}
    closed  bool
    queue   [][]byte
    queue   []dgram
}

type dgram struct {
    to   net.UDPAddr
    data []byte
}

func newpipe() *dgramPipe {
@ -531,7 +592,7 @@ func (c *dgramPipe) WriteToUDP(b []byte, to *net.UDPAddr) (n int, err error) {
    if c.closed {
        return 0, errors.New("closed")
    }
    c.queue = append(c.queue, msg)
    c.queue = append(c.queue, dgram{*to, b})
    c.cond.Signal()
    return len(b), nil
}
@ -556,7 +617,7 @@ func (c *dgramPipe) LocalAddr() net.Addr {
    return &net.UDPAddr{IP: testLocal.IP, Port: int(testLocal.UDP)}
}

func (c *dgramPipe) waitPacketOut() []byte {
func (c *dgramPipe) waitPacketOut() dgram {
    c.mu.Lock()
    defer c.mu.Unlock()
    for len(c.queue) == 0 {

214
vendor/github.com/ethereum/go-ethereum/p2p/enode/nodedb.go
generated
vendored
@ -21,11 +21,11 @@ import (
    "crypto/rand"
    "encoding/binary"
    "fmt"
    "net"
    "os"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/errors"
@ -37,24 +37,31 @@ import (

// Keys in the node database.
const (
    dbVersionKey = "version" // Version of the database to flush if changes
    dbItemPrefix = "n:"      // Identifier to prefix node entries with
    dbVersionKey   = "version" // Version of the database to flush if changes
    dbNodePrefix   = "n:"      // Identifier to prefix node entries with
    dbLocalPrefix  = "local:"
    dbDiscoverRoot = "v4"

    dbDiscoverRoot      = ":discover"
    dbDiscoverSeq       = dbDiscoverRoot + ":seq"
    dbDiscoverPing      = dbDiscoverRoot + ":lastping"
    dbDiscoverPong      = dbDiscoverRoot + ":lastpong"
    dbDiscoverFindFails = dbDiscoverRoot + ":findfail"
    dbLocalRoot         = ":local"
    dbLocalSeq          = dbLocalRoot + ":seq"
    // These fields are stored per ID and IP, the full key is "n:<ID>:v4:<IP>:findfail".
    // Use nodeItemKey to create those keys.
    dbNodeFindFails = "findfail"
    dbNodePing      = "lastping"
    dbNodePong      = "lastpong"
    dbNodeSeq       = "seq"

    // Local information is keyed by ID only, the full key is "local:<ID>:seq".
    // Use localItemKey to create those keys.
    dbLocalSeq = "seq"
)
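
// Illustrative byte layout of the new key scheme described above:
//
//	node record: "n:" + <32-byte ID> + ":v4"
//	node item:   "n:" + <32-byte ID> + ":v4:" + <16-byte IP> + ":lastpong"
//	local item:  "local:" + <32-byte ID> + ":seq"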

var (
const (
    dbNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
    dbCleanupCycle   = time.Hour      // Time period for running the expiration task.
    dbVersion        = 7
    dbVersion        = 8
)

var zeroIP = make(net.IP, 16)

// DB is the node database, storing previously seen nodes and any collected metadata about
// them for QoS purposes.
type DB struct {
@ -119,27 +126,58 @@ func newPersistentDB(path string) (*DB, error) {
    return &DB{lvl: db, quit: make(chan struct{})}, nil
}

// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
func makeKey(id ID, field string) []byte {
    if (id == ID{}) {
        return []byte(field)
    }
    return append([]byte(dbItemPrefix), append(id[:], field...)...)
// nodeKey returns the database key for a node record.
func nodeKey(id ID) []byte {
    key := append([]byte(dbNodePrefix), id[:]...)
    key = append(key, ':')
    key = append(key, dbDiscoverRoot...)
    return key
}

// splitKey tries to split a database key into a node id and a field part.
func splitKey(key []byte) (id ID, field string) {
    // If the key is not of a node, return it plainly
    if !bytes.HasPrefix(key, []byte(dbItemPrefix)) {
        return ID{}, string(key)
// splitNodeKey returns the node ID of a key created by nodeKey.
func splitNodeKey(key []byte) (id ID, rest []byte) {
    if !bytes.HasPrefix(key, []byte(dbNodePrefix)) {
        return ID{}, nil
    }
    // Otherwise split the id and field
    item := key[len(dbItemPrefix):]
    item := key[len(dbNodePrefix):]
    copy(id[:], item[:len(id)])
    field = string(item[len(id):])
    return id, item[len(id)+1:]
}

    return id, field
// nodeItemKey returns the database key for a node metadata field.
func nodeItemKey(id ID, ip net.IP, field string) []byte {
    ip16 := ip.To16()
    if ip16 == nil {
        panic(fmt.Errorf("invalid IP (length %d)", len(ip)))
    }
    return bytes.Join([][]byte{nodeKey(id), ip16, []byte(field)}, []byte{':'})
}

// splitNodeItemKey returns the components of a key created by nodeItemKey.
func splitNodeItemKey(key []byte) (id ID, ip net.IP, field string) {
    id, key = splitNodeKey(key)
    // Skip discover root.
    if string(key) == dbDiscoverRoot {
        return id, nil, ""
    }
    key = key[len(dbDiscoverRoot)+1:]
    // Split out the IP.
    ip = net.IP(key[:16])
    if ip4 := ip.To4(); ip4 != nil {
        ip = ip4
    }
    key = key[16+1:]
    // Field is the remainder of key.
    field = string(key)
    return id, ip, field
}

// localItemKey returns the key of a local node item.
func localItemKey(id ID, field string) []byte {
    key := append([]byte(dbLocalPrefix), id[:]...)
    key = append(key, ':')
    key = append(key, field...)
    return key
}
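
// Round-trip usage sketch for the helpers above (hypothetical values; the
// real assertions live in TestDBNodeItemKey below):
//
//	key := nodeItemKey(id, net.IP{127, 0, 0, 1}, dbNodePong)
//	gotID, gotIP, gotField := splitNodeItemKey(key)
//	// gotID == id, gotIP.Equal(net.IP{127, 0, 0, 1}), gotField == "lastpong"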

// fetchInt64 retrieves an integer associated with a particular key.
@ -181,7 +219,7 @@ func (db *DB) storeUint64(key []byte, n uint64) error {

// Node retrieves a node with a given id from the database.
func (db *DB) Node(id ID) *Node {
    blob, err := db.lvl.Get(makeKey(id, dbDiscoverRoot), nil)
    blob, err := db.lvl.Get(nodeKey(id), nil)
    if err != nil {
        return nil
    }
@ -207,15 +245,15 @@ func (db *DB) UpdateNode(node *Node) error {
    if err != nil {
        return err
    }
    if err := db.lvl.Put(makeKey(node.ID(), dbDiscoverRoot), blob, nil); err != nil {
    if err := db.lvl.Put(nodeKey(node.ID()), blob, nil); err != nil {
        return err
    }
    return db.storeUint64(makeKey(node.ID(), dbDiscoverSeq), node.Seq())
    return db.storeUint64(nodeItemKey(node.ID(), zeroIP, dbNodeSeq), node.Seq())
}

// NodeSeq returns the stored record sequence number of the given node.
func (db *DB) NodeSeq(id ID) uint64 {
    return db.fetchUint64(makeKey(id, dbDiscoverSeq))
    return db.fetchUint64(nodeItemKey(id, zeroIP, dbNodeSeq))
}

// Resolve returns the stored record of the node if it has a larger sequence
@ -227,15 +265,17 @@ func (db *DB) Resolve(n *Node) *Node {
    return db.Node(n.ID())
}

// DeleteNode deletes all information/keys associated with a node.
func (db *DB) DeleteNode(id ID) error {
    deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
    for deleter.Next() {
        if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
            return err
        }
// DeleteNode deletes all information associated with a node.
func (db *DB) DeleteNode(id ID) {
    deleteRange(db.lvl, nodeKey(id))
}

func deleteRange(db *leveldb.DB, prefix []byte) {
    it := db.NewIterator(util.BytesPrefix(prefix), nil)
    defer it.Release()
    for it.Next() {
        db.Delete(it.Key(), nil)
    }
    return nil
}

// ensureExpirer is a small helper method ensuring that the data expiration
@ -259,9 +299,7 @@ func (db *DB) expirer() {
    for {
        select {
        case <-tick.C:
            if err := db.expireNodes(); err != nil {
                log.Error("Failed to expire nodedb items", "err", err)
            }
            db.expireNodes()
        case <-db.quit:
            return
        }
@ -269,71 +307,85 @@
}

// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
func (db *DB) expireNodes() error {
    threshold := time.Now().Add(-dbNodeExpiration)

    // Find discovered nodes that are older than the allowance
    it := db.lvl.NewIterator(nil, nil)
// been seen (i.e. received a pong from) for some time.
func (db *DB) expireNodes() {
    it := db.lvl.NewIterator(util.BytesPrefix([]byte(dbNodePrefix)), nil)
    defer it.Release()

    for it.Next() {
        // Skip the item if not a discovery node
        id, field := splitKey(it.Key())
        if field != dbDiscoverRoot {
            continue
        }
        // Skip the node if not expired yet (and not self)
        if seen := db.LastPongReceived(id); seen.After(threshold) {
            continue
        }
        // Otherwise delete all associated information
        db.DeleteNode(id)
    if !it.Next() {
        return
    }

    var (
        threshold    = time.Now().Add(-dbNodeExpiration).Unix()
        youngestPong int64
        atEnd        = false
    )
    for !atEnd {
        id, ip, field := splitNodeItemKey(it.Key())
        if field == dbNodePong {
            time, _ := binary.Varint(it.Value())
            if time > youngestPong {
                youngestPong = time
            }
            if time < threshold {
                // Last pong from this IP older than threshold, remove fields belonging to it.
                deleteRange(db.lvl, nodeItemKey(id, ip, ""))
            }
        }
        atEnd = !it.Next()
        nextID, _ := splitNodeKey(it.Key())
        if atEnd || nextID != id {
            // We've moved beyond the last entry of the current ID.
            // Remove everything if there was no recent enough pong.
            if youngestPong > 0 && youngestPong < threshold {
                deleteRange(db.lvl, nodeKey(id))
            }
            youngestPong = 0
        }
    }
    return nil
}
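
// Reading aid for the loop above: the iterator walks "n:"-prefixed keys in
// order, so all entries for one ID are adjacent. youngestPong tracks the
// newest pong seen for the current ID, each per-IP item group is deleted as
// soon as its own pong goes stale, and the whole record (nodeKey) is dropped
// only once the scan moves past an ID for which no recent-enough pong was
// found.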

// LastPingReceived retrieves the time of the last ping packet received from
// a remote node.
func (db *DB) LastPingReceived(id ID) time.Time {
    return time.Unix(db.fetchInt64(makeKey(id, dbDiscoverPing)), 0)
func (db *DB) LastPingReceived(id ID, ip net.IP) time.Time {
    return time.Unix(db.fetchInt64(nodeItemKey(id, ip, dbNodePing)), 0)
}

// UpdateLastPingReceived updates the last time we tried contacting a remote node.
func (db *DB) UpdateLastPingReceived(id ID, instance time.Time) error {
    return db.storeInt64(makeKey(id, dbDiscoverPing), instance.Unix())
func (db *DB) UpdateLastPingReceived(id ID, ip net.IP, instance time.Time) error {
    return db.storeInt64(nodeItemKey(id, ip, dbNodePing), instance.Unix())
}

// LastPongReceived retrieves the time of the last successful pong from remote node.
func (db *DB) LastPongReceived(id ID) time.Time {
func (db *DB) LastPongReceived(id ID, ip net.IP) time.Time {
    // Launch expirer
    db.ensureExpirer()
    return time.Unix(db.fetchInt64(makeKey(id, dbDiscoverPong)), 0)
    return time.Unix(db.fetchInt64(nodeItemKey(id, ip, dbNodePong)), 0)
}

// UpdateLastPongReceived updates the last pong time of a node.
func (db *DB) UpdateLastPongReceived(id ID, instance time.Time) error {
    return db.storeInt64(makeKey(id, dbDiscoverPong), instance.Unix())
func (db *DB) UpdateLastPongReceived(id ID, ip net.IP, instance time.Time) error {
    return db.storeInt64(nodeItemKey(id, ip, dbNodePong), instance.Unix())
}

// FindFails retrieves the number of findnode failures since bonding.
func (db *DB) FindFails(id ID) int {
    return int(db.fetchInt64(makeKey(id, dbDiscoverFindFails)))
func (db *DB) FindFails(id ID, ip net.IP) int {
    return int(db.fetchInt64(nodeItemKey(id, ip, dbNodeFindFails)))
}

// UpdateFindFails updates the number of findnode failures since bonding.
func (db *DB) UpdateFindFails(id ID, fails int) error {
    return db.storeInt64(makeKey(id, dbDiscoverFindFails), int64(fails))
func (db *DB) UpdateFindFails(id ID, ip net.IP, fails int) error {
    return db.storeInt64(nodeItemKey(id, ip, dbNodeFindFails), int64(fails))
}

// LocalSeq retrieves the local record sequence counter.
func (db *DB) localSeq(id ID) uint64 {
    return db.fetchUint64(makeKey(id, dbLocalSeq))
    return db.fetchUint64(nodeItemKey(id, zeroIP, dbLocalSeq))
}

// storeLocalSeq stores the local record sequence counter.
func (db *DB) storeLocalSeq(id ID, n uint64) {
    db.storeUint64(makeKey(id, dbLocalSeq), n)
    db.storeUint64(nodeItemKey(id, zeroIP, dbLocalSeq), n)
}

// QuerySeeds retrieves random nodes to be used as potential seed nodes
@ -355,14 +407,14 @@ seek:
        ctr := id[0]
        rand.Read(id[:])
        id[0] = ctr + id[0]%16
        it.Seek(makeKey(id, dbDiscoverRoot))
        it.Seek(nodeKey(id))

        n := nextNode(it)
        if n == nil {
            id[0] = 0
            continue seek // iterator exhausted
        }
        if now.Sub(db.LastPongReceived(n.ID())) > maxAge {
        if now.Sub(db.LastPongReceived(n.ID(), n.IP())) > maxAge {
            continue seek
        }
        for i := range nodes {
@ -379,8 +431,8 @@ seek:
// database entries.
func nextNode(it iterator.Iterator) *Node {
    for end := false; !end; end = !it.Next() {
        id, field := splitKey(it.Key())
        if field != dbDiscoverRoot {
        id, rest := splitNodeKey(it.Key())
        if string(rest) != dbDiscoverRoot {
            continue
        }
        return mustDecodeNode(id[:], it.Value())

216
vendor/github.com/ethereum/go-ethereum/p2p/enode/nodedb_test.go
generated
vendored
@ -28,42 +28,54 @@ import (
    "time"
)

var nodeDBKeyTests = []struct {
    id    ID
    field string
    key   []byte
}{
    {
        id:    ID{},
        field: "version",
        key:   []byte{0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e}, // field
    },
    {
        id:    HexID("51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
        field: ":discover",
        key: []byte{
            0x6e, 0x3a, // prefix
            0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, // node id
            0x2b, 0x29, 0xb5, 0x4b, 0x81, 0xcd, 0xef, 0xb9, //
            0xb3, 0xe9, 0xc3, 0x7d, 0x7f, 0xd5, 0xf6, 0x32, //
            0x70, 0xbc, 0xc9, 0xe1, 0xa6, 0xf6, 0xa4, 0x39, //
            0x3a, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, // field
        },
    },
var keytestID = HexID("51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")

func TestDBNodeKey(t *testing.T) {
    enc := nodeKey(keytestID)
    want := []byte{
        'n', ':',
        0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, // node id
        0x2b, 0x29, 0xb5, 0x4b, 0x81, 0xcd, 0xef, 0xb9, //
        0xb3, 0xe9, 0xc3, 0x7d, 0x7f, 0xd5, 0xf6, 0x32, //
        0x70, 0xbc, 0xc9, 0xe1, 0xa6, 0xf6, 0xa4, 0x39, //
        ':', 'v', '4',
    }
    if !bytes.Equal(enc, want) {
        t.Errorf("wrong encoded key:\ngot %q\nwant %q", enc, want)
    }
    id, _ := splitNodeKey(enc)
    if id != keytestID {
        t.Errorf("wrong ID from splitNodeKey")
    }
}

func TestDBKeys(t *testing.T) {
    for i, tt := range nodeDBKeyTests {
        if key := makeKey(tt.id, tt.field); !bytes.Equal(key, tt.key) {
            t.Errorf("make test %d: key mismatch: have 0x%x, want 0x%x", i, key, tt.key)
        }
        id, field := splitKey(tt.key)
        if !bytes.Equal(id[:], tt.id[:]) {
            t.Errorf("split test %d: id mismatch: have 0x%x, want 0x%x", i, id, tt.id)
        }
        if field != tt.field {
            t.Errorf("split test %d: field mismatch: have 0x%x, want 0x%x", i, field, tt.field)
        }
func TestDBNodeItemKey(t *testing.T) {
    wantIP := net.IP{127, 0, 0, 3}
    wantField := "foobar"
    enc := nodeItemKey(keytestID, wantIP, wantField)
    want := []byte{
        'n', ':',
        0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, // node id
        0x2b, 0x29, 0xb5, 0x4b, 0x81, 0xcd, 0xef, 0xb9, //
        0xb3, 0xe9, 0xc3, 0x7d, 0x7f, 0xd5, 0xf6, 0x32, //
        0x70, 0xbc, 0xc9, 0xe1, 0xa6, 0xf6, 0xa4, 0x39, //
        ':', 'v', '4', ':',
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // IP
        0x00, 0x00, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x03, //
        ':', 'f', 'o', 'o', 'b', 'a', 'r',
    }
    if !bytes.Equal(enc, want) {
        t.Errorf("wrong encoded key:\ngot %q\nwant %q", enc, want)
    }
    id, ip, field := splitNodeItemKey(enc)
    if id != keytestID {
        t.Errorf("splitNodeItemKey returned wrong ID: %v", id)
    }
    if !bytes.Equal(ip, wantIP) {
        t.Errorf("splitNodeItemKey returned wrong IP: %v", ip)
    }
    if field != wantField {
        t.Errorf("splitNodeItemKey returned wrong field: %q", field)
    }
}

@ -113,33 +125,33 @@ func TestDBFetchStore(t *testing.T) {
    defer db.Close()

    // Check fetch/store operations on a node ping object
    if stored := db.LastPingReceived(node.ID()); stored.Unix() != 0 {
    if stored := db.LastPingReceived(node.ID(), node.IP()); stored.Unix() != 0 {
        t.Errorf("ping: non-existing object: %v", stored)
    }
    if err := db.UpdateLastPingReceived(node.ID(), inst); err != nil {
    if err := db.UpdateLastPingReceived(node.ID(), node.IP(), inst); err != nil {
        t.Errorf("ping: failed to update: %v", err)
    }
    if stored := db.LastPingReceived(node.ID()); stored.Unix() != inst.Unix() {
    if stored := db.LastPingReceived(node.ID(), node.IP()); stored.Unix() != inst.Unix() {
        t.Errorf("ping: value mismatch: have %v, want %v", stored, inst)
    }
    // Check fetch/store operations on a node pong object
    if stored := db.LastPongReceived(node.ID()); stored.Unix() != 0 {
    if stored := db.LastPongReceived(node.ID(), node.IP()); stored.Unix() != 0 {
        t.Errorf("pong: non-existing object: %v", stored)
    }
    if err := db.UpdateLastPongReceived(node.ID(), inst); err != nil {
    if err := db.UpdateLastPongReceived(node.ID(), node.IP(), inst); err != nil {
        t.Errorf("pong: failed to update: %v", err)
    }
    if stored := db.LastPongReceived(node.ID()); stored.Unix() != inst.Unix() {
    if stored := db.LastPongReceived(node.ID(), node.IP()); stored.Unix() != inst.Unix() {
        t.Errorf("pong: value mismatch: have %v, want %v", stored, inst)
    }
    // Check fetch/store operations on a node findnode-failure object
    if stored := db.FindFails(node.ID()); stored != 0 {
    if stored := db.FindFails(node.ID(), node.IP()); stored != 0 {
        t.Errorf("find-node fails: non-existing object: %v", stored)
    }
    if err := db.UpdateFindFails(node.ID(), num); err != nil {
    if err := db.UpdateFindFails(node.ID(), node.IP(), num); err != nil {
        t.Errorf("find-node fails: failed to update: %v", err)
    }
    if stored := db.FindFails(node.ID()); stored != num {
    if stored := db.FindFails(node.ID(), node.IP()); stored != num {
        t.Errorf("find-node fails: value mismatch: have %v, want %v", stored, num)
    }
    // Check fetch/store operations on an actual node object
@ -256,7 +268,7 @@ func testSeedQuery() error {
        if err := db.UpdateNode(seed.node); err != nil {
            return fmt.Errorf("node %d: failed to insert: %v", i, err)
        }
        if err := db.UpdateLastPongReceived(seed.node.ID(), seed.pong); err != nil {
        if err := db.UpdateLastPongReceived(seed.node.ID(), seed.node.IP(), seed.pong); err != nil {
            return fmt.Errorf("node %d: failed to insert bondTime: %v", i, err)
        }
    }
@ -321,10 +333,12 @@ func TestDBPersistency(t *testing.T) {
}

var nodeDBExpirationNodes = []struct {
    node *Node
    pong time.Time
    exp  bool
    node      *Node
    pong      time.Time
    storeNode bool
    exp       bool
}{
    // Node has new enough pong time and isn't expired:
    {
        node: NewV4(
            hexPubkey("8d110e2ed4b446d9b5fb50f117e5f37fb7597af455e1dab0e6f045a6eeaa786a6781141659020d38bdc5e698ed3d4d2bafa8b5061810dfa63e8ac038db2e9b67"),
@ -332,17 +346,79 @@ var nodeDBExpirationNodes = []struct {
            30303,
            30303,
        ),
        pong: time.Now().Add(-dbNodeExpiration + time.Minute),
        exp:  false,
    }, {
        storeNode: true,
        pong:      time.Now().Add(-dbNodeExpiration + time.Minute),
        exp:       false,
    },
    // Node with pong time before expiration is removed:
    {
        node: NewV4(
            hexPubkey("913a205579c32425b220dfba999d215066e5bdbf900226b11da1907eae5e93eb40616d47412cf819664e9eacbdfcca6b0c6e07e09847a38472d4be46ab0c3672"),
            net.IP{127, 0, 0, 2},
            30303,
            30303,
        ),
        pong: time.Now().Add(-dbNodeExpiration - time.Minute),
        exp:  true,
        storeNode: true,
        pong:      time.Now().Add(-dbNodeExpiration - time.Minute),
        exp:       true,
    },
    // Just pong time, no node stored:
    {
        node: NewV4(
            hexPubkey("b56670e0b6bad2c5dab9f9fe6f061a16cf78d68b6ae2cfda3144262d08d97ce5f46fd8799b6d1f709b1abe718f2863e224488bd7518e5e3b43809ac9bd1138ca"),
            net.IP{127, 0, 0, 3},
            30303,
            30303,
        ),
        storeNode: false,
        pong:      time.Now().Add(-dbNodeExpiration - time.Minute),
        exp:       true,
    },
    // Node with multiple pong times, all older than expiration.
    {
        node: NewV4(
            hexPubkey("29f619cebfd32c9eab34aec797ed5e3fe15b9b45be95b4df3f5fe6a9ae892f433eb08d7698b2ef3621568b0fb70d57b515ab30d4e72583b798298e0f0a66b9d1"),
            net.IP{127, 0, 0, 4},
            30303,
            30303,
        ),
        storeNode: true,
        pong:      time.Now().Add(-dbNodeExpiration - time.Minute),
        exp:       true,
    },
    {
        node: NewV4(
            hexPubkey("29f619cebfd32c9eab34aec797ed5e3fe15b9b45be95b4df3f5fe6a9ae892f433eb08d7698b2ef3621568b0fb70d57b515ab30d4e72583b798298e0f0a66b9d1"),
            net.IP{127, 0, 0, 5},
            30303,
            30303,
        ),
        storeNode: false,
        pong:      time.Now().Add(-dbNodeExpiration - 2*time.Minute),
        exp:       true,
    },
    // Node with multiple pong times, one newer, one older than expiration.
    {
        node: NewV4(
            hexPubkey("3b73a9e5f4af6c4701c57c73cc8cfa0f4802840b24c11eba92aac3aef65644a3728b4b2aec8199f6d72bd66be2c65861c773129039bd47daa091ca90a6d4c857"),
            net.IP{127, 0, 0, 6},
            30303,
            30303,
        ),
        storeNode: true,
        pong:      time.Now().Add(-dbNodeExpiration + time.Minute),
        exp:       false,
    },
    {
        node: NewV4(
            hexPubkey("3b73a9e5f4af6c4701c57c73cc8cfa0f4802840b24c11eba92aac3aef65644a3728b4b2aec8199f6d72bd66be2c65861c773129039bd47daa091ca90a6d4c857"),
            net.IP{127, 0, 0, 7},
            30303,
            30303,
        ),
        storeNode: false,
        pong:      time.Now().Add(-dbNodeExpiration - time.Minute),
        exp:       true,
    },
}

@ -350,23 +426,39 @@ func TestDBExpiration(t *testing.T) {
    db, _ := OpenDB("")
    defer db.Close()

    // Add all the test nodes and set their last pong time
    // Add all the test nodes and set their last pong time.
    for i, seed := range nodeDBExpirationNodes {
        if err := db.UpdateNode(seed.node); err != nil {
            t.Fatalf("node %d: failed to insert: %v", i, err)
        if seed.storeNode {
            if err := db.UpdateNode(seed.node); err != nil {
                t.Fatalf("node %d: failed to insert: %v", i, err)
            }
        }
        if err := db.UpdateLastPongReceived(seed.node.ID(), seed.pong); err != nil {
        if err := db.UpdateLastPongReceived(seed.node.ID(), seed.node.IP(), seed.pong); err != nil {
            t.Fatalf("node %d: failed to update bondTime: %v", i, err)
        }
    }
    // Expire some of them, and check the rest
    if err := db.expireNodes(); err != nil {
        t.Fatalf("failed to expire nodes: %v", err)
    }

    db.expireNodes()

    // Check that expired entries have been removed.
    unixZeroTime := time.Unix(0, 0)
    for i, seed := range nodeDBExpirationNodes {
        node := db.Node(seed.node.ID())
        if (node == nil && !seed.exp) || (node != nil && seed.exp) {
            t.Errorf("node %d: expiration mismatch: have %v, want %v", i, node, seed.exp)
        pong := db.LastPongReceived(seed.node.ID(), seed.node.IP())
        if seed.exp {
            if seed.storeNode && node != nil {
                t.Errorf("node %d (%s) shouldn't be present after expiration", i, seed.node.ID().TerminalString())
            }
            if !pong.Equal(unixZeroTime) {
                t.Errorf("pong time %d (%s %v) shouldn't be present after expiration", i, seed.node.ID().TerminalString(), seed.node.IP())
            }
        } else {
            if seed.storeNode && node == nil {
                t.Errorf("node %d (%s) should be present after expiration", i, seed.node.ID().TerminalString())
            }
            if !pong.Equal(seed.pong.Truncate(1 * time.Second)) {
                t.Errorf("pong time %d (%s) should be %v after expiration, but is %v", i, seed.node.ID().TerminalString(), seed.pong, pong)
            }
        }
    }
}
35
vendor/github.com/ethereum/go-ethereum/p2p/protocols/accounting.go
generated
vendored
@ -27,23 +27,21 @@ var (
    // All metrics are cumulative

    // total amount of units credited
    mBalanceCredit metrics.Counter
    mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", metrics.AccountingRegistry)
    // total amount of units debited
    mBalanceDebit metrics.Counter
    mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", metrics.AccountingRegistry)
    // total amount of bytes credited
    mBytesCredit metrics.Counter
    mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", metrics.AccountingRegistry)
    // total amount of bytes debited
    mBytesDebit metrics.Counter
    mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", metrics.AccountingRegistry)
    // total amount of credited messages
    mMsgCredit metrics.Counter
    mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", metrics.AccountingRegistry)
    // total amount of debited messages
    mMsgDebit metrics.Counter
    mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", metrics.AccountingRegistry)
    // how many times local node had to drop remote peers
    mPeerDrops metrics.Counter
    mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", metrics.AccountingRegistry)
    // how many times local node overdrafted and dropped
    mSelfDrops metrics.Counter

    MetricsRegistry metrics.Registry
    mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", metrics.AccountingRegistry)
)

// Prices defines how prices are being passed on to the accounting instance
@ -110,24 +108,13 @@ func NewAccounting(balance Balance, po Prices) *Accounting {
    return ah
}

// SetupAccountingMetrics creates a separate registry for p2p accounting metrics;
// SetupAccountingMetrics uses a separate registry for p2p accounting metrics;
// this registry should be independent of any other metrics as it persists at different endpoints.
// It also instantiates the given metrics and starts the persisting go-routine which
// It also starts the persisting go-routine which
// at the passed interval writes the metrics to a LevelDB
func SetupAccountingMetrics(reportInterval time.Duration, path string) *AccountingMetrics {
    // create an empty registry
    MetricsRegistry = metrics.NewRegistry()
    // instantiate the metrics
    mBalanceCredit = metrics.NewRegisteredCounterForced("account.balance.credit", MetricsRegistry)
    mBalanceDebit = metrics.NewRegisteredCounterForced("account.balance.debit", MetricsRegistry)
    mBytesCredit = metrics.NewRegisteredCounterForced("account.bytes.credit", MetricsRegistry)
    mBytesDebit = metrics.NewRegisteredCounterForced("account.bytes.debit", MetricsRegistry)
    mMsgCredit = metrics.NewRegisteredCounterForced("account.msg.credit", MetricsRegistry)
    mMsgDebit = metrics.NewRegisteredCounterForced("account.msg.debit", MetricsRegistry)
    mPeerDrops = metrics.NewRegisteredCounterForced("account.peerdrops", MetricsRegistry)
    mSelfDrops = metrics.NewRegisteredCounterForced("account.selfdrops", MetricsRegistry)
    // create the DB and start persisting
    return NewAccountingMetrics(MetricsRegistry, reportInterval, path)
    return NewAccountingMetrics(metrics.AccountingRegistry, reportInterval, path)
}
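
// Usage sketch: with the counters now registered up-front in
// metrics.AccountingRegistry, a caller only wires up persistence (path and
// interval below are illustrative):
//
//	m := SetupAccountingMetrics(10*time.Second, "/tmp/accounting-metrics.db")
//	defer m.Close()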

// Send takes a peer, a size and a msg and

14
vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol.go
generated
vendored
@ -423,3 +423,17 @@ func (p *Peer) Handshake(ctx context.Context, hs interface{}, verify func(interf
    }
    return rhs, nil
}

// HasCap returns true if Peer has a capability
// with provided name.
func (p *Peer) HasCap(capName string) (yes bool) {
    if p == nil || p.Peer == nil {
        return false
    }
    for _, c := range p.Caps() {
        if c.Name == capName {
            return true
        }
    }
    return false
}
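
// Usage sketch for HasCap (the capability name is illustrative):
//
//	if peer.HasCap("bzz") {
//		// run the capability-specific handshake
//	}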

12
vendor/github.com/ethereum/go-ethereum/p2p/protocols/protocol_test.go
generated
vendored
@ -142,9 +142,9 @@ func newProtocol(pp *p2ptest.TestPeerPool) func(*p2p.Peer, p2p.MsgReadWriter) er
    }
}

func protocolTester(t *testing.T, pp *p2ptest.TestPeerPool) *p2ptest.ProtocolTester {
func protocolTester(pp *p2ptest.TestPeerPool) *p2ptest.ProtocolTester {
    conf := adapters.RandomNodeConfig()
    return p2ptest.NewProtocolTester(t, conf.ID, 2, newProtocol(pp))
    return p2ptest.NewProtocolTester(conf.ID, 2, newProtocol(pp))
}

func protoHandshakeExchange(id enode.ID, proto *protoHandshake) []p2ptest.Exchange {
@ -173,7 +173,7 @@ func protoHandshakeExchange(id enode.ID, proto *protoHandshake) []p2ptest.Exchan

func runProtoHandshake(t *testing.T, proto *protoHandshake, errs ...error) {
    pp := p2ptest.NewTestPeerPool()
    s := protocolTester(t, pp)
    s := protocolTester(pp)
    // TODO: make this more than one handshake
    node := s.Nodes[0]
    if err := s.TestExchanges(protoHandshakeExchange(node.ID(), proto)...); err != nil {
@ -250,7 +250,7 @@ func TestProtocolHook(t *testing.T) {
    }

    conf := adapters.RandomNodeConfig()
    tester := p2ptest.NewProtocolTester(t, conf.ID, 2, runFunc)
    tester := p2ptest.NewProtocolTester(conf.ID, 2, runFunc)
    err := tester.TestExchanges(p2ptest.Exchange{
        Expects: []p2ptest.Expect{
            {
@ -389,7 +389,7 @@ func moduleHandshakeExchange(id enode.ID, resp uint) []p2ptest.Exchange {

func runModuleHandshake(t *testing.T, resp uint, errs ...error) {
    pp := p2ptest.NewTestPeerPool()
    s := protocolTester(t, pp)
    s := protocolTester(pp)
    node := s.Nodes[0]
    if err := s.TestExchanges(protoHandshakeExchange(node.ID(), &protoHandshake{42, "420"})...); err != nil {
        t.Fatal(err)
@ -469,7 +469,7 @@ func testMultiPeerSetup(a, b enode.ID) []p2ptest.Exchange {

func runMultiplePeers(t *testing.T, peer int, errs ...error) {
    pp := p2ptest.NewTestPeerPool()
    s := protocolTester(t, pp)
    s := protocolTester(pp)

    if err := s.TestExchanges(testMultiPeerSetup(s.Nodes[0].ID(), s.Nodes[1].ID())...); err != nil {
        t.Fatal(err)

28
vendor/github.com/ethereum/go-ethereum/p2p/protocols/reporter_test.go
generated
vendored
@ -43,21 +43,27 @@ func TestReporter(t *testing.T) {
    metrics := SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db"))
    log.Debug("Done.")

    //do some metrics
    //change metrics
    mBalanceCredit.Inc(12)
    mBytesCredit.Inc(34)
    mMsgDebit.Inc(9)

    //store expected metrics
    expectedBalanceCredit := mBalanceCredit.Count()
    expectedBytesCredit := mBytesCredit.Count()
    expectedMsgDebit := mMsgDebit.Count()

    //give the reporter time to write the metrics to DB
    time.Sleep(20 * time.Millisecond)

    //set the metrics to nil - this effectively simulates the node having shut down...
    mBalanceCredit = nil
    mBytesCredit = nil
    mMsgDebit = nil
    //close the DB also, or we can't create a new one
    metrics.Close()

    //clear the metrics - this effectively simulates the node having shut down...
    mBalanceCredit.Clear()
    mBytesCredit.Clear()
    mMsgDebit.Clear()

    //setup the metrics again
    log.Debug("Setting up metrics second time")
    metrics = SetupAccountingMetrics(reportInterval, filepath.Join(dir, "test.db"))
@ -65,13 +71,13 @@ func TestReporter(t *testing.T) {
    log.Debug("Done.")

    //now check the metrics, they should have the same value as before "shutdown"
    if mBalanceCredit.Count() != 12 {
        t.Fatalf("Expected counter to be %d, but is %d", 12, mBalanceCredit.Count())
    if mBalanceCredit.Count() != expectedBalanceCredit {
        t.Fatalf("Expected counter to be %d, but is %d", expectedBalanceCredit, mBalanceCredit.Count())
    }
    if mBytesCredit.Count() != 34 {
        t.Fatalf("Expected counter to be %d, but is %d", 23, mBytesCredit.Count())
    if mBytesCredit.Count() != expectedBytesCredit {
        t.Fatalf("Expected counter to be %d, but is %d", expectedBytesCredit, mBytesCredit.Count())
    }
    if mMsgDebit.Count() != 9 {
        t.Fatalf("Expected counter to be %d, but is %d", 9, mMsgDebit.Count())
    if mMsgDebit.Count() != expectedMsgDebit {
        t.Fatalf("Expected counter to be %d, but is %d", expectedMsgDebit, mMsgDebit.Count())
    }
}

43
vendor/github.com/ethereum/go-ethereum/p2p/simulations/connect.go
generated
vendored
@ -32,6 +32,9 @@ var (
// It is useful when constructing a chain network topology
// when Network adds and removes nodes dynamically.
func (net *Network) ConnectToLastNode(id enode.ID) (err error) {
    net.lock.Lock()
    defer net.lock.Unlock()

    ids := net.getUpNodeIDs()
    l := len(ids)
    if l < 2 {
@ -41,29 +44,35 @@ func (net *Network) ConnectToLastNode(id enode.ID) (err error) {
    if last == id {
        last = ids[l-2]
    }
    return net.connect(last, id)
    return net.connectNotConnected(last, id)
}

// ConnectToRandomNode connects the node with provided NodeID
// to a random node that is up.
func (net *Network) ConnectToRandomNode(id enode.ID) (err error) {
    selected := net.GetRandomUpNode(id)
    net.lock.Lock()
    defer net.lock.Unlock()

    selected := net.getRandomUpNode(id)
    if selected == nil {
        return ErrNodeNotFound
    }
    return net.connect(selected.ID(), id)
    return net.connectNotConnected(selected.ID(), id)
}

// ConnectNodesFull connects all nodes one to another.
// It provides a complete connectivity in the network
// which should be rarely needed.
func (net *Network) ConnectNodesFull(ids []enode.ID) (err error) {
    net.lock.Lock()
    defer net.lock.Unlock()

    if ids == nil {
        ids = net.getUpNodeIDs()
    }
    for i, lid := range ids {
        for _, rid := range ids[i+1:] {
            if err = net.connect(lid, rid); err != nil {
            if err = net.connectNotConnected(lid, rid); err != nil {
                return err
            }
        }
@ -74,12 +83,19 @@ func (net *Network) ConnectNodesFull(ids []enode.ID) (err error) {
// ConnectNodesChain connects all nodes in a chain topology.
// If ids argument is nil, all nodes that are up will be connected.
func (net *Network) ConnectNodesChain(ids []enode.ID) (err error) {
    net.lock.Lock()
    defer net.lock.Unlock()

    return net.connectNodesChain(ids)
}

func (net *Network) connectNodesChain(ids []enode.ID) (err error) {
    if ids == nil {
        ids = net.getUpNodeIDs()
    }
    l := len(ids)
    for i := 0; i < l-1; i++ {
        if err := net.connect(ids[i], ids[i+1]); err != nil {
        if err := net.connectNotConnected(ids[i], ids[i+1]); err != nil {
            return err
        }
    }
@ -89,6 +105,9 @@ func (net *Network) ConnectNodesChain(ids []enode.ID) (err error) {
// ConnectNodesRing connects all nodes in a ring topology.
// If ids argument is nil, all nodes that are up will be connected.
func (net *Network) ConnectNodesRing(ids []enode.ID) (err error) {
    net.lock.Lock()
    defer net.lock.Unlock()

    if ids == nil {
        ids = net.getUpNodeIDs()
    }
@ -96,15 +115,18 @@ func (net *Network) ConnectNodesRing(ids []enode.ID) (err error) {
    if l < 2 {
        return nil
    }
    if err := net.ConnectNodesChain(ids); err != nil {
    if err := net.connectNodesChain(ids); err != nil {
        return err
    }
    return net.connect(ids[l-1], ids[0])
    return net.connectNotConnected(ids[l-1], ids[0])
}

// ConnectNodesStar connects all nodes into a star topology
// If ids argument is nil, all nodes that are up will be connected.
func (net *Network) ConnectNodesStar(ids []enode.ID, center enode.ID) (err error) {
    net.lock.Lock()
    defer net.lock.Unlock()

    if ids == nil {
        ids = net.getUpNodeIDs()
    }
@ -112,16 +134,15 @@ func (net *Network) ConnectNodesStar(ids []enode.ID, center enode.ID) (err error
        if center == id {
            continue
        }
        if err := net.connect(center, id); err != nil {
        if err := net.connectNotConnected(center, id); err != nil {
            return err
        }
    }
    return nil
}

// connect connects two nodes but ignores already connected error.
func (net *Network) connect(oneID, otherID enode.ID) error {
    return ignoreAlreadyConnectedErr(net.Connect(oneID, otherID))
func (net *Network) connectNotConnected(oneID, otherID enode.ID) error {
    return ignoreAlreadyConnectedErr(net.connect(oneID, otherID))
}
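
// Note the layering above: the exported topology builders now take net.lock
// once, build edges through the unexported connectNodesChain, and route all
// edge creation via connectNotConnected, which swallows the already-connected
// error so re-wiring an existing topology stays idempotent.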

func ignoreAlreadyConnectedErr(err error) error {

2
vendor/github.com/ethereum/go-ethereum/p2p/simulations/events.go
generated
vendored
@ -100,7 +100,7 @@ func ControlEvent(v interface{}) *Event {
func (e *Event) String() string {
    switch e.Type {
    case EventTypeNode:
        return fmt.Sprintf("<node-event> id: %s up: %t", e.Node.ID().TerminalString(), e.Node.Up)
        return fmt.Sprintf("<node-event> id: %s up: %t", e.Node.ID().TerminalString(), e.Node.Up())
    case EventTypeConn:
        return fmt.Sprintf("<conn-event> nodes: %s->%s up: %t", e.Conn.One.TerminalString(), e.Conn.Other.TerminalString(), e.Conn.Up)
    case EventTypeMsg:

18
vendor/github.com/ethereum/go-ethereum/p2p/simulations/http_test.go
generated
vendored
@ -421,14 +421,15 @@ type expectEvents struct {
}

func (t *expectEvents) nodeEvent(id string, up bool) *Event {
    node := Node{
        Config: &adapters.NodeConfig{
            ID: enode.HexID(id),
        },
        up: up,
    }
    return &Event{
        Type: EventTypeNode,
        Node: &Node{
            Config: &adapters.NodeConfig{
                ID: enode.HexID(id),
            },
            Up: up,
        },
        Node: &node,
    }
}

@ -480,6 +481,7 @@ loop:
}

func (t *expectEvents) expect(events ...*Event) {
    t.Helper()
    timeout := time.After(10 * time.Second)
    i := 0
    for {
@ -501,8 +503,8 @@ func (t *expectEvents) expect(events ...*Event) {
            if event.Node.ID() != expected.Node.ID() {
                t.Fatalf("expected node event %d to have id %q, got %q", i, expected.Node.ID().TerminalString(), event.Node.ID().TerminalString())
            }
            if event.Node.Up != expected.Node.Up {
                t.Fatalf("expected node event %d to have up=%t, got up=%t", i, expected.Node.Up, event.Node.Up)
            if event.Node.Up() != expected.Node.Up() {
                t.Fatalf("expected node event %d to have up=%t, got up=%t", i, expected.Node.Up(), event.Node.Up())
            }

        case EventTypeConn:

9
vendor/github.com/ethereum/go-ethereum/p2p/simulations/mocker_test.go
generated
vendored
@ -90,15 +90,12 @@ func TestMocker(t *testing.T) {
    for {
        select {
        case event := <-events:
            //if the event is a node Up event only
            if event.Node != nil && event.Node.Up {
            if isNodeUp(event) {
                //add the correspondent node ID to the map
                nodemap[event.Node.Config.ID] = true
                //this means all nodes got a nodeUp event, so we can continue the test
                if len(nodemap) == nodeCount {
                    nodesComplete = true
                    //wait for 3s as the mocker will need time to connect the nodes
                    //time.Sleep( 3 *time.Second)
                }
            } else if event.Conn != nil && nodesComplete {
                connCount += 1
@ -169,3 +166,7 @@ func TestMocker(t *testing.T) {
        t.Fatalf("Expected empty list of nodes, got: %d", len(nodesInfo))
    }
}

func isNodeUp(event *Event) bool {
    return event.Node != nil && event.Node.Up()
}
|
||||
vendor/github.com/ethereum/go-ethereum/p2p/simulations/network.go (generated, vendored, 160 changes)
@@ -136,7 +136,7 @@ func (net *Network) Config() *NetworkConfig {
 // StartAll starts all nodes in the network
 func (net *Network) StartAll() error {
 	for _, node := range net.Nodes {
-		if node.Up {
+		if node.Up() {
 			continue
 		}
 		if err := net.Start(node.ID()); err != nil {
@@ -149,7 +149,7 @@ func (net *Network) StartAll() error {
 // StopAll stops all nodes in the network
 func (net *Network) StopAll() error {
 	for _, node := range net.Nodes {
-		if !node.Up {
+		if !node.Up() {
 			continue
 		}
 		if err := net.Stop(node.ID()); err != nil {
@@ -174,7 +174,7 @@ func (net *Network) startWithSnapshots(id enode.ID, snapshots map[string][]byte)
 	if node == nil {
 		return fmt.Errorf("node %v does not exist", id)
 	}
-	if node.Up {
+	if node.Up() {
 		return fmt.Errorf("node %v already up", id)
 	}
 	log.Trace("Starting node", "id", id, "adapter", net.nodeAdapter.Name())
@@ -182,10 +182,10 @@ func (net *Network) startWithSnapshots(id enode.ID, snapshots map[string][]byte)
 		log.Warn("Node startup failed", "id", id, "err", err)
 		return err
 	}
-	node.Up = true
+	node.SetUp(true)
 	log.Info("Started node", "id", id)
-
-	net.events.Send(NewEvent(node))
+	ev := NewEvent(node)
+	net.events.Send(ev)

 	// subscribe to peer events
 	client, err := node.Client()
@@ -210,12 +210,14 @@ func (net *Network) watchPeerEvents(id enode.ID, events chan *p2p.PeerEvent, sub
 		// assume the node is now down
 		net.lock.Lock()
 		defer net.lock.Unlock()
+
 		node := net.getNode(id)
 		if node == nil {
 			return
 		}
-		node.Up = false
-		net.events.Send(NewEvent(node))
+		node.SetUp(false)
+		ev := NewEvent(node)
+		net.events.Send(ev)
 	}()
 	for {
 		select {
@@ -251,34 +253,57 @@ func (net *Network) watchPeerEvents(id enode.ID, events chan *p2p.PeerEvent, sub

 // Stop stops the node with the given ID
 func (net *Network) Stop(id enode.ID) error {
-	net.lock.Lock()
-	node := net.getNode(id)
-	if node == nil {
-		return fmt.Errorf("node %v does not exist", id)
-	}
-	if !node.Up {
-		return fmt.Errorf("node %v already down", id)
-	}
-	node.Up = false
-	net.lock.Unlock()
+	// IMPORTANT: node.Stop() must NOT be called under net.lock as
+	// node.Reachable() closure has a reference to the network and
+	// calls net.InitConn() what also locks the network. => DEADLOCK
+	// That holds until the following ticket is not resolved:

-	err := node.Stop()
-	if err != nil {
+	var err error
+
+	node, err := func() (*Node, error) {
 		net.lock.Lock()
-		node.Up = true
-		net.lock.Unlock()
+		defer net.lock.Unlock()
+
+		node := net.getNode(id)
+		if node == nil {
+			return nil, fmt.Errorf("node %v does not exist", id)
+		}
+		if !node.Up() {
+			return nil, fmt.Errorf("node %v already down", id)
+		}
+		node.SetUp(false)
+		return node, nil
+	}()
+	if err != nil {
 		return err
 	}
+
+	err = node.Stop() // must be called without net.lock
+
+	net.lock.Lock()
+	defer net.lock.Unlock()
+
+	if err != nil {
+		node.SetUp(true)
+		return err
+	}
 	log.Info("Stopped node", "id", id, "err", err)
-	net.events.Send(ControlEvent(node))
+	ev := ControlEvent(node)
+	net.events.Send(ev)
 	return nil
 }

 // Connect connects two nodes together by calling the "admin_addPeer" RPC
 // method on the "one" node so that it connects to the "other" node
 func (net *Network) Connect(oneID, otherID enode.ID) error {
+	net.lock.Lock()
+	defer net.lock.Unlock()
+	return net.connect(oneID, otherID)
+}
+
+func (net *Network) connect(oneID, otherID enode.ID) error {
 	log.Debug("Connecting nodes with addPeer", "id", oneID, "other", otherID)
-	conn, err := net.InitConn(oneID, otherID)
+	conn, err := net.initConn(oneID, otherID)
 	if err != nil {
 		return err
 	}
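The rewritten Stop takes the lock inside an immediately-invoked closure so it is released before node.Stop() runs, which may re-enter the network lock via node.Reachable(). A minimal sketch of that scoped-lock pattern, with hypothetical names:

    package main

    import (
    	"fmt"
    	"sync"
    )

    var mu sync.Mutex
    var state = "up"

    // setDown flips the state under the lock, but the lock is held only for
    // the lifetime of the closure, not for the whole enclosing function.
    func setDown() (string, error) {
    	return func() (string, error) {
    		mu.Lock()
    		defer mu.Unlock() // released when the closure returns
    		prev := state
    		state = "down"
    		return prev, nil
    	}()
    }

    func main() {
    	prev, _ := setDown()
    	// mu is free here, so a callback that locks mu again cannot deadlock
    	fmt.Println("previous state:", prev)
    }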
@@ -376,6 +401,14 @@ func (net *Network) GetNode(id enode.ID) *Node {
 	return net.getNode(id)
 }

+func (net *Network) getNode(id enode.ID) *Node {
+	i, found := net.nodeMap[id]
+	if !found {
+		return nil
+	}
+	return net.Nodes[i]
+}
+
 // GetNode gets the node with the given name, returning nil if the node does
 // not exist
 func (net *Network) GetNodeByName(name string) *Node {
@@ -398,28 +431,29 @@ func (net *Network) GetNodes() (nodes []*Node) {
 	net.lock.RLock()
 	defer net.lock.RUnlock()

-	nodes = append(nodes, net.Nodes...)
-	return nodes
+	return net.getNodes()
 }

-func (net *Network) getNode(id enode.ID) *Node {
-	i, found := net.nodeMap[id]
-	if !found {
-		return nil
-	}
-	return net.Nodes[i]
+func (net *Network) getNodes() (nodes []*Node) {
+	nodes = append(nodes, net.Nodes...)
+	return nodes
 }

 // GetRandomUpNode returns a random node on the network, which is running.
 func (net *Network) GetRandomUpNode(excludeIDs ...enode.ID) *Node {
+	net.lock.RLock()
+	defer net.lock.RUnlock()
+	return net.getRandomUpNode(excludeIDs...)
+}
+
+// GetRandomUpNode returns a random node on the network, which is running.
+func (net *Network) getRandomUpNode(excludeIDs ...enode.ID) *Node {
 	return net.getRandomNode(net.getUpNodeIDs(), excludeIDs)
 }

 func (net *Network) getUpNodeIDs() (ids []enode.ID) {
 	for _, node := range net.Nodes {
-		if node.Up {
+		if node.Up() {
 			ids = append(ids, node.ID())
 		}
 	}
@@ -434,8 +468,8 @@ func (net *Network) GetRandomDownNode(excludeIDs ...enode.ID) *Node {
 }

 func (net *Network) getDownNodeIDs() (ids []enode.ID) {
-	for _, node := range net.GetNodes() {
-		if !node.Up {
+	for _, node := range net.getNodes() {
+		if !node.Up() {
 			ids = append(ids, node.ID())
 		}
 	}
@@ -449,7 +483,7 @@ func (net *Network) getRandomNode(ids []enode.ID, excludeIDs []enode.ID) *Node {
 	if l == 0 {
 		return nil
 	}
-	return net.GetNode(filtered[rand.Intn(l)])
+	return net.getNode(filtered[rand.Intn(l)])
 }

 func filterIDs(ids []enode.ID, excludeIDs []enode.ID) []enode.ID {
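These hunks converge on one locking convention: the exported method acquires the lock and delegates to an unexported helper that assumes the lock is already held. A compact sketch of the convention on a hypothetical type:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type registry struct {
    	lock  sync.RWMutex
    	items []string
    }

    // Items is the exported, locking entry point.
    func (r *registry) Items() []string {
    	r.lock.RLock()
    	defer r.lock.RUnlock()
    	return r.getItems()
    }

    // getItems must only be called with r.lock held; internal callers that
    // already hold the lock use it directly and avoid self-deadlock.
    func (r *registry) getItems() []string {
    	return append([]string(nil), r.items...) // copy so callers can't race on the slice
    }

    func main() {
    	r := &registry{items: []string{"a", "b"}}
    	fmt.Println(r.Items())
    }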
@@ -527,6 +561,10 @@ func (net *Network) getConn(oneID, otherID enode.ID) *Conn {
 func (net *Network) InitConn(oneID, otherID enode.ID) (*Conn, error) {
 	net.lock.Lock()
 	defer net.lock.Unlock()
+	return net.initConn(oneID, otherID)
+}
+
+func (net *Network) initConn(oneID, otherID enode.ID) (*Conn, error) {
 	if oneID == otherID {
 		return nil, fmt.Errorf("refusing to connect to self %v", oneID)
 	}
@@ -584,8 +622,21 @@ type Node struct {
 	// Config if the config used to created the node
 	Config *adapters.NodeConfig `json:"config"`

-	// Up tracks whether or not the node is running
-	Up bool `json:"up"`
+	// up tracks whether or not the node is running
+	up   bool
+	upMu sync.RWMutex
+}
+
+func (n *Node) Up() bool {
+	n.upMu.RLock()
+	defer n.upMu.RUnlock()
+	return n.up
+}
+
+func (n *Node) SetUp(up bool) {
+	n.upMu.Lock()
+	defer n.upMu.Unlock()
+	n.up = up
 }

 // ID returns the ID of the node
@@ -619,10 +670,29 @@ func (n *Node) MarshalJSON() ([]byte, error) {
 	}{
 		Info:   n.NodeInfo(),
 		Config: n.Config,
-		Up:     n.Up,
+		Up:     n.Up(),
 	})
 }

+// UnmarshalJSON implements json.Unmarshaler interface so that we don't lose
+// Node.up status. IMPORTANT: The implementation is incomplete; we lose
+// p2p.NodeInfo.
+func (n *Node) UnmarshalJSON(raw []byte) error {
+	// TODO: How should we turn back NodeInfo into n.Node?
+	// Ticket: https://github.com/ethersphere/go-ethereum/issues/1177
+	node := struct {
+		Config *adapters.NodeConfig `json:"config,omitempty"`
+		Up     bool                 `json:"up"`
+	}{}
+	if err := json.Unmarshal(raw, &node); err != nil {
+		return err
+	}
+
+	n.SetUp(node.Up)
+	n.Config = node.Config
+	return nil
+}
+
 // Conn represents a connection between two nodes in the network
 type Conn struct {
 	// One is the node which initiated the connection
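A round-trip sketch of the Marshal/Unmarshal pair above: the unexported up field survives JSON encoding only because both directions are hand-written. The types below are simplified stand-ins, not simulations.Node itself:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type node struct {
    	up bool
    }

    // MarshalJSON exposes the unexported flag under a public key.
    func (n *node) MarshalJSON() ([]byte, error) {
    	return json.Marshal(struct {
    		Up bool `json:"up"`
    	}{Up: n.up})
    }

    // UnmarshalJSON reads it back; without this, the flag would be dropped.
    func (n *node) UnmarshalJSON(raw []byte) error {
    	aux := struct {
    		Up bool `json:"up"`
    	}{}
    	if err := json.Unmarshal(raw, &aux); err != nil {
    		return err
    	}
    	n.up = aux.Up
    	return nil
    }

    func main() {
    	out, _ := json.Marshal(&node{up: true})
    	var back node
    	_ = json.Unmarshal(out, &back)
    	fmt.Printf("%s -> up=%t\n", out, back.up) // {"up":true} -> up=true
    }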
@@ -642,10 +712,10 @@ type Conn struct {

 // nodesUp returns whether both nodes are currently up
 func (c *Conn) nodesUp() error {
-	if !c.one.Up {
+	if !c.one.Up() {
 		return fmt.Errorf("one %v is not up", c.One)
 	}
-	if !c.other.Up {
+	if !c.other.Up() {
 		return fmt.Errorf("other %v is not up", c.Other)
 	}
 	return nil
@@ -717,7 +787,7 @@ func (net *Network) snapshot(addServices []string, removeServices []string) (*Sn
 	}
 	for i, node := range net.Nodes {
 		snap.Nodes[i] = NodeSnapshot{Node: *node}
-		if !node.Up {
+		if !node.Up() {
 			continue
 		}
 		snapshots, err := node.Snapshots()
@@ -772,7 +842,7 @@ func (net *Network) Load(snap *Snapshot) error {
 		if _, err := net.NewNodeWithConfig(n.Node.Config); err != nil {
 			return err
 		}
-		if !n.Node.Up {
+		if !n.Node.Up() {
 			continue
 		}
 		if err := net.startWithSnapshots(n.Node.Config.ID, n.Snapshots); err != nil {
@@ -844,7 +914,7 @@ func (net *Network) Load(snap *Snapshot) error {
 	// Start connecting.
 	for _, conn := range snap.Conns {

-		if !net.GetNode(conn.One).Up || !net.GetNode(conn.Other).Up {
+		if !net.GetNode(conn.One).Up() || !net.GetNode(conn.Other).Up() {
 			//in this case, at least one of the nodes of a connection is not up,
 			//so it would result in the snapshot `Load` to fail
 			continue
@@ -898,7 +968,7 @@ func (net *Network) executeControlEvent(event *Event) {
 }

 func (net *Network) executeNodeEvent(e *Event) error {
-	if !e.Node.Up {
+	if !e.Node.Up() {
 		return net.Stop(e.Node.ID())
 	}
vendor/github.com/ethereum/go-ethereum/p2p/simulations/network_test.go (generated, vendored, 135 changes)
@@ -20,6 +20,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"reflect"
 	"strconv"
 	"strings"
 	"testing"
@@ -485,3 +486,137 @@ func benchmarkMinimalServiceTmp(b *testing.B) {
 		}
 	}
 }
+
+func TestNode_UnmarshalJSON(t *testing.T) {
+	t.Run(
+		"test unmarshal of Node up field",
+		func(t *testing.T) {
+			runNodeUnmarshalJSON(t, casesNodeUnmarshalJSONUpField())
+		},
+	)
+	t.Run(
+		"test unmarshal of Node Config field",
+		func(t *testing.T) {
+			runNodeUnmarshalJSON(t, casesNodeUnmarshalJSONConfigField())
+		},
+	)
+}
+
+func runNodeUnmarshalJSON(t *testing.T, tests []nodeUnmarshalTestCase) {
+	t.Helper()
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var got Node
+			if err := got.UnmarshalJSON([]byte(tt.marshaled)); err != nil {
+				expectErrorMessageToContain(t, err, tt.wantErr)
+			}
+			expectNodeEquality(t, got, tt.want)
+		})
+	}
+}
+
+type nodeUnmarshalTestCase struct {
+	name      string
+	marshaled string
+	want      Node
+	wantErr   string
+}
+
+func expectErrorMessageToContain(t *testing.T, got error, want string) {
+	t.Helper()
+	if got == nil && want == "" {
+		return
+	}
+
+	if got == nil && want != "" {
+		t.Errorf("error was expected, got: nil, want: %v", want)
+		return
+	}
+
+	if !strings.Contains(got.Error(), want) {
+		t.Errorf(
+			"unexpected error message, got %v, want: %v",
+			want,
+			got,
+		)
+	}
+}
+
+func expectNodeEquality(t *testing.T, got Node, want Node) {
+	t.Helper()
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("Node.UnmarshalJSON() = %v, want %v", got, want)
+	}
+}
+
+func casesNodeUnmarshalJSONUpField() []nodeUnmarshalTestCase {
+	return []nodeUnmarshalTestCase{
+		{
+			name:      "empty json",
+			marshaled: "{}",
+			want: Node{
+				up: false,
+			},
+		},
+		{
+			name:      "a stopped node",
+			marshaled: "{\"up\": false}",
+			want: Node{
+				up: false,
+			},
+		},
+		{
+			name:      "a running node",
+			marshaled: "{\"up\": true}",
+			want: Node{
+				up: true,
+			},
+		},
+		{
+			name:      "invalid JSON value on valid key",
+			marshaled: "{\"up\": foo}",
+			wantErr:   "invalid character",
+		},
+		{
+			name:      "invalid JSON key and value",
+			marshaled: "{foo: bar}",
+			wantErr:   "invalid character",
+		},
+		{
+			name:      "bool value expected but got something else (string)",
+			marshaled: "{\"up\": \"true\"}",
+			wantErr:   "cannot unmarshal string into Go struct",
+		},
+	}
+}
+
+func casesNodeUnmarshalJSONConfigField() []nodeUnmarshalTestCase {
+	// Don't do a big fuss around testing, as adapters.NodeConfig should
+	// handle it's own serialization. Just do a sanity check.
+	return []nodeUnmarshalTestCase{
+		{
+			name:      "Config field is omitted",
+			marshaled: "{}",
+			want: Node{
+				Config: nil,
+			},
+		},
+		{
+			name:      "Config field is nil",
+			marshaled: "{\"config\": nil}",
+			want: Node{
+				Config: nil,
+			},
+		},
+		{
+			name:      "a non default Config field",
+			marshaled: "{\"config\":{\"name\":\"node_ecdd0\",\"port\":44665}}",
+			want: Node{
+				Config: &adapters.NodeConfig{
+					Name: "node_ecdd0",
+					Port: 44665,
+				},
+			},
+		},
+	}
+}
vendor/github.com/ethereum/go-ethereum/p2p/testing/protocoltester.go (generated, vendored, 3 changes)
@@ -30,7 +30,6 @@ import (
 	"io/ioutil"
 	"strings"
 	"sync"
-	"testing"

 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
@@ -52,7 +51,7 @@ type ProtocolTester struct {
 // NewProtocolTester constructs a new ProtocolTester
 // it takes as argument the pivot node id, the number of dummy peers and the
 // protocol run function called on a peer connection by the p2p server
-func NewProtocolTester(t *testing.T, id enode.ID, n int, run func(*p2p.Peer, p2p.MsgReadWriter) error) *ProtocolTester {
+func NewProtocolTester(id enode.ID, n int, run func(*p2p.Peer, p2p.MsgReadWriter) error) *ProtocolTester {
 	services := adapters.Services{
 		"test": func(ctx *adapters.ServiceContext) (node.Service, error) {
 			return &testNode{run}, nil
vendor/github.com/ethereum/go-ethereum/params/bootnodes.go (generated, vendored, 14 changes)
@@ -47,6 +47,20 @@ var RinkebyBootnodes = []string{
 	"enode://b6b28890b006743680c52e64e0d16db57f28124885595fa03a562be1d2bf0f3a1da297d56b13da25fb992888fd556d4c1a27b1f39d531bde7de1921c90061cc6@159.89.28.211:30303", // AKASHA
 }

+// GoerliBootnodes are the enode URLs of the P2P bootstrap nodes running on the
+// Görli test network.
+var GoerliBootnodes = []string{
+	// Upstrem bootnodes
+	"enode://011f758e6552d105183b1761c5e2dea0111bc20fd5f6422bc7f91e0fabbec9a6595caf6239b37feb773dddd3f87240d99d859431891e4a642cf2a0a9e6cbb98a@51.141.78.53:30303",
+	"enode://176b9417f511d05b6b2cf3e34b756cf0a7096b3094572a8f6ef4cdcb9d1f9d00683bf0f83347eebdf3b81c3521c2332086d9592802230bf528eaf606a1d9677b@13.93.54.137:30303",
+	"enode://46add44b9f13965f7b9875ac6b85f016f341012d84f975377573800a863526f4da19ae2c620ec73d11591fa9510e992ecc03ad0751f53cc02f7c7ed6d55c7291@94.237.54.114:30313",
+	"enode://c1f8b7c2ac4453271fa07d8e9ecf9a2e8285aa0bd0c07df0131f47153306b0736fd3db8924e7a9bf0bed6b1d8d4f87362a71b033dc7c64547728d953e43e59b2@52.64.155.147:30303",
+	"enode://f4a9c6ee28586009fb5a96c8af13a58ed6d8315a9eee4772212c1d4d9cebe5a8b8a78ea4434f318726317d04a3f531a1ef0420cf9752605a562cfe858c46e263@213.186.16.82:30303",
+
+	// Ethereum Foundation bootnode
+	"enode://573b6607cd59f241e30e4c4943fd50e99e2b6f42f9bd5ca111659d309c06741247f4f1e93843ad3e8c8c18b6e2d94c161b7ef67479b3938780a97134b618b5ce@52.56.136.200:30303",
+}
+
 // DiscoveryV5Bootnodes are the enode URLs of the P2P bootstrap nodes for the
 // experimental RLPx v5 topic-discovery network.
 var DiscoveryV5Bootnodes = []string{
vendor/github.com/ethereum/go-ethereum/params/config.go (generated, vendored, 83 changes)
@@ -42,17 +42,18 @@ var (
 		EIP155Block:         big.NewInt(2675000),
 		EIP158Block:         big.NewInt(2675000),
 		ByzantiumBlock:      big.NewInt(4370000),
-		ConstantinopleBlock: nil,
+		ConstantinopleBlock: big.NewInt(7280000),
+		PetersburgBlock:     big.NewInt(7280000),
 		Ethash:              new(EthashConfig),
 	}

 	// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
 	MainnetTrustedCheckpoint = &TrustedCheckpoint{
 		Name:         "mainnet",
-		SectionIndex: 208,
-		SectionHead:  common.HexToHash("0x5e9f7696c397d9df8f3b1abda857753575c6f5cff894e1a3d9e1a2af1bd9d6ac"),
-		CHTRoot:      common.HexToHash("0x954a63134f6897f015f026387c59c98c4dae7b336610ff5a143455aac9153e9d"),
-		BloomRoot:    common.HexToHash("0x8006c5e44b14d90d7cc9cd5fa1cb48cf53697ee3bbbf4b76fdfa70b0242500a9"),
+		SectionIndex: 216,
+		SectionHead:  common.HexToHash("0xae3e551c8d60d06fd411a8e6008e90625d3bb0cbbf664b65d5ed90b318553541"),
+		CHTRoot:      common.HexToHash("0xeea7d2ab3545a37deecc66fc43c9556ae337c3ea1c6893e401428207bdb8e434"),
+		BloomRoot:    common.HexToHash("0xb0d4176d160d67b99a9f963281e52bce0583a566b74b4497fe3ed24ae04004ff"),
 	}

 	// TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network.
@@ -67,16 +68,17 @@ var (
 		EIP158Block:         big.NewInt(10),
 		ByzantiumBlock:      big.NewInt(1700000),
 		ConstantinopleBlock: big.NewInt(4230000),
+		PetersburgBlock:     big.NewInt(4939394),
 		Ethash:              new(EthashConfig),
 	}

 	// TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
 	TestnetTrustedCheckpoint = &TrustedCheckpoint{
 		Name:         "testnet",
-		SectionIndex: 139,
-		SectionHead:  common.HexToHash("0x9fad89a5e3b993c8339b9cf2cbbeb72cd08774ea6b71b105b3dd880420c618f4"),
-		CHTRoot:      common.HexToHash("0xc815833881989c5d2035147e1a79a33d22cbc5313e104ff01e6ab405bd28b317"),
-		BloomRoot:    common.HexToHash("0xd94ee9f3c480858f53ec5d059aebdbb2e8d904702f100875ee59ec5f366e841d"),
+		SectionIndex: 148,
+		SectionHead:  common.HexToHash("0x4d3181bedb6aa96a6f3efa866c71f7802400d0fb4a6906946c453630d850efc0"),
+		CHTRoot:      common.HexToHash("0x25df2f9d63a5f84b2852988f0f0f7af5a7877da061c11b85c812780b5a27a5ec"),
+		BloomRoot:    common.HexToHash("0x0584834e5222471a06c669d210e302ca602780eaaddd04634fd65471c2a91419"),
 	}

 	// RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network.
@@ -91,6 +93,7 @@ var (
 		EIP158Block:         big.NewInt(3),
 		ByzantiumBlock:      big.NewInt(1035301),
 		ConstantinopleBlock: big.NewInt(3660663),
+		PetersburgBlock:     big.NewInt(9999999), //TODO! Insert Rinkeby block number
 		Clique: &CliqueConfig{
 			Period: 15,
 			Epoch:  30000,
@@ -100,10 +103,37 @@ var (
 	// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
 	RinkebyTrustedCheckpoint = &TrustedCheckpoint{
 		Name:         "rinkeby",
-		SectionIndex: 105,
-		SectionHead:  common.HexToHash("0xec8147d43f936258aaf1b9b9ec91b0a853abf7109f436a23649be809ea43d507"),
-		CHTRoot:      common.HexToHash("0xd92703b444846a3db928e87e450770e5d5cbe193131dc8f7c4cf18b4de925a75"),
-		BloomRoot:    common.HexToHash("0xff45a6f807138a2cde0cea0c209d9ce5ad8e43ccaae5a7c41af801bb72a1ef96"),
+		SectionIndex: 113,
+		SectionHead:  common.HexToHash("0xb812f3095af3af1cb2de7d7c2086ee807736a7315992c461b0986699185daf77"),
+		CHTRoot:      common.HexToHash("0x5416d0924925eb835987ad3d1f059ecc66778c51959c8246a7a35b22ec5f3109"),
+		BloomRoot:    common.HexToHash("0xcf74ca2c14e843b366561dab4fc64237bf6bb335119cbc97d723f3b501863470"),
 	}

+	// GoerliChainConfig contains the chain parameters to run a node on the Görli test network.
+	GoerliChainConfig = &ChainConfig{
+		ChainID:             big.NewInt(5),
+		HomesteadBlock:      big.NewInt(0),
+		DAOForkBlock:        nil,
+		DAOForkSupport:      true,
+		EIP150Block:         big.NewInt(0),
+		EIP155Block:         big.NewInt(0),
+		EIP158Block:         big.NewInt(0),
+		ByzantiumBlock:      big.NewInt(0),
+		ConstantinopleBlock: big.NewInt(0),
+		PetersburgBlock:     big.NewInt(0),
+		Clique: &CliqueConfig{
+			Period: 15,
+			Epoch:  30000,
+		},
+	}
+
+	// GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network.
+	GoerliTrustedCheckpoint = &TrustedCheckpoint{
+		Name:         "goerli",
+		SectionIndex: 0,
+		SectionHead:  common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+		CHTRoot:      common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+		BloomRoot:    common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+	}
+
 	// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
@@ -111,16 +141,16 @@ var (
 	//
 	// This configuration is intentionally not using keyed fields to force anyone
 	// adding flags to the config to also have to set these fields.
-	AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
+	AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}

 	// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
 	// and accepted by the Ethereum core developers into the Clique consensus.
 	//
 	// This configuration is intentionally not using keyed fields to force anyone
 	// adding flags to the config to also have to set these fields.
-	AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
+	AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}

-	TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
+	TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
 	TestRules       = TestChainConfig.Rules(new(big.Int))
 )

@@ -158,6 +188,7 @@ type ChainConfig struct {

 	ByzantiumBlock      *big.Int `json:"byzantiumBlock,omitempty"`      // Byzantium switch block (nil = no fork, 0 = already on byzantium)
 	ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated)
+	PetersburgBlock     *big.Int `json:"petersburgBlock,omitempty"`     // Petersburg switch block (nil = same as Constantinople)
 	EWASMBlock          *big.Int `json:"ewasmBlock,omitempty"`          // EWASM switch block (nil = no fork, 0 = already activated)

 	// Various consensus engines
@@ -195,7 +226,7 @@ func (c *ChainConfig) String() string {
 	default:
 		engine = "unknown"
 	}
-	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Engine: %v}",
+	return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v ConstantinopleFix: %v Engine: %v}",
 		c.ChainID,
 		c.HomesteadBlock,
 		c.DAOForkBlock,
@@ -205,6 +236,7 @@ func (c *ChainConfig) String() string {
 		c.EIP158Block,
 		c.ByzantiumBlock,
 		c.ConstantinopleBlock,
+		c.PetersburgBlock,
 		engine,
 	)
 }
@@ -244,6 +276,13 @@ func (c *ChainConfig) IsConstantinople(num *big.Int) bool {
 	return isForked(c.ConstantinopleBlock, num)
 }

+// IsPetersburg returns whether num is either
+// - equal to or greater than the PetersburgBlock fork block,
+// - OR is nil, and Constantinople is active
+func (c *ChainConfig) IsPetersburg(num *big.Int) bool {
+	return isForked(c.PetersburgBlock, num) || c.PetersburgBlock == nil && isForked(c.ConstantinopleBlock, num)
+}
+
 // IsEWASM returns whether num represents a block number after the EWASM fork
 func (c *ChainConfig) IsEWASM(num *big.Int) bool {
 	return isForked(c.EWASMBlock, num)
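The IsPetersburg predicate makes a nil PetersburgBlock inherit Constantinople's activation block. A self-contained check of both cases, restating isForked as it appears in params for the purposes of the sketch:

    package main

    import (
    	"fmt"
    	"math/big"
    )

    // isForked reports whether the fork scheduled at block s is active at head.
    func isForked(s, head *big.Int) bool {
    	if s == nil || head == nil {
    		return false
    	}
    	return s.Cmp(head) <= 0
    }

    func isPetersburg(petersburg, constantinople, num *big.Int) bool {
    	return isForked(petersburg, num) || petersburg == nil && isForked(constantinople, num)
    }

    func main() {
    	num := big.NewInt(7280000)
    	// explicit activation, as configured for mainnet above
    	fmt.Println(isPetersburg(big.NewInt(7280000), big.NewInt(7280000), num)) // true
    	// a nil PetersburgBlock falls back to the Constantinople block
    	fmt.Println(isPetersburg(nil, big.NewInt(7280000), num)) // true
    }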
@@ -314,6 +353,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi
 	if isForkIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, head) {
 		return newCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock)
 	}
+	if isForkIncompatible(c.PetersburgBlock, newcfg.PetersburgBlock, head) {
+		return newCompatError("ConstantinopleFix fork block", c.PetersburgBlock, newcfg.PetersburgBlock)
+	}
 	if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) {
 		return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock)
 	}
@@ -381,9 +423,9 @@ func (err *ConfigCompatError) Error() string {
 // Rules is a one time interface meaning that it shouldn't be used in between transition
 // phases.
 type Rules struct {
-	ChainID                                   *big.Int
-	IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
-	IsByzantium, IsConstantinople             bool
+	ChainID                                     *big.Int
+	IsHomestead, IsEIP150, IsEIP155, IsEIP158   bool
+	IsByzantium, IsConstantinople, IsPetersburg bool
 }

 // Rules ensures c's ChainID is not nil.
@@ -400,5 +442,6 @@ func (c *ChainConfig) Rules(num *big.Int) Rules {
 		IsEIP158:         c.IsEIP158(num),
 		IsByzantium:      c.IsByzantium(num),
 		IsConstantinople: c.IsConstantinople(num),
+		IsPetersburg:     c.IsPetersburg(num),
 	}
 }
vendor/github.com/ethereum/go-ethereum/params/version.go (generated, vendored, 2 changes)
@@ -23,7 +23,7 @@ import (
 const (
 	VersionMajor = 1        // Major version component of the current release
 	VersionMinor = 8        // Minor version component of the current release
-	VersionPatch = 21       // Patch version component of the current release
+	VersionPatch = 23       // Patch version component of the current release
 	VersionMeta  = "stable" // Version metadata to append to the version string
 )
vendor/github.com/ethereum/go-ethereum/swarm/api/config.go (generated, vendored, 2 changes)
@@ -66,10 +66,12 @@ type Config struct {
 	DeliverySkipCheck    bool
 	MaxStreamPeerServers int
 	LightNodeEnabled     bool
+	BootnodeMode         bool
 	SyncUpdateDelay      time.Duration
 	SwapAPI              string
 	Cors                 string
 	BzzAccount           string
+	GlobalStoreAPI       string
 	privateKey           *ecdsa.PrivateKey
 }
vendor/github.com/ethereum/go-ethereum/swarm/api/inspector.go (generated, vendored, new file, 58 lines)
@@ -0,0 +1,58 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package api
+
+import (
+	"context"
+
+	"github.com/ethereum/go-ethereum/swarm/network"
+	"github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+type Inspector struct {
+	api      *API
+	hive     *network.Hive
+	netStore *storage.NetStore
+}
+
+func NewInspector(api *API, hive *network.Hive, netStore *storage.NetStore) *Inspector {
+	return &Inspector{api, hive, netStore}
+}
+
+// Hive prints the kademlia table
+func (inspector *Inspector) Hive() string {
+	return inspector.hive.String()
+}
+
+type HasInfo struct {
+	Addr string `json:"address"`
+	Has  bool   `json:"has"`
+}
+
+// Has checks whether each chunk address is present in the underlying datastore,
+// the bool in the returned structs indicates if the underlying datastore has
+// the chunk stored with the given address (true), or not (false)
+func (inspector *Inspector) Has(chunkAddresses []storage.Address) []HasInfo {
+	results := make([]HasInfo, 0)
+	for _, addr := range chunkAddresses {
+		res := HasInfo{}
+		res.Addr = addr.String()
+		res.Has = inspector.netStore.Has(context.Background(), addr)
+		results = append(results, res)
+	}
+	return results
+}
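A hypothetical usage sketch for the new Inspector.Has API; constructing the API, Hive and NetStore is elided, so this is a compile-only illustration against the package shown above:

    package inspectorexample

    import (
    	"fmt"

    	"github.com/ethereum/go-ethereum/swarm/api"
    	"github.com/ethereum/go-ethereum/swarm/storage"
    )

    // reportChunks prints, for each address, whether the local datastore has it.
    func reportChunks(insp *api.Inspector, addrs []storage.Address) {
    	for _, info := range insp.Has(addrs) {
    		fmt.Printf("chunk %s stored locally: %t\n", info.Addr, info.Has)
    	}
    }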
vendor/github.com/ethereum/go-ethereum/swarm/docker/Dockerfile (generated, vendored, 21 changes)
@@ -10,14 +10,23 @@ RUN mkdir -p $GOPATH/src/github.com/ethereum && \
     git checkout ${VERSION} && \
     go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm && \
     go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm/swarm-smoke && \
-    go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/geth && \
-    cp $GOPATH/bin/swarm /swarm && cp $GOPATH/bin/geth /geth && cp $GOPATH/bin/swarm-smoke /swarm-smoke
+    go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm/global-store && \
+    go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/geth

-# Release image with the required binaries and scripts
-FROM alpine:3.8
+FROM alpine:3.8 as swarm-smoke
 WORKDIR /
-COPY --from=builder /swarm /geth /swarm-smoke /
-ADD run.sh /run.sh
+COPY --from=builder /go/bin/swarm-smoke /
+ADD run-smoke.sh /run-smoke.sh
+ENTRYPOINT ["/run-smoke.sh"]
+
+FROM alpine:3.8 as swarm-global-store
+WORKDIR /
+COPY --from=builder /go/bin/global-store /
+ENTRYPOINT ["/global-store"]
+
+FROM alpine:3.8 as swarm
+WORKDIR /
+COPY --from=builder /go/bin/swarm /go/bin/geth /
+ADD run.sh /run.sh
 ENTRYPOINT ["/run.sh"]
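With the release stages named, each image can now be built on its own, for example docker build --target swarm . (the --target flag requires Docker 17.05+ multi-stage support); before this change a single alpine image bundled swarm, geth and swarm-smoke together.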
vendor/github.com/ethereum/go-ethereum/swarm/metrics/flags.go (generated, vendored, 46 changes)
@@ -23,7 +23,7 @@ import (
 	gethmetrics "github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/metrics/influxdb"
 	"github.com/ethereum/go-ethereum/swarm/log"
-	"gopkg.in/urfave/cli.v1"
+	cli "gopkg.in/urfave/cli.v1"
 )

 var (
@@ -31,6 +31,10 @@ var (
 		Name:  "metrics.influxdb.export",
 		Usage: "Enable metrics export/push to an external InfluxDB database",
 	}
+	MetricsEnableInfluxDBAccountingExportFlag = cli.BoolFlag{
+		Name:  "metrics.influxdb.accounting",
+		Usage: "Enable accounting metrics export/push to an external InfluxDB database",
+	}
 	MetricsInfluxDBEndpointFlag = cli.StringFlag{
 		Name:  "metrics.influxdb.endpoint",
 		Usage: "Metrics InfluxDB endpoint",
@@ -51,14 +55,14 @@ var (
 		Usage: "Metrics InfluxDB password",
 		Value: "",
 	}
-	// The `host` tag is part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB.
-	// It is used so that we can group all nodes and average a measurement across all of them, but also so
-	// that we can select a specific node and inspect its measurements.
+	// Tags are part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB.
+	// For example `host` tag could be used so that we can group all nodes and average a measurement
+	// across all of them, but also so that we can select a specific node and inspect its measurements.
 	// https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key
-	MetricsInfluxDBHostTagFlag = cli.StringFlag{
-		Name:  "metrics.influxdb.host.tag",
-		Usage: "Metrics InfluxDB `host` tag attached to all measurements",
-		Value: "localhost",
+	MetricsInfluxDBTagsFlag = cli.StringFlag{
+		Name:  "metrics.influxdb.tags",
+		Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements",
+		Value: "host=localhost",
 	}
 )

@@ -66,33 +70,39 @@ var (
 var Flags = []cli.Flag{
 	utils.MetricsEnabledFlag,
 	MetricsEnableInfluxDBExportFlag,
+	MetricsEnableInfluxDBAccountingExportFlag,
 	MetricsInfluxDBEndpointFlag,
 	MetricsInfluxDBDatabaseFlag,
 	MetricsInfluxDBUsernameFlag,
 	MetricsInfluxDBPasswordFlag,
-	MetricsInfluxDBHostTagFlag,
+	MetricsInfluxDBTagsFlag,
 }

 func Setup(ctx *cli.Context) {
 	if gethmetrics.Enabled {
 		log.Info("Enabling swarm metrics collection")
 		var (
-			enableExport = ctx.GlobalBool(MetricsEnableInfluxDBExportFlag.Name)
-			endpoint     = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
-			database     = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
-			username     = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
-			password     = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
-			hosttag      = ctx.GlobalString(MetricsInfluxDBHostTagFlag.Name)
+			endpoint               = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
+			database               = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
+			username               = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
+			password               = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
+			enableExport           = ctx.GlobalBool(MetricsEnableInfluxDBExportFlag.Name)
+			enableAccountingExport = ctx.GlobalBool(MetricsEnableInfluxDBAccountingExportFlag.Name)
 		)

 		// Start system runtime metrics collection
 		go gethmetrics.CollectProcessMetrics(2 * time.Second)

+		tagsMap := utils.SplitTagsFlag(ctx.GlobalString(MetricsInfluxDBTagsFlag.Name))
+
 		if enableExport {
 			log.Info("Enabling swarm metrics export to InfluxDB")
-			go influxdb.InfluxDBWithTags(gethmetrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "swarm.", map[string]string{
-				"host": hosttag,
-			})
+			go influxdb.InfluxDBWithTags(gethmetrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "swarm.", tagsMap)
+		}
+
+		if enableAccountingExport {
+			log.Info("Exporting swarm accounting metrics to InfluxDB")
+			go influxdb.InfluxDBWithTags(gethmetrics.AccountingRegistry, 10*time.Second, endpoint, database, username, password, "accounting.", tagsMap)
 		}
 	}
 }
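utils.SplitTagsFlag itself is not part of this diff; the sketch below shows the parsing it plausibly performs on metrics.influxdb.tags values such as host=localhost,dc=eu (an assumption, not the vendored implementation):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // splitTags turns a comma-separated list of key=value pairs into a map,
    // silently skipping malformed entries.
    func splitTags(raw string) map[string]string {
    	tags := make(map[string]string)
    	for _, pair := range strings.Split(raw, ",") {
    		kv := strings.SplitN(pair, "=", 2)
    		if len(kv) == 2 && kv[0] != "" {
    			tags[kv[0]] = kv[1]
    		}
    	}
    	return tags
    }

    func main() {
    	fmt.Println(splitTags("host=localhost,dc=eu")) // map[dc:eu host:localhost]
    }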
vendor/github.com/ethereum/go-ethereum/swarm/network/fetcher.go (generated, vendored, 70 changes)
@@ -26,20 +26,23 @@ import (
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )

-var searchTimeout = 1 * time.Second
+const (
+	defaultSearchTimeout = 1 * time.Second
+	// maximum number of forwarded requests (hops), to make sure requests are not
+	// forwarded forever in peer loops
+	maxHopCount uint8 = 20
+)

 // Time to consider peer to be skipped.
 // Also used in stream delivery.
 var RequestTimeout = 10 * time.Second

-var maxHopCount uint8 = 20 // maximum number of forwarded requests (hops), to make sure requests are not forwarded forever in peer loops
-
 type RequestFunc func(context.Context, *Request) (*enode.ID, chan struct{}, error)

 // Fetcher is created when a chunk is not found locally. It starts a request handler loop once and
 // keeps it alive until all active requests are completed. This can happen:
 // 1. either because the chunk is delivered
-// 2. or becuse the requestor cancelled/timed out
+// 2. or because the requester cancelled/timed out
 // Fetcher self destroys itself after it is completed.
 // TODO: cancel all forward requests after termination
 type Fetcher struct {
@@ -47,7 +50,9 @@ type Fetcher struct {
 	addr             storage.Address // the address of the chunk to be fetched
 	offerC           chan *enode.ID  // channel of sources (peer node id strings)
 	requestC         chan uint8      // channel for incoming requests (with the hopCount value in it)
+	searchTimeout    time.Duration
 	skipCheck        bool
+	ctx              context.Context
 }

 type Request struct {
@@ -79,7 +84,7 @@ func (r *Request) SkipPeer(nodeID string) bool {
 	}
 	t, ok := val.(time.Time)
 	if ok && time.Now().After(t.Add(RequestTimeout)) {
-		// deadine expired
+		// deadline expired
 		r.peersToSkip.Delete(nodeID)
 		return false
 	}
@@ -100,32 +105,35 @@ func NewFetcherFactory(request RequestFunc, skipCheck bool) *FetcherFactory {
 	}
 }

-// New contructs a new Fetcher, for the given chunk. All peers in peersToSkip are not requested to
-// deliver the given chunk. peersToSkip should always contain the peers which are actively requesting
-// this chunk, to make sure we don't request back the chunks from them.
+// New constructs a new Fetcher, for the given chunk. All peers in peersToSkip
+// are not requested to deliver the given chunk. peersToSkip should always
+// contain the peers which are actively requesting this chunk, to make sure we
+// don't request back the chunks from them.
 // The created Fetcher is started and returned.
-func (f *FetcherFactory) New(ctx context.Context, source storage.Address, peersToSkip *sync.Map) storage.NetFetcher {
-	fetcher := NewFetcher(source, f.request, f.skipCheck)
-	go fetcher.run(ctx, peersToSkip)
+func (f *FetcherFactory) New(ctx context.Context, source storage.Address, peers *sync.Map) storage.NetFetcher {
+	fetcher := NewFetcher(ctx, source, f.request, f.skipCheck)
+	go fetcher.run(peers)
 	return fetcher
 }

 // NewFetcher creates a new Fetcher for the given chunk address using the given request function.
-func NewFetcher(addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher {
+func NewFetcher(ctx context.Context, addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher {
 	return &Fetcher{
 		addr:             addr,
 		protoRequestFunc: rf,
 		offerC:           make(chan *enode.ID),
 		requestC:         make(chan uint8),
+		searchTimeout:    defaultSearchTimeout,
 		skipCheck:        skipCheck,
+		ctx:              ctx,
 	}
 }

 // Offer is called when an upstream peer offers the chunk via syncing as part of `OfferedHashesMsg` and the node does not have the chunk locally.
-func (f *Fetcher) Offer(ctx context.Context, source *enode.ID) {
+func (f *Fetcher) Offer(source *enode.ID) {
 	// First we need to have this select to make sure that we return if context is done
 	select {
-	case <-ctx.Done():
+	case <-f.ctx.Done():
 		return
 	default:
 	}
@@ -134,15 +142,15 @@ func (f *Fetcher) Offer(ctx context.Context, source *enode.ID) {
 	// push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
 	select {
 	case f.offerC <- source:
-	case <-ctx.Done():
+	case <-f.ctx.Done():
 	}
 }

 // Request is called when an upstream peer request the chunk as part of `RetrieveRequestMsg`, or from a local request through FileStore, and the node does not have the chunk locally.
-func (f *Fetcher) Request(ctx context.Context, hopCount uint8) {
+func (f *Fetcher) Request(hopCount uint8) {
 	// First we need to have this select to make sure that we return if context is done
 	select {
-	case <-ctx.Done():
+	case <-f.ctx.Done():
 		return
 	default:
 	}
@@ -156,13 +164,13 @@ func (f *Fetcher) Request(ctx context.Context, hopCount uint8) {
 	// push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
 	select {
 	case f.requestC <- hopCount + 1:
-	case <-ctx.Done():
+	case <-f.ctx.Done():
 	}
 }

 // start prepares the Fetcher
 // it keeps the Fetcher alive within the lifecycle of the passed context
-func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
+func (f *Fetcher) run(peers *sync.Map) {
 	var (
 		doRequest bool        // determines if retrieval is initiated in the current iteration
 		wait      *time.Timer // timer for search timeout
@@ -176,7 +184,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
 	// loop that keeps the fetching process alive
 	// after every request a timer is set. If this goes off we request again from another peer
 	// note that the previous request is still alive and has the chance to deliver, so
-	// rerequesting extends the search. ie.,
+	// requesting again extends the search. ie.,
 	// if a peer we requested from is gone we issue a new request, so the number of active
 	// requests never decreases
 	for {
@@ -209,20 +217,20 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
 		// search timeout: too much time passed since the last request,
 		// extend the search to a new peer if we can find one
 		case <-waitC:
-			log.Trace("search timed out: rerequesting", "request addr", f.addr)
+			log.Trace("search timed out: requesting", "request addr", f.addr)
 			doRequest = requested

 		// all Fetcher context closed, can quit
-		case <-ctx.Done():
+		case <-f.ctx.Done():
 			log.Trace("terminate fetcher", "request addr", f.addr)
-			// TODO: send cancelations to all peers left over in peers map (i.e., those we requested from)
+			// TODO: send cancellations to all peers left over in peers map (i.e., those we requested from)
 			return
 		}

 		// need to issue a new request
 		if doRequest {
 			var err error
-			sources, err = f.doRequest(ctx, gone, peers, sources, hopCount)
+			sources, err = f.doRequest(gone, peers, sources, hopCount)
 			if err != nil {
 				log.Info("unable to request", "request addr", f.addr, "err", err)
 			}
@@ -231,7 +239,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
 		// if wait channel is not set, set it to a timer
 		if requested {
 			if wait == nil {
-				wait = time.NewTimer(searchTimeout)
+				wait = time.NewTimer(f.searchTimeout)
 				defer wait.Stop()
 				waitC = wait.C
 			} else {
@@ -242,8 +250,8 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
 				default:
 				}
 			}
-			// reset the timer to go off after searchTimeout
-			wait.Reset(searchTimeout)
+			// reset the timer to go off after defaultSearchTimeout
+			wait.Reset(f.searchTimeout)
 		}
 	}
 	doRequest = false
@@ -260,7 +268,7 @@ func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
 // * the peer's address is added to the set of peers to skip
 // * the peer's address is removed from prospective sources, and
 // * a go routine is started that reports on the gone channel if the peer is disconnected (or terminated their streamer)
-func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSkip *sync.Map, sources []*enode.ID, hopCount uint8) ([]*enode.ID, error) {
+func (f *Fetcher) doRequest(gone chan *enode.ID, peersToSkip *sync.Map, sources []*enode.ID, hopCount uint8) ([]*enode.ID, error) {
 	var i int
 	var sourceID *enode.ID
 	var quit chan struct{}
@@ -277,7 +285,7 @@ func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSki
 	for i = 0; i < len(sources); i++ {
 		req.Source = sources[i]
 		var err error
-		sourceID, quit, err = f.protoRequestFunc(ctx, req)
+		sourceID, quit, err = f.protoRequestFunc(f.ctx, req)
 		if err == nil {
 			// remove the peer from known sources
 			// Note: we can modify the source although we are looping on it, because we break from the loop immediately
@@ -291,7 +299,7 @@ func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSki
 	if !foundSource {
 		req.Source = nil
 		var err error
-		sourceID, quit, err = f.protoRequestFunc(ctx, req)
+		sourceID, quit, err = f.protoRequestFunc(f.ctx, req)
 		if err != nil {
 			// if no peers found to request from
 			return sources, err
@@ -308,7 +316,7 @@ func (f *Fetcher) doRequest(ctx context.Context, gone chan *enode.ID, peersToSki
 		select {
 		case <-quit:
 			gone <- sourceID
-		case <-ctx.Done():
+		case <-f.ctx.Done():
 		}
 	}()
 	return sources, nil
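The recurring change in this file is moving the lifetime context from every method parameter into the Fetcher constructed with it. A minimal sketch of that shape on a hypothetical type:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    type worker struct {
    	ctx  context.Context
    	jobs chan int
    }

    // newWorker captures the lifetime context at construction time.
    func newWorker(ctx context.Context) *worker {
    	return &worker{ctx: ctx, jobs: make(chan int)}
    }

    // Submit no longer needs a ctx argument; the worker's own context gates it.
    func (w *worker) Submit(n int) {
    	select {
    	case w.jobs <- n:
    	case <-w.ctx.Done():
    	}
    }

    func (w *worker) run() {
    	for {
    		select {
    		case n := <-w.jobs:
    			fmt.Println("job", n)
    		case <-w.ctx.Done():
    			return
    		}
    	}
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    	defer cancel()
    	w := newWorker(ctx)
    	go w.run()
    	w.Submit(1)
    	<-ctx.Done()
    }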
vendor/github.com/ethereum/go-ethereum/swarm/network/fetcher_test.go (generated, vendored, 129 changes)
@ -69,7 +69,11 @@ func (m *mockRequester) doRequest(ctx context.Context, request *Request) (*enode
|
||||
func TestFetcherSingleRequest(t *testing.T) {
|
||||
requester := newMockRequester()
|
||||
addr := make([]byte, 32)
|
||||
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
fetcher := NewFetcher(ctx, addr, requester.doRequest, true)
|
||||
|
||||
peers := []string{"a", "b", "c", "d"}
|
||||
peersToSkip := &sync.Map{}
|
||||
@ -77,13 +81,9 @@ func TestFetcherSingleRequest(t *testing.T) {
|
||||
peersToSkip.Store(p, time.Now())
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
go fetcher.run(peersToSkip)
|
||||
|
||||
go fetcher.run(ctx, peersToSkip)
|
||||
|
||||
rctx := context.Background()
|
||||
fetcher.Request(rctx, 0)
|
||||
fetcher.Request(0)
|
||||
|
||||
select {
|
||||
case request := <-requester.requestC:
|
||||
@ -115,20 +115,19 @@ func TestFetcherSingleRequest(t *testing.T) {
|
||||
func TestFetcherCancelStopsFetcher(t *testing.T) {
|
||||
requester := newMockRequester()
|
||||
addr := make([]byte, 32)
|
||||
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||
|
||||
peersToSkip := &sync.Map{}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
fetcher := NewFetcher(ctx, addr, requester.doRequest, true)
|
||||
|
||||
peersToSkip := &sync.Map{}
|
||||
|
||||
// we start the fetcher, and then we immediately cancel the context
|
||||
go fetcher.run(ctx, peersToSkip)
|
||||
go fetcher.run(peersToSkip)
|
||||
cancel()
|
||||
|
||||
rctx, rcancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer rcancel()
|
||||
// we call Request with an active context
|
||||
fetcher.Request(rctx, 0)
|
||||
fetcher.Request(0)
|
||||
|
||||
// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
|
||||
select {
|
||||
@ -140,23 +139,23 @@ func TestFetcherCancelStopsFetcher(t *testing.T) {
|
||||
|
||||
// TestFetchCancelStopsRequest tests that calling a Request function with a cancelled context does not initiate a request
|
||||
func TestFetcherCancelStopsRequest(t *testing.T) {
|
||||
t.Skip("since context is now per fetcher, this test is likely redundant")
|
||||
|
||||
requester := newMockRequester(100 * time.Millisecond)
|
||||
addr := make([]byte, 32)
|
||||
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||
|
||||
peersToSkip := &sync.Map{}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// we start the fetcher with an active context
|
||||
go fetcher.run(ctx, peersToSkip)
|
||||
fetcher := NewFetcher(ctx, addr, requester.doRequest, true)
|
||||
|
||||
rctx, rcancel := context.WithCancel(context.Background())
|
||||
rcancel()
|
||||
peersToSkip := &sync.Map{}
|
||||
|
||||
// we start the fetcher with an active context
|
||||
go fetcher.run(peersToSkip)
|
||||
|
||||
// we call Request with a cancelled context
|
||||
fetcher.Request(rctx, 0)
|
||||
fetcher.Request(0)
|
||||
|
||||
// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
|
||||
select {
|
||||
@ -166,8 +165,7 @@ func TestFetcherCancelStopsRequest(t *testing.T) {
|
||||
}
|
||||
|
||||
// if there is another Request with active context, there should be a request, because the fetcher itself is not cancelled
|
||||
rctx = context.Background()
|
||||
fetcher.Request(rctx, 0)
|
||||
fetcher.Request(0)
|
||||
|
||||
select {
|
||||
case <-requester.requestC:
|
||||
@ -182,19 +180,19 @@ func TestFetcherCancelStopsRequest(t *testing.T) {
|
||||
func TestFetcherOfferUsesSource(t *testing.T) {
|
||||
requester := newMockRequester(100 * time.Millisecond)
|
||||
addr := make([]byte, 32)
|
||||
fetcher := NewFetcher(addr, requester.doRequest, true)
|
||||
|
||||
peersToSkip := &sync.Map{}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// start the fetcher
|
go fetcher.run(ctx, peersToSkip)
fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

peersToSkip := &sync.Map{}

// start the fetcher
go fetcher.run(peersToSkip)

rctx := context.Background()
// call the Offer function with the source peer
fetcher.Offer(rctx, &sourcePeerID)
fetcher.Offer(&sourcePeerID)

// fetcher should not initiate request
select {
@ -204,8 +202,7 @@ func TestFetcherOfferUsesSource(t *testing.T) {
}

// call Request after the Offer
rctx = context.Background()
fetcher.Request(rctx, 0)
fetcher.Request(0)

// there should be exactly 1 request coming from fetcher
var request *Request
@ -234,19 +231,19 @@ func TestFetcherOfferUsesSource(t *testing.T) {
func TestFetcherOfferAfterRequestUsesSourceFromContext(t *testing.T) {
requester := newMockRequester(100 * time.Millisecond)
addr := make([]byte, 32)
fetcher := NewFetcher(addr, requester.doRequest, true)

peersToSkip := &sync.Map{}

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

peersToSkip := &sync.Map{}

// start the fetcher
go fetcher.run(ctx, peersToSkip)
go fetcher.run(peersToSkip)

// call Request first
rctx := context.Background()
fetcher.Request(rctx, 0)
fetcher.Request(0)

// there should be a request coming from fetcher
var request *Request
@ -260,7 +257,7 @@ func TestFetcherOfferAfterRequestUsesSourceFromContext(t *testing.T) {
}

// after the Request call Offer
fetcher.Offer(context.Background(), &sourcePeerID)
fetcher.Offer(&sourcePeerID)

// there should be a request coming from fetcher
select {
@ -283,25 +280,21 @@ func TestFetcherOfferAfterRequestUsesSourceFromContext(t *testing.T) {
func TestFetcherRetryOnTimeout(t *testing.T) {
requester := newMockRequester()
addr := make([]byte, 32)
fetcher := NewFetcher(addr, requester.doRequest, true)

peersToSkip := &sync.Map{}

// set searchTimeOut to low value so the test is quicker
defer func(t time.Duration) {
searchTimeout = t
}(searchTimeout)
searchTimeout = 250 * time.Millisecond

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

fetcher := NewFetcher(ctx, addr, requester.doRequest, true)
// set searchTimeOut to low value so the test is quicker
fetcher.searchTimeout = 250 * time.Millisecond

peersToSkip := &sync.Map{}

// start the fetcher
go fetcher.run(ctx, peersToSkip)
go fetcher.run(peersToSkip)

// call the fetch function with an active context
rctx := context.Background()
fetcher.Request(rctx, 0)
fetcher.Request(0)

// after 100ms the first request should be initiated
time.Sleep(100 * time.Millisecond)
@ -343,7 +336,7 @@ func TestFetcherFactory(t *testing.T) {

fetcher := fetcherFactory.New(context.Background(), addr, peersToSkip)

fetcher.Request(context.Background(), 0)
fetcher.Request(0)

// check if the created fetchFunction really starts a fetcher and initiates a request
select {
@ -357,23 +350,21 @@ func TestFetcherFactory(t *testing.T) {
func TestFetcherRequestQuitRetriesRequest(t *testing.T) {
requester := newMockRequester()
addr := make([]byte, 32)
fetcher := NewFetcher(addr, requester.doRequest, true)

// make sure searchTimeout is long so it is sure the request is not retried because of timeout
defer func(t time.Duration) {
searchTimeout = t
}(searchTimeout)
searchTimeout = 10 * time.Second

peersToSkip := &sync.Map{}

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

go fetcher.run(ctx, peersToSkip)
fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

rctx := context.Background()
fetcher.Request(rctx, 0)
// make sure the searchTimeout is long so it is sure the request is not
// retried because of timeout
fetcher.searchTimeout = 10 * time.Second

peersToSkip := &sync.Map{}

go fetcher.run(peersToSkip)

fetcher.Request(0)

select {
case <-requester.requestC:
@ -466,17 +457,15 @@ func TestRequestSkipPeerPermanent(t *testing.T) {
func TestFetcherMaxHopCount(t *testing.T) {
requester := newMockRequester()
addr := make([]byte, 32)
fetcher := NewFetcher(addr, requester.doRequest, true)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

peersToSkip := &sync.Map{}

go fetcher.run(ctx, peersToSkip)

rctx := context.Background()
fetcher.Request(rctx, maxHopCount)
go fetcher.run(peersToSkip)

// if hopCount is already at max no request should be initiated
select {
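The pattern across these fetcher tests: the fetcher's lifetime is now bound to the context passed to NewFetcher rather than to the run loop, and searchTimeout moved from a package-level variable to a per-fetcher field. A minimal sketch of the new-style wiring (NewFetcher, run, Request and searchTimeout are the identifiers from the diff; the surrounding test body is assumed):

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fetcher := NewFetcher(ctx, addr, requester.doRequest, true)
    fetcher.searchTimeout = 250 * time.Millisecond // per-instance, no global to save and restore

    peersToSkip := &sync.Map{}
    go fetcher.run(peersToSkip) // context no longer threaded through run

    fetcher.Request(0) // hop count only; cancellation is governed by the construction context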
60 vendor/github.com/ethereum/go-ethereum/swarm/network/hive_test.go generated vendored
@ -18,9 +18,9 @@ package network

import (
"io/ioutil"
"log"
"os"
"testing"
"time"

p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
"github.com/ethereum/go-ethereum/swarm/state"
@ -35,6 +35,8 @@ func newHiveTester(t *testing.T, params *HiveParams, n int, store state.Store) (
return newBzzBaseTester(t, n, addr, DiscoverySpec, pp.Run), pp
}

// TestRegisterAndConnect verifies that the protocol runs successfully
// and that the peer connection exists afterwards
func TestRegisterAndConnect(t *testing.T) {
params := NewHiveParams()
s, pp := newHiveTester(t, params, 1, nil)
@ -43,25 +45,57 @@ func TestRegisterAndConnect(t *testing.T) {
raddr := NewAddr(node)
pp.Register(raddr)

// start the hive and wait for the connection
// start the hive
err := pp.Start(s.Server)
if err != nil {
t.Fatal(err)
}
defer pp.Stop()
// retrieve and broadcast

// both hive connect and disconnect check have time delays
// therefore we need to verify that peer is connected
// so that we are sure that the disconnect timeout doesn't complete
// before the hive connect method is run at least once
timeout := time.After(time.Second)
for {
select {
case <-timeout:
t.Fatalf("expected connection")
default:
}
i := 0
pp.Kademlia.EachConn(nil, 256, func(addr *Peer, po int) bool {
i++
return true
})
if i > 0 {
break
}
time.Sleep(time.Millisecond)
}

// check that the connection actually exists
// the timeout error means no disconnection events
// were received within a certain timeout
err = s.TestDisconnected(&p2ptest.Disconnect{
Peer: s.Nodes[0].ID(),
Error: nil,
})

if err == nil || err.Error() != "timed out waiting for peers to disconnect" {
t.Fatalf("expected peer to connect")
t.Fatalf("expected no disconnection event")
}
}

// TestHiveStatePersistance creates a protocol simulation with n peers for a node
// After protocols complete, the node is shut down and the state is stored.
// Another simulation is created, where 0 nodes are created, but where the stored state is passed
// The test succeeds if all the peers from the stored state are known after the protocols of the
// second simulation have completed
//
// Actual connectivity is not in scope for this test, as the peers loaded from state are not known to
// the simulation; the test only verifies that the peers are known to the node
func TestHiveStatePersistance(t *testing.T) {
log.SetOutput(os.Stdout)

dir, err := ioutil.TempDir("", "hive_test_store")
if err != nil {
@ -84,7 +118,8 @@ func TestHiveStatePersistance(t *testing.T) {
peers[raddr.String()] = true
}

// start the hive and wait for the connection
// start and stop the hive
// the known peers should be saved upon stopping
err = pp.Start(s.Server)
if err != nil {
t.Fatal(err)
@ -92,15 +127,15 @@ func TestHiveStatePersistance(t *testing.T) {
pp.Stop()
store.Close()

persistedStore, err := state.NewDBStore(dir) //start the hive with an empty dbstore
// start the hive with an empty dbstore
persistedStore, err := state.NewDBStore(dir)
if err != nil {
t.Fatal(err)
}

s1, pp := newHiveTester(t, params, 1, persistedStore)

//start the hive and wait for the connection
s1, pp := newHiveTester(t, params, 0, persistedStore)

// start the hive and check that we know of all expected peers
pp.Start(s1.Server)
i := 0
pp.Kademlia.EachAddr(nil, 256, func(addr *BzzAddr, po int) bool {
@ -108,10 +143,13 @@ func TestHiveStatePersistance(t *testing.T) {
i++
return true
})
// TODO remove this line when verified that test passes
time.Sleep(time.Second)
if i != 5 {
t.Errorf("invalid number of entries: got %v, want %v", i, 5)
t.Fatalf("invalid number of entries: got %v, want %v", i, 5)
}
if len(peers) != 0 {
t.Fatalf("%d peers left over: %v", len(peers), peers)
}

}
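The rewritten TestRegisterAndConnect replaces a single blocking wait with a poll loop: spin on EachConn until at least one connection shows up, bounded by a deadline. That wait-for-condition shape can be factored out; a sketch assuming a plain func() bool predicate (the helper name is illustrative, not from the source):

    // waitFor polls cond every millisecond until it returns true
    // or the deadline passes; returns false on timeout.
    func waitFor(cond func() bool, deadline time.Duration) bool {
        timeout := time.After(deadline)
        for {
            select {
            case <-timeout:
                return false
            default:
            }
            if cond() {
                return true
            }
            time.Sleep(time.Millisecond)
        }
    }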
299 vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia.go generated vendored
@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/pot"
sv "github.com/ethereum/go-ethereum/swarm/version"
)

/*
@ -168,82 +169,115 @@ func (k *Kademlia) Register(peers ...*BzzAddr) error {
return nil
}

// SuggestPeer returns a known peer for the lowest proximity bin for the
// lowest bincount below depth
// naturally if there is an empty row it returns a peer for that
func (k *Kademlia) SuggestPeer() (a *BzzAddr, o int, want bool) {
// SuggestPeer returns an unconnected peer address as a peer suggestion for connection
func (k *Kademlia) SuggestPeer() (suggestedPeer *BzzAddr, saturationDepth int, changed bool) {
k.lock.Lock()
defer k.lock.Unlock()
minsize := k.MinBinSize
depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
// if there is a callable neighbour within the current proxBin, connect
// this makes sure nearest neighbour set is fully connected
var ppo int
k.addrs.EachNeighbour(k.base, Pof, func(val pot.Val, po int) bool {
if po < depth {
return false
}
e := val.(*entry)
c := k.callable(e)
if c {
a = e.BzzAddr
}
ppo = po
return !c
})
if a != nil {
log.Trace(fmt.Sprintf("%08x candidate nearest neighbour found: %v (%v)", k.BaseAddr()[:4], a, ppo))
return a, 0, false
}

var bpo []int
prev := -1
radius := neighbourhoodRadiusForPot(k.conns, k.NeighbourhoodSize, k.base)
// collect undersaturated bins in ascending order of number of connected peers
// and from shallow to deep (ascending order of PO)
// insert them in a map of bin arrays, keyed with the number of connected peers
saturation := make(map[int][]int)
var lastPO int // the last non-empty PO bin in the iteration
saturationDepth = -1 // the deepest PO such that all shallower bins have >= k.MinBinSize peers
var pastDepth bool // whether po of iteration >= depth
k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
prev++
for ; prev < po; prev++ {
bpo = append(bpo, prev)
minsize = 0
// process skipped empty bins
for ; lastPO < po; lastPO++ {
// find the lowest unsaturated bin
if saturationDepth == -1 {
saturationDepth = lastPO
}
// if there is an empty bin, depth is surely passed
pastDepth = true
saturation[0] = append(saturation[0], lastPO)
}
if size < minsize {
bpo = append(bpo, po)
minsize = size
lastPO = po + 1
// past radius, depth is surely passed
if po >= radius {
pastDepth = true
}
return size > 0 && po < depth
// beyond depth the bin is treated as unsaturated even if size >= k.MinBinSize
// in order to achieve full connectivity to all neighbours
if pastDepth && size >= k.MinBinSize {
size = k.MinBinSize - 1
}
// process non-empty unsaturated bins
if size < k.MinBinSize {
// find the lowest unsaturated bin
if saturationDepth == -1 {
saturationDepth = po
}
saturation[size] = append(saturation[size], po)
}
return true
})
// all buckets are full, ie., minsize == k.MinBinSize
if len(bpo) == 0 {
// to trigger peer requests for peers closer than closest connection, include
// all bins from nearest connection upto nearest address as unsaturated
var nearestAddrAt int
k.addrs.EachNeighbour(k.base, Pof, func(_ pot.Val, po int) bool {
nearestAddrAt = po
return false
})
// including bins as size 0 has the effect that requesting connection
// is prioritised over non-empty shallower bins
for ; lastPO <= nearestAddrAt; lastPO++ {
saturation[0] = append(saturation[0], lastPO)
}
// all PO bins are saturated, ie., minsize >= k.MinBinSize, no peer suggested
if len(saturation) == 0 {
return nil, 0, false
}
// as long as we got candidate peers to connect to
// dont ask for new peers (want = false)
// try to select a candidate peer
// find the first callable peer
nxt := bpo[0]
k.addrs.EachBin(k.base, Pof, nxt, func(po, _ int, f func(func(pot.Val) bool) bool) bool {
// for each bin (up until depth) we find callable candidate peers
if po >= depth {
return false
// find the first callable peer in the address book
// starting from the bins with smallest size proceeding from shallow to deep
// for each bin (up until neighbourhood radius) we find callable candidate peers
for size := 0; size < k.MinBinSize && suggestedPeer == nil; size++ {
bins, ok := saturation[size]
if !ok {
// no bin with this size
continue
}
return f(func(val pot.Val) bool {
e := val.(*entry)
c := k.callable(e)
if c {
a = e.BzzAddr
cur := 0
curPO := bins[0]
k.addrs.EachBin(k.base, Pof, curPO, func(po, _ int, f func(func(pot.Val) bool) bool) bool {
curPO = bins[cur]
// find the next bin that has size size
if curPO == po {
cur++
} else {
// skip bins that have no addresses
for ; cur < len(bins) && curPO < po; cur++ {
curPO = bins[cur]
}
if po < curPO {
cur--
return true
}
// stop if there are no addresses
if curPO < po {
return false
}
}
return !c
// curPO found
// find a callable peer out of the addresses in the unsaturated bin
// stop if found
f(func(val pot.Val) bool {
e := val.(*entry)
if k.callable(e) {
suggestedPeer = e.BzzAddr
return false
}
return true
})
return cur < len(bins) && suggestedPeer == nil
})
})
// found a candidate
if a != nil {
return a, 0, false
}
// no candidate peer found, request for the short bin
var changed bool
if uint8(nxt) < k.depth {
k.depth = uint8(nxt)
changed = true

if uint8(saturationDepth) < k.depth {
k.depth = uint8(saturationDepth)
return suggestedPeer, saturationDepth, true
}
return a, nxt, changed
return suggestedPeer, 0, false
}

// On inserts the peer as a kademlia peer into the live peers
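The reworked SuggestPeer returns three things: a candidate address to dial (possibly nil), the new saturation depth, and whether that depth changed. A sketch of how a caller such as the hive's connect loop would consume it (only the identifiers come from the diff; the helpers are hypothetical):

    suggested, depth, changed := kad.SuggestPeer()
    if changed {
        // a new, shallower saturation depth should be advertised to peers,
        // so they can forward candidates for the now-unsaturated bins
        broadcastDepth(depth) // hypothetical helper
    }
    if suggested != nil {
        connect(suggested) // hypothetical helper: dial the returned *BzzAddr
    }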
@ -319,6 +353,9 @@ func (k *Kademlia) sendNeighbourhoodDepthChange() {
// Not receiving from the returned channel will block Register function
// when address count value changes.
func (k *Kademlia) AddrCountC() <-chan int {
k.lock.Lock()
defer k.lock.Unlock()

if k.addrCountC == nil {
k.addrCountC = make(chan int)
}
@ -398,29 +435,25 @@ func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int) bool) {
})
}

// NeighbourhoodDepth returns the depth for the pot, see depthForPot
func (k *Kademlia) NeighbourhoodDepth() (depth int) {
k.lock.RLock()
defer k.lock.RUnlock()
return depthForPot(k.conns, k.NeighbourhoodSize, k.base)
}

// depthForPot returns the proximity order that defines the distance of
// the nearest neighbour set with cardinality >= NeighbourhoodSize
// if there is altogether less than NeighbourhoodSize peers it returns 0
// neighbourhoodRadiusForPot returns the neighbourhood radius of the kademlia
// neighbourhood radius encloses the nearest neighbour set with size >= neighbourhoodSize
// i.e., neighbourhood radius is the deepest PO such that all bins not shallower altogether
// contain at least neighbourhoodSize connected peers
// if there is altogether less than neighbourhoodSize peers connected, it returns 0
// caller must hold the lock
func depthForPot(p *pot.Pot, neighbourhoodSize int, pivotAddr []byte) (depth int) {
func neighbourhoodRadiusForPot(p *pot.Pot, neighbourhoodSize int, pivotAddr []byte) (depth int) {
if p.Size() <= neighbourhoodSize {
return 0
}

// total number of peers in iteration
var size int

// determining the depth is a two-step process
// first we find the proximity bin of the shallowest of the NeighbourhoodSize peers
// the numeric value of depth cannot be higher than this
var maxDepth int

f := func(v pot.Val, i int) bool {
// po == 256 means that addr is the pivot address(self)
if i == 256 {
@ -431,13 +464,30 @@ func depthForPot(p *pot.Pot, neighbourhoodSize int, pivotAddr []byte) (depth int
// this means we have all nn-peers.
// depth is by default set to the bin of the farthest nn-peer
if size == neighbourhoodSize {
maxDepth = i
depth = i
return false
}

return true
}
p.EachNeighbour(pivotAddr, Pof, f)
return depth
}

// depthForPot returns the depth for the pot
// depth is the radius of the minimal extension of nearest neighbourhood that
// includes all empty PO bins. I.e., depth is the deepest PO such that
// - it is not deeper than neighbourhood radius
// - all bins shallower than depth are not empty
// caller must hold the lock
func depthForPot(p *pot.Pot, neighbourhoodSize int, pivotAddr []byte) (depth int) {
if p.Size() <= neighbourhoodSize {
return 0
}
// determining the depth is a two-step process
// first we find the proximity bin of the shallowest of the neighbourhoodSize peers
// the numeric value of depth cannot be higher than this
maxDepth := neighbourhoodRadiusForPot(p, neighbourhoodSize, pivotAddr)

// the second step is to test for empty bins in order from shallowest to deepest
// if an empty bin is found, this will be the actual depth
@ -506,6 +556,9 @@ func (k *Kademlia) string() string {
var rows []string

rows = append(rows, "=========================================================================")
if len(sv.GitCommit) > 0 {
rows = append(rows, fmt.Sprintf("commit hash: %s", sv.GitCommit))
}
rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()[:3]))
rows = append(rows, fmt.Sprintf("population: %d (%d), NeighbourhoodSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.NeighbourhoodSize, k.MinBinSize, k.MaxBinSize))

@ -575,7 +628,8 @@ func (k *Kademlia) string() string {
// used for testing only
// TODO move to separate testing tools file
type PeerPot struct {
NNSet [][]byte
NNSet [][]byte
PeersPerBin []int
}

// NewPeerPotMap creates a map of pot record of *BzzAddr with keys
@ -601,6 +655,7 @@ func NewPeerPotMap(neighbourhoodSize int, addrs [][]byte) map[string]*PeerPot {

// all nn-peers
var nns [][]byte
peersPerBin := make([]int, depth)

// iterate through the neighbours, going from the deepest to the shallowest
np.EachNeighbour(a, Pof, func(val pot.Val, po int) bool {
@ -614,38 +669,74 @@ func NewPeerPotMap(neighbourhoodSize int, addrs [][]byte) map[string]*PeerPot {
// a neighbor is any peer in or deeper than the depth
if po >= depth {
nns = append(nns, addr)
return true
} else {
// for peers < depth, we just count the number in each bin
// the bin is the index of the slice
peersPerBin[po]++
}
return false
return true
})

log.Trace(fmt.Sprintf("%x PeerPotMap NNS: %s", addrs[i][:4], LogAddrs(nns)))
log.Trace(fmt.Sprintf("%x PeerPotMap NNS: %s, peersPerBin", addrs[i][:4], LogAddrs(nns)))
ppmap[common.Bytes2Hex(a)] = &PeerPot{
NNSet: nns,
NNSet: nns,
PeersPerBin: peersPerBin,
}
}
return ppmap
}

// saturation iterates through all peers and
// returns the smallest po value in which the node has less than n peers
// if the iterator reaches depth, then value for depth is returned
// TODO move to separate testing tools file
// TODO this function will stop at the first bin with less than MinBinSize peers, even if there are empty bins between that bin and the depth. This may not be correct behavior
// saturation returns the smallest po value in which the node has less than MinBinSize peers
// if the iterator reaches neighbourhood radius, then the last bin + 1 is returned
func (k *Kademlia) saturation() int {
prev := -1
k.addrs.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
radius := neighbourhoodRadiusForPot(k.conns, k.NeighbourhoodSize, k.base)
k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
prev++
if po >= radius {
return false
}
return prev == po && size >= k.MinBinSize
})
// TODO evaluate whether this check cannot just as well be done within the eachbin
depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
if depth < prev {
return depth
if prev < 0 {
return 0
}
return prev
}

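The distinction between the two measures is easiest to see on concrete bins. A hedged worked example, assuming NeighbourhoodSize = 2 and one connected peer in each of the PO bins 0, 1, 3 and 4 (bin 2 empty):

    // neighbourhoodRadiusForPot: the deepest PO such that bins at that PO
    // and deeper together hold at least 2 peers. Bins 3 and 4 hold the two
    // nearest peers, so radius = 3.
    //
    // depthForPot: capped by the radius, but pulled back to the first empty
    // bin when scanning from shallow to deep. Bin 2 is empty and 2 < 3,
    // so depth = 2.
    //
    // saturation(): the smallest PO where fewer than MinBinSize peers are
    // connected, scanning bins upward and stopping at the radius.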
// isSaturated returns true if the kademlia is considered saturated, or false if not.
// It checks this by checking an array of ints called unsaturatedBins; each item in that array corresponds
// to the bin which is unsaturated (number of connections < k.MinBinSize).
// The bin is considered unsaturated only if there are actual peers in that PeerPot's bin (peersPerBin)
// (if there is no peer for a given bin, then no connection could ever be established;
// in a God's view this is relevant as no more peers will ever appear on that bin)
func (k *Kademlia) isSaturated(peersPerBin []int, depth int) bool {
// depth could be calculated from k but as this is called from `GetHealthInfo()`,
// the depth has already been calculated so we can require it as a parameter

// early check for depth
if depth != len(peersPerBin) {
return false
}
unsaturatedBins := make([]int, 0)
k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {

if po >= depth {
return false
}
log.Trace("peers per bin", "peersPerBin[po]", peersPerBin[po], "po", po)
// if there are actually peers in the PeerPot who can fulfill k.MinBinSize
if size < k.MinBinSize && size < peersPerBin[po] {
log.Trace("connections for po", "po", po, "size", size)
unsaturatedBins = append(unsaturatedBins, po)
}
return true
})

log.Trace("list of unsaturated bins", "unsaturatedBins", unsaturatedBins)
return len(unsaturatedBins) == 0
}

// knowNeighbours tests if all neighbours in the peerpot
// are found among the peers known to the kademlia
// It is used in Healthy function for testing only
@ -728,11 +819,13 @@ type Health struct {
ConnectNN bool // whether node is connected to all its neighbours
CountConnectNN int // amount of neighbours connected to
MissingConnectNN [][]byte // which neighbours we should have been connected to but we're not
Saturated bool // whether we are connected to all the peers we would have liked to
Hive string
// Saturated: if in all bins < depth number of connections >= MinBinsize or,
// if number of connections < MinBinSize, to the number of available peers in that bin
Saturated bool
Hive string
}

// Healthy reports the health state of the kademlia connectivity
// GetHealthInfo reports the health state of the kademlia connectivity
//
// The PeerPot argument provides an all-knowing view of the network
// The resulting Health object is a result of comparisons between
@ -740,13 +833,19 @@ type Health struct {
// what SHOULD it have been when we take all we know about the network into consideration.
//
// used for testing only
func (k *Kademlia) Healthy(pp *PeerPot) *Health {
func (k *Kademlia) GetHealthInfo(pp *PeerPot) *Health {
k.lock.RLock()
defer k.lock.RUnlock()
if len(pp.NNSet) < k.NeighbourhoodSize {
log.Warn("peerpot NNSet < NeighbourhoodSize")
}
gotnn, countgotnn, culpritsgotnn := k.connectedNeighbours(pp.NNSet)
knownn, countknownn, culpritsknownn := k.knowNeighbours(pp.NNSet)
depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
saturated := k.saturation() < depth

// check saturation
saturated := k.isSaturated(pp.PeersPerBin, depth)

log.Trace(fmt.Sprintf("%08x: healthy: knowNNs: %v, gotNNs: %v, saturated: %v\n", k.base, knownn, gotnn, saturated))
return &Health{
KnowNN: knownn,
@ -759,3 +858,13 @@ func (k *Kademlia) Healthy(pp *PeerPot) *Health {
Hive: k.string(),
}
}

// Healthy returns the strict interpretation of `Healthy` given a `Health` struct
// definition of strict health: all conditions must be true:
// - we at least know one peer
// - we know all neighbors
// - we are connected to all known neighbors
// - it is saturated
func (h *Health) Healthy() bool {
return h.KnowNN && h.ConnectNN && h.CountKnowNN > 0 && h.Saturated
}
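Taken together, the health check is now a two-step API: GetHealthInfo computes the comparison against the all-knowing PeerPot, and Healthy() applies the strict interpretation. A usage sketch mirroring the simulation code later in this diff (ppmap construction as in NewPeerPotMap; error handling elided):

    ppmap := NewPeerPotMap(NewKadParams().NeighbourhoodSize, addrs)
    pp := ppmap[common.Bytes2Hex(k.BaseAddr())]
    h := k.GetHealthInfo(pp)
    if !h.Healthy() {
        // not saturated, or neighbours missing: inspect h.MissingConnectNN etc.
    }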
820 vendor/github.com/ethereum/go-ethereum/swarm/network/kademlia_test.go generated vendored (file diff suppressed because it is too large)
13 vendor/github.com/ethereum/go-ethereum/swarm/network/networkid_test.go generated vendored
@ -44,7 +44,7 @@ var (

const (
NumberOfNets = 4
MaxTimeout = 6
MaxTimeout = 15 * time.Second
)

func init() {
@ -76,13 +76,12 @@ func TestNetworkID(t *testing.T) {
if err != nil {
t.Fatalf("Error setting up network: %v", err)
}
defer func() {
//shutdown the snapshot network
log.Trace("Shutting down network")
net.Shutdown()
}()
//let's sleep to ensure all nodes are connected
time.Sleep(1 * time.Second)
// shut down the network to avoid race conditions
// on accessing kademlias global map while network nodes
// are accepting messages
net.Shutdown()
//for each group sharing the same network ID...
for _, netIDGroup := range nodeMap {
log.Trace("netIDGroup size", "size", len(netIDGroup))
@ -147,7 +146,7 @@ func setupNetwork(numnodes int) (net *simulations.Network, err error) {
return nil, fmt.Errorf("create node %d rpc client fail: %v", i, err)
}
//now setup and start event watching in order to know when we can upload
ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second)
ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout)
defer watchCancel()
watchSubscriptionEvents(ctx, nodes[i].ID(), client, errc, quitC)
//on every iteration we connect to all previous ones
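The MaxTimeout change is more than a new value: the old constant was a bare 6 that every call site had to remember to multiply by time.Second, as the call in setupNetwork did. Typing the constant as a time.Duration removes that class of mistake; a minimal illustration (names are generic, not from the source):

    const badTimeout = 6                 // untyped; is it seconds? milliseconds?
    const goodTimeout = 15 * time.Second // a time.Duration; usable directly

    ctx, cancel := context.WithTimeout(context.Background(), goodTimeout)
    defer cancel()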
16 vendor/github.com/ethereum/go-ethereum/swarm/network/protocol.go generated vendored
@ -67,6 +67,7 @@ type BzzConfig struct {
HiveParams *HiveParams
NetworkID uint64
LightNode bool
BootnodeMode bool
}

// Bzz is the swarm protocol bundle
@ -87,7 +88,7 @@ type Bzz struct {
// * overlay driver
// * peer store
func NewBzz(config *BzzConfig, kad *Kademlia, store state.Store, streamerSpec *protocols.Spec, streamerRun func(*BzzPeer) error) *Bzz {
return &Bzz{
bzz := &Bzz{
Hive: NewHive(config.HiveParams, kad, store),
NetworkID: config.NetworkID,
LightNode: config.LightNode,
@ -96,6 +97,13 @@ func NewBzz(config *BzzConfig, kad *Kademlia, store state.Store, streamerSpec *p
streamerRun: streamerRun,
streamerSpec: streamerSpec,
}

if config.BootnodeMode {
bzz.streamerRun = nil
bzz.streamerSpec = nil
}

return bzz
}

// UpdateLocalAddr updates the underlay address of the running node
@ -168,7 +176,7 @@ func (b *Bzz) APIs() []rpc.API {
func (b *Bzz) RunProtocol(spec *protocols.Spec, run func(*BzzPeer) error) func(*p2p.Peer, p2p.MsgReadWriter) error {
return func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
// wait for the bzz protocol to perform the handshake
handshake, _ := b.GetHandshake(p.ID())
handshake, _ := b.GetOrCreateHandshake(p.ID())
defer b.removeHandshake(p.ID())
select {
case <-handshake.done:
@ -213,7 +221,7 @@ func (b *Bzz) performHandshake(p *protocols.Peer, handshake *HandshakeMsg) error
// runBzz is the p2p protocol run function for the bzz base protocol
// that negotiates the bzz handshake
func (b *Bzz) runBzz(p *p2p.Peer, rw p2p.MsgReadWriter) error {
handshake, _ := b.GetHandshake(p.ID())
handshake, _ := b.GetOrCreateHandshake(p.ID())
if !<-handshake.init {
return fmt.Errorf("%08x: bzz already started on peer %08x", b.localAddr.Over()[:4], p.ID().Bytes()[:4])
}
@ -303,7 +311,7 @@ func (b *Bzz) removeHandshake(peerID enode.ID) {
}

// GetHandshake returns the bzz handshake that the remote peer with peerID sent
func (b *Bzz) GetHandshake(peerID enode.ID) (*HandshakeMsg, bool) {
func (b *Bzz) GetOrCreateHandshake(peerID enode.ID) (*HandshakeMsg, bool) {
b.mtx.Lock()
defer b.mtx.Unlock()
handshake, found := b.handshakes[peerID]

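The new BootnodeMode flag shows a config-over-constructor design: NewBzz builds the full bundle, then strips the streamer spec and run function when the node should only serve discovery. A construction sketch (only NetworkID, LightNode and BootnodeMode appear in this diff; the other values are placeholders):

    config := &BzzConfig{
        NetworkID:    3,
        LightNode:    false,
        BootnodeMode: true, // hive/discovery only, no stream protocol registered
    }
    bzz := NewBzz(config, kad, stateStore, streamerSpec, streamerRun)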
26 vendor/github.com/ethereum/go-ethereum/swarm/network/protocol_test.go generated vendored
@ -21,6 +21,7 @@ import (
"fmt"
"os"
"testing"
"time"

"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
@ -82,7 +83,7 @@ func newBzzBaseTester(t *testing.T, n int, addr *BzzAddr, spec *protocols.Spec,
return srv(&BzzPeer{Peer: protocols.NewPeer(p, rw, spec), BzzAddr: NewAddr(p.Node())})
}

s := p2ptest.NewProtocolTester(t, addr.ID(), n, protocol)
s := p2ptest.NewProtocolTester(addr.ID(), n, protocol)

for _, node := range s.Nodes {
cs[node.ID().String()] = make(chan bool)
@ -115,9 +116,9 @@ func newBzz(addr *BzzAddr, lightNode bool) *Bzz {
return bzz
}

func newBzzHandshakeTester(t *testing.T, n int, addr *BzzAddr, lightNode bool) *bzzTester {
func newBzzHandshakeTester(n int, addr *BzzAddr, lightNode bool) *bzzTester {
bzz := newBzz(addr, lightNode)
pt := p2ptest.NewProtocolTester(t, addr.ID(), n, bzz.runBzz)
pt := p2ptest.NewProtocolTester(addr.ID(), n, bzz.runBzz)

return &bzzTester{
addr: addr,
@ -165,7 +166,7 @@ func correctBzzHandshake(addr *BzzAddr, lightNode bool) *HandshakeMsg {
func TestBzzHandshakeNetworkIDMismatch(t *testing.T) {
lightNode := false
addr := RandomAddr()
s := newBzzHandshakeTester(t, 1, addr, lightNode)
s := newBzzHandshakeTester(1, addr, lightNode)
node := s.Nodes[0]

err := s.testHandshake(
@ -182,7 +183,7 @@ func TestBzzHandshakeNetworkIDMismatch(t *testing.T) {
func TestBzzHandshakeVersionMismatch(t *testing.T) {
lightNode := false
addr := RandomAddr()
s := newBzzHandshakeTester(t, 1, addr, lightNode)
s := newBzzHandshakeTester(1, addr, lightNode)
node := s.Nodes[0]

err := s.testHandshake(
@ -199,7 +200,7 @@ func TestBzzHandshakeVersionMismatch(t *testing.T) {
func TestBzzHandshakeSuccess(t *testing.T) {
lightNode := false
addr := RandomAddr()
s := newBzzHandshakeTester(t, 1, addr, lightNode)
s := newBzzHandshakeTester(1, addr, lightNode)
node := s.Nodes[0]

err := s.testHandshake(
@ -224,7 +225,8 @@ func TestBzzHandshakeLightNode(t *testing.T) {
for _, test := range lightNodeTests {
t.Run(test.name, func(t *testing.T) {
randomAddr := RandomAddr()
pt := newBzzHandshakeTester(t, 1, randomAddr, false)
pt := newBzzHandshakeTester(1, randomAddr, false)

node := pt.Nodes[0]
addr := NewAddr(node)

@ -237,8 +239,14 @@ func TestBzzHandshakeLightNode(t *testing.T) {
t.Fatal(err)
}

if pt.bzz.handshakes[node.ID()].LightNode != test.lightNode {
t.Fatalf("peer LightNode flag is %v, should be %v", pt.bzz.handshakes[node.ID()].LightNode, test.lightNode)
select {

case <-pt.bzz.handshakes[node.ID()].done:
if pt.bzz.handshakes[node.ID()].LightNode != test.lightNode {
t.Fatalf("peer LightNode flag is %v, should be %v", pt.bzz.handshakes[node.ID()].LightNode, test.lightNode)
}
case <-time.After(10 * time.Second):
t.Fatal("test timeout")
}
})
}

2 vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/kademlia.go generated vendored
@ -64,7 +64,7 @@ func (s *Simulation) WaitTillHealthy(ctx context.Context) (ill map[enode.ID]*net
addr := common.Bytes2Hex(k.BaseAddr())
pp := ppmap[addr]
//call Healthy RPC
h := k.Healthy(pp)
h := k.GetHealthInfo(pp)
//print info
log.Debug(k.String())
log.Debug("kademlia", "connectNN", h.ConnectNN, "knowNN", h.KnowNN)

4 vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node.go generated vendored
@ -44,7 +44,7 @@ func (s *Simulation) NodeIDs() (ids []enode.ID) {
func (s *Simulation) UpNodeIDs() (ids []enode.ID) {
nodes := s.Net.GetNodes()
for _, node := range nodes {
if node.Up {
if node.Up() {
ids = append(ids, node.ID())
}
}
@ -55,7 +55,7 @@ func (s *Simulation) UpNodeIDs() (ids []enode.ID) {
func (s *Simulation) DownNodeIDs() (ids []enode.ID) {
nodes := s.Net.GetNodes()
for _, node := range nodes {
if !node.Up {
if !node.Up() {
ids = append(ids, node.ID())
}
}

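Every call site in this file (and the files below) changes node.Up from a struct field to an Up() method. The motivation, visible in the node_test.go changes that follow, is that the field was written concurrently by watchPeerEvents; a getter lets the simulation guard it with a lock. A sketch of the accessor pattern, not the actual p2p/simulations implementation:

    type nodeStatus struct {
        mu sync.RWMutex
        up bool
    }

    // Up reads the flag under a read lock, so concurrent
    // readers never race the event-watcher goroutine.
    func (n *nodeStatus) Up() bool {
        n.mu.RLock()
        defer n.mu.RUnlock()
        return n.up
    }

    func (n *nodeStatus) setUp(up bool) {
        n.mu.Lock()
        defer n.mu.Unlock()
        n.up = up
    }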
63 vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/node_test.go generated vendored
@ -54,7 +54,7 @@ func TestUpDownNodeIDs(t *testing.T) {
gotIDs = sim.UpNodeIDs()

for _, id := range gotIDs {
if !sim.Net.GetNode(id).Up {
if !sim.Net.GetNode(id).Up() {
t.Errorf("node %s should not be down", id)
}
}
@ -66,7 +66,7 @@ func TestUpDownNodeIDs(t *testing.T) {
gotIDs = sim.DownNodeIDs()

for _, id := range gotIDs {
if sim.Net.GetNode(id).Up {
if sim.Net.GetNode(id).Up() {
t.Errorf("node %s should not be up", id)
}
}
@ -112,7 +112,7 @@ func TestAddNode(t *testing.T) {
t.Fatal("node not found")
}

if !n.Up {
if !n.Up() {
t.Error("node not started")
}
}
@ -327,7 +327,7 @@ func TestStartStopNode(t *testing.T) {
if n == nil {
t.Fatal("node not found")
}
if !n.Up {
if !n.Up() {
t.Error("node not started")
}

@ -335,26 +335,17 @@ func TestStartStopNode(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if n.Up {
if n.Up() {
t.Error("node not stopped")
}

// Sleep here to ensure that Network.watchPeerEvents defer function
// has set the `node.Up = false` before we start the node again.
// p2p/simulations/network.go:215
//
// The same node is stopped and started again, and upon start
// watchPeerEvents is started in a goroutine. If the node is stopped
// and then very quickly started, that goroutine may be scheduled later
// than start and force `node.Up = false` in its defer function.
// This will make this test unreliable.
time.Sleep(time.Second)
waitForPeerEventPropagation()

err = sim.StartNode(id)
if err != nil {
t.Fatal(err)
}
if !n.Up {
if !n.Up() {
t.Error("node not started")
}
}
@ -377,7 +368,7 @@ func TestStartStopRandomNode(t *testing.T) {
if n == nil {
t.Fatal("node not found")
}
if n.Up {
if n.Up() {
t.Error("node not stopped")
}

@ -386,16 +377,7 @@ func TestStartStopRandomNode(t *testing.T) {
t.Fatal(err)
}

// Sleep here to ensure that Network.watchPeerEvents defer function
// has set the `node.Up = false` before we start the node again.
// p2p/simulations/network.go:215
//
// The same node is stopped and started again, and upon start
// watchPeerEvents is started in a goroutine. If the node is stopped
// and then very quickly started, that goroutine may be scheduled later
// than start and force `node.Up = false` in its defer function.
// This will make this test unreliable.
time.Sleep(time.Second)
waitForPeerEventPropagation()

idStarted, err := sim.StartRandomNode()
if err != nil {
@ -426,21 +408,12 @@ func TestStartStopRandomNodes(t *testing.T) {
if n == nil {
t.Fatal("node not found")
}
if n.Up {
if n.Up() {
t.Error("node not stopped")
}
}

// Sleep here to ensure that Network.watchPeerEvents defer function
// has set the `node.Up = false` before we start the node again.
// p2p/simulations/network.go:215
//
// The same node is stopped and started again, and upon start
// watchPeerEvents is started in a goroutine. If the node is stopped
// and then very quickly started, that goroutine may be scheduled later
// than start and force `node.Up = false` in its defer function.
// This will make this test unreliable.
time.Sleep(time.Second)
waitForPeerEventPropagation()

ids, err = sim.StartRandomNodes(2)
if err != nil {
@ -452,8 +425,20 @@ func TestStartStopRandomNodes(t *testing.T) {
if n == nil {
t.Fatal("node not found")
}
if !n.Up {
if !n.Up() {
t.Error("node not started")
}
}
}

func waitForPeerEventPropagation() {
// Sleep here to ensure that Network.watchPeerEvents defer function
// has set the `node.Up() = false` before we start the node again.
//
// The same node is stopped and started again, and upon start
// watchPeerEvents is started in a goroutine. If the node is stopped
// and then very quickly started, that goroutine may be scheduled later
// than start and force `node.Up() = false` in its defer function.
// This will make this test unreliable.
time.Sleep(1 * time.Second)
}

2 vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/service.go generated vendored
@ -52,7 +52,7 @@ func (s *Simulation) Services(name string) (services map[enode.ID]node.Service)
nodes := s.Net.GetNodes()
services = make(map[enode.ID]node.Service)
for _, node := range nodes {
if !node.Up {
if !node.Up() {
continue
}
simNode, ok := node.Node.(*adapters.SimNode)

4 vendor/github.com/ethereum/go-ethereum/swarm/network/simulation/simulation_test.go generated vendored
@ -124,7 +124,7 @@ func TestClose(t *testing.T) {

var upNodeCount int
for _, n := range sim.Net.GetNodes() {
if n.Up {
if n.Up() {
upNodeCount++
}
}
@ -140,7 +140,7 @@ func TestClose(t *testing.T) {

upNodeCount = 0
for _, n := range sim.Net.GetNodes() {
if n.Up {
if n.Up() {
upNodeCount++
}
}

@ -18,16 +12,12 @@ package discovery

import (
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"strings"
"sync"
"testing"
"time"

@ -86,12 +82,10 @@ func getDbStore(nodeID string) (*state.DBStore, error) {
}

var (
nodeCount = flag.Int("nodes", 10, "number of nodes to create (default 10)")
initCount = flag.Int("conns", 1, "number of originally connected peers (default 1)")
snapshotFile = flag.String("snapshot", "", "path to create snapshot file in")
loglevel = flag.Int("loglevel", 3, "verbosity of logs")
rawlog = flag.Bool("rawlog", false, "remove terminal formatting from logs")
serviceOverride = flag.String("services", "", "remove or add services to the node snapshot; prefix with \"+\" to add, \"-\" to remove; example: +pss,-discovery")
nodeCount = flag.Int("nodes", 32, "number of nodes to create (default 32)")
initCount = flag.Int("conns", 1, "number of originally connected peers (default 1)")
loglevel = flag.Int("loglevel", 3, "verbosity of logs")
rawlog = flag.Bool("rawlog", false, "remove terminal formatting from logs")
)

func init() {
@ -157,7 +151,6 @@ func testDiscoverySimulationSimAdapter(t *testing.T, nodes, conns int) {
}

func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) {
t.Skip("discovery tests depend on suggestpeer, which is unreliable after kademlia depth change.")
startedAt := time.Now()
result, err := discoverySimulation(nodes, conns, adapter)
if err != nil {
@ -185,7 +178,6 @@ func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.No
}

func testDiscoveryPersistenceSimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) map[int][]byte {
t.Skip("discovery tests depend on suggestpeer, which is unreliable after kademlia depth change.")
persistenceEnabled = true
discoveryEnabled = true

@ -247,25 +239,14 @@ func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simul
action := func(ctx context.Context) error {
return nil
}
wg := sync.WaitGroup{}
for i := range ids {
// collect the overlay addresses, to
addrs = append(addrs, ids[i].Bytes())
for j := 0; j < conns; j++ {
var k int
if j == 0 {
k = (i + 1) % len(ids)
} else {
k = rand.Intn(len(ids))
}
wg.Add(1)
go func(i, k int) {
defer wg.Done()
net.Connect(ids[i], ids[k])
}(i, k)
}
}
wg.Wait()
err := net.ConnectNodesChain(nil)
if err != nil {
return nil, err
}
log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
// construct the peer pot, so that kademlia health can be checked
ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
@ -286,10 +267,10 @@ func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simul
}

healthy := &network.Health{}
if err := client.Call(&healthy, "hive_healthy", ppmap); err != nil {
if err := client.Call(&healthy, "hive_getHealthInfo", ppmap[common.Bytes2Hex(id.Bytes())]); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
log.Info(fmt.Sprintf("node %4s healthy: connected nearest neighbours: %v, know nearest neighbours: %v,\n\n%v", id, healthy.ConnectNN, healthy.KnowNN, healthy.Hive))
log.Debug(fmt.Sprintf("node %4s healthy: connected nearest neighbours: %v, know nearest neighbours: %v,\n\n%v", id, healthy.ConnectNN, healthy.KnowNN, healthy.Hive))
return healthy.KnowNN && healthy.ConnectNN, nil
}

@ -309,40 +290,6 @@ func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simul
if result.Error != nil {
return result, nil
}

if *snapshotFile != "" {
var err error
var snap *simulations.Snapshot
if len(*serviceOverride) > 0 {
var addServices []string
var removeServices []string
for _, osvc := range strings.Split(*serviceOverride, ",") {
if strings.Index(osvc, "+") == 0 {
addServices = append(addServices, osvc[1:])
} else if strings.Index(osvc, "-") == 0 {
removeServices = append(removeServices, osvc[1:])
} else {
panic("stick to the rules, you know what they are")
}
}
snap, err = net.SnapshotWithServices(addServices, removeServices)
} else {
snap, err = net.Snapshot()
}

if err != nil {
return nil, errors.New("no shapshot dude")
}
jsonsnapshot, err := json.Marshal(snap)
if err != nil {
return nil, fmt.Errorf("corrupt json snapshot: %v", err)
}
log.Info("writing snapshot", "file", *snapshotFile)
err = ioutil.WriteFile(*snapshotFile, jsonsnapshot, 0755)
if err != nil {
return nil, err
}
}
return result, nil
}

@ -405,7 +352,7 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
healthy := &network.Health{}
addr := id.String()
ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)
if err := client.Call(&healthy, "hive_healthy", ppmap); err != nil {
if err := client.Call(&healthy, "hive_getHealthInfo", ppmap[common.Bytes2Hex(id.Bytes())]); err != nil {
return fmt.Errorf("error getting node health: %s", err)
}

@ -415,9 +362,6 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
return fmt.Errorf("error getting node string %s", err)
}
log.Info(nodeStr)
for _, a := range addrs {
log.Info(common.Bytes2Hex(a))
}
if !healthy.ConnectNN || healthy.CountKnowNN == 0 {
isHealthy = false
break
@ -457,23 +401,7 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt

return nil
}
//connects in a chain
wg := sync.WaitGroup{}
//connects in a ring
for i := range ids {
for j := 1; j <= conns; j++ {
k := (i + j) % len(ids)
if k == i {
k = (k + 1) % len(ids)
}
wg.Add(1)
go func(i, k int) {
defer wg.Done()
net.Connect(ids[i], ids[k])
}(i, k)
}
}
wg.Wait()
net.ConnectNodesChain(nil)
log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
// construct the peer pot, so that kademlia health can be checked
check := func(ctx context.Context, id enode.ID) (bool, error) {
@ -494,7 +422,7 @@ func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapt
healthy := &network.Health{}
ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)

if err := client.Call(&healthy, "hive_healthy", ppmap); err != nil {
if err := client.Call(&healthy, "hive_getHealthInfo", ppmap[common.Bytes2Hex(id.Bytes())]); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
log.Info(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v", id, healthy.ConnectNN, healthy.KnowNN))

4 vendor/github.com/ethereum/go-ethereum/swarm/network/simulations/overlay_test.go generated vendored
@ -32,7 +32,7 @@ import (
)

var (
nodeCount = 16
nodeCount = 10
)

//This test is used to test the overlay simulation.
@ -179,7 +179,7 @@ func watchSimEvents(net *simulations.Network, ctx context.Context, trigger chan
case ev := <-events:
//only catch node up events
if ev.Type == simulations.EventTypeNode {
if ev.Node.Up {
if ev.Node.Up() {
log.Debug("got node up event", "event", ev, "node", ev.Node.Config.ID)
select {
case trigger <- ev.Node.Config.ID:

149 vendor/github.com/ethereum/go-ethereum/swarm/network/stream/common_test.go generated vendored
@ -26,17 +26,19 @@ import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
|
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
|
||||
"github.com/ethereum/go-ethereum/swarm/network"
|
||||
"github.com/ethereum/go-ethereum/swarm/network/simulation"
|
||||
"github.com/ethereum/go-ethereum/swarm/state"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
|
||||
"github.com/ethereum/go-ethereum/swarm/testutil"
|
||||
colorable "github.com/mattn/go-colorable"
|
||||
)
|
||||
@ -67,7 +69,81 @@ func init() {
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
|
||||
}
|
||||
|
||||
func newStreamerTester(t *testing.T, registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
|
||||
// newNetStoreAndDelivery is a default constructor for BzzAddr, NetStore and Delivery, used in Simulations
|
||||
func newNetStoreAndDelivery(ctx *adapters.ServiceContext, bucket *sync.Map) (*network.BzzAddr, *storage.NetStore, *Delivery, func(), error) {
|
||||
addr := network.NewAddr(ctx.Config.Node())
|
||||
|
||||
netStore, delivery, cleanup, err := netStoreAndDeliveryWithAddr(ctx, bucket, addr)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
||||
|
||||
return addr, netStore, delivery, cleanup, nil
|
||||
}
|
||||
|
||||
// newNetStoreAndDeliveryWithBzzAddr is a constructor for NetStore and Delivery, used in Simulations, accepting any BzzAddr
|
||||
func newNetStoreAndDeliveryWithBzzAddr(ctx *adapters.ServiceContext, bucket *sync.Map, addr *network.BzzAddr) (*storage.NetStore, *Delivery, func(), error) {
|
||||
netStore, delivery, cleanup, err := netStoreAndDeliveryWithAddr(ctx, bucket, addr)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
|
||||
|
||||
return netStore, delivery, cleanup, nil
|
||||
}
|
||||
|
||||
// newNetStoreAndDeliveryWithRequestFunc is a constructor for NetStore and Delivery, used in Simulations, accepting any NetStore.RequestFunc
|
||||
func newNetStoreAndDeliveryWithRequestFunc(ctx *adapters.ServiceContext, bucket *sync.Map, rf network.RequestFunc) (*network.BzzAddr, *storage.NetStore, *Delivery, func(), error) {
|
||||
addr := network.NewAddr(ctx.Config.Node())
|
||||
|
||||
netStore, delivery, cleanup, err := netStoreAndDeliveryWithAddr(ctx, bucket, addr)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
netStore.NewNetFetcherFunc = network.NewFetcherFactory(rf, true).New
|
||||
|
||||
return addr, netStore, delivery, cleanup, nil
|
||||
}
|
||||
|
||||
func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map, addr *network.BzzAddr) (*storage.NetStore, *Delivery, func(), error) {
|
||||
n := ctx.Config.Node()
|
||||
|
||||
store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
|
||||
if *useMockStore {
|
||||
store, datadir, err = createMockStore(mockmem.NewGlobalStore(), n.ID(), addr)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
localStore := store.(*storage.LocalStore)
|
||||
netStore, err := storage.NewNetStore(localStore, nil)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
|
||||
|
||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
|
||||
delivery := NewDelivery(kad, netStore)
|
||||
|
||||
bucket.Store(bucketKeyStore, store)
|
||||
bucket.Store(bucketKeyDB, netStore)
|
||||
bucket.Store(bucketKeyDelivery, delivery)
|
||||
bucket.Store(bucketKeyFileStore, fileStore)
|
||||
|
||||
cleanup := func() {
|
||||
netStore.Close()
|
||||
os.RemoveAll(datadir)
|
||||
}
|
||||
|
||||
return netStore, delivery, cleanup, nil
|
||||
}
|
||||
|
||||
func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
|
||||
// setup
|
||||
addr := network.RandomAddr() // tested peers peer address
|
||||
to := network.NewKademlia(addr.OAddr, network.NewKadParams())
|
||||
@ -75,7 +151,7 @@ func newStreamerTester(t *testing.T, registryOptions *RegistryOptions) (*p2ptest
|
||||
// temp datadir
|
||||
datadir, err := ioutil.TempDir("", "streamer")
|
||||
if err != nil {
|
||||
return nil, nil, nil, func() {}, err
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
removeDataDir := func() {
|
||||
os.RemoveAll(datadir)
|
||||
@ -87,12 +163,14 @@ func newStreamerTester(t *testing.T, registryOptions *RegistryOptions) (*p2ptest
|
||||
|
||||
localStore, err := storage.NewTestLocalStoreForAddr(params)
|
||||
if err != nil {
|
||||
return nil, nil, nil, removeDataDir, err
|
||||
removeDataDir()
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
netStore, err := storage.NewNetStore(localStore, nil)
|
||||
if err != nil {
|
||||
return nil, nil, nil, removeDataDir, err
|
||||
removeDataDir()
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
delivery := NewDelivery(to, netStore)
|
||||
@ -102,10 +180,11 @@ func newStreamerTester(t *testing.T, registryOptions *RegistryOptions) (*p2ptest
|
||||
streamer.Close()
|
||||
removeDataDir()
|
||||
}
|
||||
protocolTester := p2ptest.NewProtocolTester(t, addr.ID(), 1, streamer.runProtocol)
|
||||
protocolTester := p2ptest.NewProtocolTester(addr.ID(), 1, streamer.runProtocol)
|
||||
|
||||
err = waitForPeers(streamer, 1*time.Second, 1)
|
||||
err = waitForPeers(streamer, 10*time.Second, 1)
|
||||
if err != nil {
|
||||
teardown()
|
||||
return nil, nil, nil, nil, errors.New("timeout: peer is not created")
|
||||
}
|
||||
|
||||
@ -138,6 +217,11 @@ func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore {
|
||||
}
|
||||
}
|
||||
|
||||
// not used in this context, only to fulfill ChunkStore interface
|
||||
func (rrs *roundRobinStore) Has(ctx context.Context, addr storage.Address) bool {
|
||||
panic("RoundRobinStor doesn't support HasChunk")
|
||||
}
|
||||
|
||||
func (rrs *roundRobinStore) Get(ctx context.Context, addr storage.Address) (storage.Chunk, error) {
|
||||
return nil, errors.New("get not well defined on round robin store")
|
||||
}
|
||||
@@ -236,3 +320,54 @@ func createTestLocalStorageForID(id enode.ID, addr *network.BzzAddr) (storage.Ch
	}
	return store, datadir, nil
}

// watchDisconnections receives simulation peer events in a new goroutine and sets atomic value
// disconnected to true in case of a disconnect event.
func watchDisconnections(ctx context.Context, sim *simulation.Simulation) (disconnected *boolean) {
	log.Debug("Watching for disconnections")
	disconnections := sim.PeerEvents(
		ctx,
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Drop(),
	)
	disconnected = new(boolean)
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case d := <-disconnections:
				if d.Error != nil {
					log.Error("peer drop event error", "node", d.NodeID, "peer", d.PeerID, "err", d.Error)
				} else {
					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
				}
				disconnected.set(true)
			}
		}
	}()
	return disconnected
}

// boolean is used to concurrently set
// and read a boolean value.
type boolean struct {
	v  bool
	mu sync.RWMutex
}

// set sets the value.
func (b *boolean) set(v bool) {
	b.mu.Lock()
	defer b.mu.Unlock()

	b.v = v
}

// bool reads the value.
func (b *boolean) bool() bool {
	b.mu.RLock()
	defer b.mu.RUnlock()

	return b.v
}
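
Aside: the helper pair added above — a context-scoped watcher goroutine plus a mutex-guarded flag — is the pattern the refactored tests below all adopt. A self-contained sketch of the same mechanics, with stand-in names and a plain channel in place of the simulation's event stream:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// flag mirrors the boolean type above: a mutex-guarded bool.
type flag struct {
	mu sync.RWMutex
	v  bool
}

func (f *flag) set(v bool) { f.mu.Lock(); defer f.mu.Unlock(); f.v = v }
func (f *flag) get() bool  { f.mu.RLock(); defer f.mu.RUnlock(); return f.v }

// watch mirrors watchDisconnections: it returns immediately and flips the
// flag from a goroutine that lives until the context is cancelled.
func watch(ctx context.Context, drops <-chan struct{}) *flag {
	f := new(flag)
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case <-drops:
				f.set(true)
			}
		}
	}()
	return f
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	drops := make(chan struct{}, 1)
	disconnected := watch(ctx, drops)

	drops <- struct{}{}               // simulate a peer drop event
	time.Sleep(10 * time.Millisecond) // give the watcher a chance to run
	fmt.Println("disconnected:", disconnected.get())
}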

9 vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery.go generated vendored
@@ -144,7 +144,6 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
	ctx, osp = spancontext.StartSpan(
		ctx,
		"retrieve.request")
	defer osp.Finish()

	s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", true))
	if err != nil {
@@ -167,6 +166,7 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *
	}()

	go func() {
		defer osp.Finish()
		chunk, err := d.chunkStore.Get(ctx, req.Addr)
		if err != nil {
			retrieveChunkFail.Inc(1)
@@ -213,11 +213,12 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *Ch
	ctx, osp = spancontext.StartSpan(
		ctx,
		"chunk.delivery")
	defer osp.Finish()

	processReceivedChunksCount.Inc(1)

	go func() {
		defer osp.Finish()

		req.peer = sp
		err := d.chunkStore.Put(ctx, storage.NewChunk(req.Addr, req.SData))
		if err != nil {
@@ -255,8 +256,8 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
			return true
		}
		sp = d.getPeer(id)
		// sp is nil, when we encounter a peer that is not registered for delivery, i.e. doesn't support the `stream` protocol
		if sp == nil {
			//log.Warn("Delivery.RequestFromPeers: peer not found", "id", id)
			return true
		}
		spID = &id
@@ -271,7 +272,7 @@ func (d *Delivery) RequestFromPeers(ctx context.Context, req *network.Request) (
		Addr:      req.Addr,
		SkipCheck: req.SkipCheck,
		HopCount:  req.HopCount,
	}, Top)
	}, Top, "request.from.peers")
	if err != nil {
		return nil, nil, err
	}
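
Aside: both handler hunks above move defer osp.Finish() out of the handler body and into the spawned goroutine, so the tracing span stays open until the asynchronous chunk work completes instead of closing as soon as the handler returns. A dependency-free sketch of that ordering difference (span here is a stand-in, not the spancontext API):

package main

import "fmt"

type span struct{ name string }

func (s *span) Finish() { fmt.Println("span finished:", s.name) }

func handle(done chan struct{}) {
	s := &span{name: "chunk.delivery"}
	go func() {
		defer close(done) // registered first, runs last
		defer s.Finish()  // runs when the async work is done, before close(done)
		fmt.Println("doing async chunk work")
	}()
	// a defer s.Finish() here instead would fire as soon as handle returns,
	// cutting the span short while the goroutine is still working
}

func main() {
	done := make(chan struct{})
	handle(done)
	<-done // by the time this returns, the span has been finished
}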

351 vendor/github.com/ethereum/go-ethereum/swarm/network/stream/delivery_test.go generated vendored
@@ -21,9 +21,7 @@ import (
	"context"
	"errors"
	"fmt"
	"os"
	"sync"
	"sync/atomic"
	"testing"
	"time"

@@ -48,11 +46,11 @@ func TestStreamerRetrieveRequest(t *testing.T) {
		Retrieval: RetrievalClientOnly,
		Syncing:   SyncingDisabled,
	}
	tester, streamer, _, teardown, err := newStreamerTester(t, regOpts)
	defer teardown()
	tester, streamer, _, teardown, err := newStreamerTester(regOpts)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]

@@ -97,14 +95,14 @@ func TestStreamerRetrieveRequest(t *testing.T) {
//Test requesting a chunk from a peer then issuing a "empty" OfferedHashesMsg (no hashes available yet)
//Should time out as the peer does not have the chunk (no syncing happened previously)
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{
	tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
		Retrieval: RetrievalEnabled,
		Syncing:   SyncingDisabled, //do no syncing
	})
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]

@@ -169,14 +167,14 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
// upstream request server receives a retrieve Request and responds with
// offered hashes or delivery if skipHash is set to true
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
	tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
		Retrieval: RetrievalEnabled,
		Syncing:   SyncingDisabled,
	})
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]

@@ -359,14 +357,14 @@ func TestRequestFromPeersWithLightNode(t *testing.T) {
}

func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
	tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
		Retrieval: RetrievalDisabled,
		Syncing:   SyncingDisabled,
	})
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return &testClient{
@@ -455,164 +453,136 @@ func TestDeliveryFromNodes(t *testing.T) {
}

func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}

			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck: skipCheck,
				Syncing:   SyncingDisabled,
				Retrieval: RetrievalEnabled,
			}, nil)
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		nodeIDs := sim.UpNodeIDs()
		//determine the pivot node to be the first node of the simulation
		pivot := nodeIDs[0]

		//distribute chunks of a random file into Stores of nodes 1 to nodes
		//we will do this by creating a file store with an underlying round-robin store:
		//the file store will create a hash for the uploaded file, but every chunk will be
		//distributed to different nodes via round-robin scheduling
		log.Debug("Writing file to round-robin file store")
		//to do this, we create an array for chunkstores (length minus one, the pivot node)
		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
		//we then need to get all stores from the sim....
		lStores := sim.NodesItems(bucketKeyStore)
		i := 0
		//...iterate the buckets...
		for id, bucketVal := range lStores {
			//...and remove the one which is the pivot node
			if id == pivot {
				continue
			}
			//the other ones are added to the array...
			stores[i] = bucketVal.(storage.ChunkStore)
			i++
		}
		//...which then gets passed to the round-robin file store
		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
		//now we can actually upload a (random) file to the round-robin store
		size := chunkCount * chunkSize
		log.Debug("Storing data to file store")
		fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
		// wait until all chunks stored
		if err != nil {
			return err
		}
		err = wait(ctx)
		if err != nil {
			return err
		}

		log.Debug("Waiting for kademlia")
		// TODO this does not seem to be correct usage of the function, as the simulation may have no kademlias
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		//get the pivot node's filestore
		item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
		if !ok {
			return fmt.Errorf("No filestore")
		}
		pivotFileStore := item.(*storage.FileStore)
		log.Debug("Starting retrieval routine")
		retErrC := make(chan error)
		go func() {
			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
			// we must wait for the peer connections to have started before requesting
			n, err := readAll(pivotFileStore, fileHash)
			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
			retErrC <- err
		}()

		log.Debug("Watching for disconnections")
		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Drop(),
		)

		var disconnected atomic.Value
		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
					disconnected.Store(true)
	t.Helper()
	t.Run(fmt.Sprintf("testDeliveryFromNodes_%d_%d_skipCheck_%t", nodes, chunkCount, skipCheck), func(t *testing.T) {
		sim := simulation.New(map[string]simulation.ServiceFunc{
			"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
				addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
				if err != nil {
					return nil, nil, err
				}

				r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
					SkipCheck: skipCheck,
					Syncing:   SyncingDisabled,
					Retrieval: RetrievalEnabled,
				}, nil)
				bucket.Store(bucketKeyRegistry, r)

				cleanup = func() {
					r.Close()
					clean()
				}

				return r, cleanup, nil
			},
		})
		defer sim.Close()

		log.Info("Adding nodes to simulation")
		_, err := sim.AddNodesAndConnectChain(nodes)
		if err != nil {
			t.Fatal(err)
		}

		log.Info("Starting simulation")
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
			nodeIDs := sim.UpNodeIDs()
			//determine the pivot node to be the first node of the simulation
			pivot := nodeIDs[0]

			//distribute chunks of a random file into Stores of nodes 1 to nodes
			//we will do this by creating a file store with an underlying round-robin store:
			//the file store will create a hash for the uploaded file, but every chunk will be
			//distributed to different nodes via round-robin scheduling
			log.Debug("Writing file to round-robin file store")
			//to do this, we create an array for chunkstores (length minus one, the pivot node)
			stores := make([]storage.ChunkStore, len(nodeIDs)-1)
			//we then need to get all stores from the sim....
			lStores := sim.NodesItems(bucketKeyStore)
			i := 0
			//...iterate the buckets...
			for id, bucketVal := range lStores {
				//...and remove the one which is the pivot node
				if id == pivot {
					continue
				}
				//the other ones are added to the array...
				stores[i] = bucketVal.(storage.ChunkStore)
				i++
			}
		}()
		defer func() {
			//...which then gets passed to the round-robin file store
			roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
			//now we can actually upload a (random) file to the round-robin store
			size := chunkCount * chunkSize
			log.Debug("Storing data to file store")
			fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
			// wait until all chunks stored
			if err != nil {
				if yes, ok := disconnected.Load().(bool); ok && yes {
				return err
			}
			err = wait(ctx)
			if err != nil {
				return err
			}

			log.Debug("Waiting for kademlia")
			// TODO this does not seem to be correct usage of the function, as the simulation may have no kademlias
			if _, err := sim.WaitTillHealthy(ctx); err != nil {
				return err
			}

			//get the pivot node's filestore
			item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
			if !ok {
				return fmt.Errorf("No filestore")
			}
			pivotFileStore := item.(*storage.FileStore)
			log.Debug("Starting retrieval routine")
			retErrC := make(chan error)
			go func() {
				// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
				// we must wait for the peer connections to have started before requesting
				n, err := readAll(pivotFileStore, fileHash)
				log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
				retErrC <- err
			}()

			disconnected := watchDisconnections(ctx, sim)
			defer func() {
				if err != nil && disconnected.bool() {
					err = errors.New("disconnect events received")
				}
			}()

			//finally check that the pivot node gets all chunks via the root hash
			log.Debug("Check retrieval")
			success := true
			var total int64
			total, err = readAll(pivotFileStore, fileHash)
			if err != nil {
				return err
			}
			log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
			if err != nil || total != int64(size) {
				success = false
			}
		}()

		//finally check that the pivot node gets all chunks via the root hash
		log.Debug("Check retrieval")
		success := true
		var total int64
		total, err = readAll(pivotFileStore, fileHash)
		if err != nil {
			return err
			if !success {
				return fmt.Errorf("Test failed, chunks not available on all nodes")
			}
			if err := <-retErrC; err != nil {
				return fmt.Errorf("requesting chunks: %v", err)
			}
			log.Debug("Test terminated successfully")
			return nil
		})
		if result.Error != nil {
			t.Fatal(result.Error)
		}
		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
		if err != nil || total != int64(size) {
			success = false
		}

		if !success {
			return fmt.Errorf("Test failed, chunks not available on all nodes")
		}
		if err := <-retErrC; err != nil {
			t.Fatalf("requesting chunks: %v", err)
		}
		log.Debug("Test terminated successfully")
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
@@ -644,25 +614,10 @@ func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck: skipCheck,
@@ -670,12 +625,14 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
				Retrieval:       RetrievalDisabled,
				SyncUpdateDelay: 0,
			}, nil)
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)
			cleanup = func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil

		},
	})
	defer sim.Close()
@@ -686,21 +643,22 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
		b.Fatal(err)
	}

	ctx := context.Background()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		nodeIDs := sim.UpNodeIDs()
		node := nodeIDs[len(nodeIDs)-1]

		item, ok := sim.NodeItem(node, bucketKeyFileStore)
		if !ok {
			b.Fatal("No filestore")
			return errors.New("No filestore")
		}
		remoteFileStore := item.(*storage.FileStore)

		pivotNode := nodeIDs[0]
		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
		if !ok {
			b.Fatal("No filestore")
			return errors.New("No filestore")
		}
		netStore := item.(*storage.NetStore)

@@ -708,26 +666,10 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
			return err
		}

		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Drop(),
		)

		var disconnected atomic.Value
		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
					disconnected.Store(true)
				}
			}
		}()
		disconnected := watchDisconnections(ctx, sim)
		defer func() {
			if err != nil {
				if yes, ok := disconnected.Load().(bool); ok && yes {
					err = errors.New("disconnect events received")
				}
			if err != nil && disconnected.bool() {
				err = errors.New("disconnect events received")
			}
		}()
		// benchmark loop
@@ -742,12 +684,12 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
			ctx := context.TODO()
			hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
			if err != nil {
				b.Fatalf("expected no error. got %v", err)
				return fmt.Errorf("store: %v", err)
			}
			// wait until all chunks stored
			err = wait(ctx)
			if err != nil {
				b.Fatalf("expected no error. got %v", err)
				return fmt.Errorf("wait store: %v", err)
			}
			// collect the hashes
			hashes[i] = hash
@@ -783,10 +725,7 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck b
				break Loop
			}
		}
		if err != nil {
			b.Fatal(err)
		}
		return nil
		return err
	})
	if result.Error != nil {
		b.Fatal(result.Error)
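
Aside: the benchmark hunks above swap b.Fatal/b.Fatalf calls inside the sim.Run closure for returned errors. Go's testing package requires Fatal/FailNow to be called from the goroutine running the test or benchmark, so a closure that may execute elsewhere should hand the error back and let the caller fail once at the top level. A stand-in sketch of that shape (run here is not the swarm API):

package main

import (
	"errors"
	"fmt"
)

type result struct{ Error error }

// run mimics the shape of sim.Run: it executes the closure (which in the real
// API may run work on other goroutines) and hands its error back to the caller.
func run(fn func() error) result { return result{Error: fn()} }

func main() {
	res := run(func() error {
		// a failing step inside the closure: return the error instead of b.Fatal
		return errors.New("store: disk full")
	})
	if res.Error != nil {
		// the single top-level failure point (b.Fatal(result.Error) in the test)
		fmt.Println("benchmark failed:", res.Error)
	}
}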

58 vendor/github.com/ethereum/go-ethereum/swarm/network/stream/intervals_test.go generated vendored
@@ -21,9 +21,7 @@ import (
	"encoding/binary"
	"errors"
	"fmt"
	"os"
	"sync"
	"sync/atomic"
	"testing"
	"time"

@@ -31,7 +29,6 @@ import (
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
@@ -62,26 +59,11 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
	externalStreamMaxKeys := uint64(100)

	sim := simulation.New(map[string]simulation.ServiceFunc{
		"intervalsStreamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			n := ctx.Config.Node()
			addr := network.NewAddr(n)
			store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
		"intervalsStreamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) {
			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				store.Close()
				os.RemoveAll(datadir)
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
@@ -97,11 +79,12 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
				return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
			})

			fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)
			cleanup := func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil

		},
	})
	defer sim.Close()
@@ -134,13 +117,11 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {

	_, wait, err := fileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
	if err != nil {
		log.Error("Store error: %v", "err", err)
		t.Fatal(err)
		return fmt.Errorf("store: %v", err)
	}
	err = wait(ctx)
	if err != nil {
		log.Error("Wait error: %v", "err", err)
		t.Fatal(err)
		return fmt.Errorf("wait store: %v", err)
	}

	item, ok = sim.NodeItem(checker, bucketKeyRegistry)
@@ -152,32 +133,15 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
	liveErrC := make(chan error)
	historyErrC := make(chan error)

	log.Debug("Watching for disconnections")
	disconnections := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Drop(),
	)

	err = registry.Subscribe(storer, NewStream(externalStreamName, "", live), history, Top)
	if err != nil {
		return err
	}

	var disconnected atomic.Value
	go func() {
		for d := range disconnections {
			if d.Error != nil {
				log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
				disconnected.Store(true)
			}
		}
	}()
	disconnected := watchDisconnections(ctx, sim)
	defer func() {
		if err != nil {
			if yes, ok := disconnected.Load().(bool); ok && yes {
				err = errors.New("disconnect events received")
			}
		if err != nil && disconnected.bool() {
			err = errors.New("disconnect events received")
		}
	}()

16 vendor/github.com/ethereum/go-ethereum/swarm/network/stream/lightnode_test.go generated vendored
@@ -28,11 +28,11 @@ func TestLigthnodeRetrieveRequestWithRetrieve(t *testing.T) {
		Retrieval: RetrievalClientOnly,
		Syncing:   SyncingDisabled,
	}
	tester, _, _, teardown, err := newStreamerTester(t, registryOptions)
	defer teardown()
	tester, _, _, teardown, err := newStreamerTester(registryOptions)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]

@@ -67,11 +67,11 @@ func TestLigthnodeRetrieveRequestWithoutRetrieve(t *testing.T) {
		Retrieval: RetrievalDisabled,
		Syncing:   SyncingDisabled,
	}
	tester, _, _, teardown, err := newStreamerTester(t, registryOptions)
	defer teardown()
	tester, _, _, teardown, err := newStreamerTester(registryOptions)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]

@@ -111,11 +111,11 @@ func TestLigthnodeRequestSubscriptionWithSync(t *testing.T) {
		Retrieval: RetrievalDisabled,
		Syncing:   SyncingRegisterOnly,
	}
	tester, _, _, teardown, err := newStreamerTester(t, registryOptions)
	defer teardown()
	tester, _, _, teardown, err := newStreamerTester(registryOptions)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]

@@ -156,11 +156,11 @@ func TestLigthnodeRequestSubscriptionWithoutSync(t *testing.T) {
		Retrieval: RetrievalDisabled,
		Syncing:   SyncingDisabled,
	}
	tester, _, _, teardown, err := newStreamerTester(t, registryOptions)
	defer teardown()
	tester, _, _, teardown, err := newStreamerTester(registryOptions)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	node := tester.Nodes[0]
Some files were not shown because too many files have changed in this diff.