swarm, cmd/swarm: Merge branch 'master' into multiple-ens-endpoints
Merge with changes that implement the config file PR #15548. The field *EnsApi string* in swarm/api.Config is replaced with *EnsAPIs []string*. A new field *EnsDisabled bool* is added to swarm/api.Config as an easy way to disable ENS resolution from the config file. The signature of the swarm.NewSwarm function is changed and simplified.

commit 19982f9467
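As a rough illustration of the configuration change described above, here is a minimal sketch of the affected swarm/api.Config fields — the field names come from the commit message, while the struct layout and comments are assumptions rather than the actual diff:

```go
package api

// Config sketch: only the fields touched by this merge are shown; everything
// else in swarm/api.Config is elided (assumption, not the real struct).
type Config struct {
	// ...other swarm configuration fields...

	// EnsAPIs replaces the former `EnsApi string` field, so that more than
	// one ENS resolver endpoint can be configured at once.
	EnsAPIs []string

	// EnsDisabled provides an easy way to switch ENS resolution off entirely
	// when swarm is driven from a config file.
	EnsDisabled bool
}
```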
9 .github/CODEOWNERS (new file)
@@ -0,0 +1,9 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.

accounts/usbwallet @karalabe
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
28 .travis.yml
@ -8,7 +8,6 @@ matrix:
|
||||
sudo: required
|
||||
go: 1.7.x
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
@ -20,7 +19,6 @@ matrix:
|
||||
sudo: required
|
||||
go: 1.8.x
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
@ -33,7 +31,6 @@ matrix:
|
||||
sudo: required
|
||||
go: 1.9.x
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
@ -42,7 +39,6 @@ matrix:
|
||||
|
||||
- os: osx
|
||||
go: 1.9.x
|
||||
sudo: required
|
||||
script:
|
||||
- brew update
|
||||
- brew install caskroom/cask/brew-cask
|
||||
@ -53,15 +49,12 @@ matrix:
|
||||
# This builder only tests code linters on latest version of Go
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.9.x
|
||||
env:
|
||||
- lint
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
- sudo chmod 666 /dev/fuse
|
||||
- sudo chown root:$USER /etc/fuse.conf
|
||||
- go run build/ci.go lint
|
||||
|
||||
# This builder does the Ubuntu PPA and Linux Azure uploads
|
||||
@ -72,6 +65,8 @@ matrix:
|
||||
env:
|
||||
- ubuntu-ppa
|
||||
- azure-linux
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
@ -104,12 +99,13 @@ matrix:
|
||||
# This builder does the Linux Azure MIPS xgo uploads
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
services:
|
||||
- docker
|
||||
go: 1.9.x
|
||||
env:
|
||||
- azure-linux-mips
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
|
||||
- for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
|
||||
@ -146,6 +142,8 @@ matrix:
|
||||
env:
|
||||
- azure-android
|
||||
- maven-android
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
before_install:
|
||||
- curl https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz | tar -xz
|
||||
- export PATH=`pwd`/go/bin:$PATH
|
||||
@ -169,6 +167,8 @@ matrix:
|
||||
- azure-osx
|
||||
- azure-ios
|
||||
- cocoapods-ios
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds
|
||||
@ -193,15 +193,11 @@ matrix:
|
||||
go: 1.9.x
|
||||
env:
|
||||
- azure-purge
|
||||
git:
|
||||
submodules: false # avoid cloning ethereum/tests
|
||||
script:
|
||||
- go run build/ci.go purge -store gethstore/builds -days 14
|
||||
|
||||
install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
script:
|
||||
- go run build/ci.go install
|
||||
- go run build/ci.go test -coverage
|
||||
|
||||
notifications:
|
||||
webhooks:
|
||||
urls:
|
||||
|
@@ -266,7 +266,7 @@ instance for mining, run it with all your usual flags, extended by:
$ geth <usual-flags> --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000
```

Which will start mining bocks and transactions on a single CPU thread, crediting all proceedings to
Which will start mining blocks and transactions on a single CPU thread, crediting all proceedings to
the account specified by `--etherbase`. You can further tune the mining by changing the default gas
limit blocks converge to (`--targetgaslimit`) and the price transactions are accepted at (`--gasprice`).
@ -20,7 +20,6 @@ import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@ -75,13 +74,6 @@ type accountCache struct {
|
||||
fileC fileCache
|
||||
}
|
||||
|
||||
// fileCache is a cache of files seen during scan of keystore
|
||||
type fileCache struct {
|
||||
all *set.SetNonTS // list of all files
|
||||
mtime time.Time // latest mtime seen
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func newAccountCache(keydir string) (*accountCache, chan struct{}) {
|
||||
ac := &accountCache{
|
||||
keydir: keydir,
|
||||
@ -236,66 +228,22 @@ func (ac *accountCache) close() {
|
||||
ac.mu.Unlock()
|
||||
}
|
||||
|
||||
// scanFiles performs a new scan on the given directory, compares against the already
|
||||
// cached filenames, and returns file sets: new, missing , modified
|
||||
func (fc *fileCache) scanFiles(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
|
||||
t0 := time.Now()
|
||||
files, err := ioutil.ReadDir(keyDir)
|
||||
t1 := time.Now()
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
fc.mu.RLock()
|
||||
prevMtime := fc.mtime
|
||||
fc.mu.RUnlock()
|
||||
|
||||
filesNow := set.NewNonTS()
|
||||
moddedFiles := set.NewNonTS()
|
||||
var newMtime time.Time
|
||||
for _, fi := range files {
|
||||
modTime := fi.ModTime()
|
||||
path := filepath.Join(keyDir, fi.Name())
|
||||
if skipKeyFile(fi) {
|
||||
log.Trace("Ignoring file on account scan", "path", path)
|
||||
continue
|
||||
}
|
||||
filesNow.Add(path)
|
||||
if modTime.After(prevMtime) {
|
||||
moddedFiles.Add(path)
|
||||
}
|
||||
if modTime.After(newMtime) {
|
||||
newMtime = modTime
|
||||
}
|
||||
}
|
||||
t2 := time.Now()
|
||||
|
||||
fc.mu.Lock()
|
||||
// Missing = previous - current
|
||||
missing := set.Difference(fc.all, filesNow)
|
||||
// New = current - previous
|
||||
newFiles := set.Difference(filesNow, fc.all)
|
||||
// Modified = modified - new
|
||||
modified := set.Difference(moddedFiles, newFiles)
|
||||
fc.all = filesNow
|
||||
fc.mtime = newMtime
|
||||
fc.mu.Unlock()
|
||||
t3 := time.Now()
|
||||
log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
|
||||
return newFiles, missing, modified, nil
|
||||
}
|
||||
|
||||
// scanAccounts checks if any changes have occurred on the filesystem, and
|
||||
// updates the account cache accordingly
|
||||
func (ac *accountCache) scanAccounts() error {
|
||||
newFiles, missingFiles, modified, err := ac.fileC.scanFiles(ac.keydir)
|
||||
t1 := time.Now()
|
||||
// Scan the entire folder metadata for file changes
|
||||
creates, deletes, updates, err := ac.fileC.scan(ac.keydir)
|
||||
if err != nil {
|
||||
log.Debug("Failed to reload keystore contents", "err", err)
|
||||
return err
|
||||
}
|
||||
if creates.Size() == 0 && deletes.Size() == 0 && updates.Size() == 0 {
|
||||
return nil
|
||||
}
|
||||
// Create a helper method to scan the contents of the key files
|
||||
var (
|
||||
buf = new(bufio.Reader)
|
||||
keyJSON struct {
|
||||
buf = new(bufio.Reader)
|
||||
key struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
)
|
||||
@ -308,9 +256,9 @@ func (ac *accountCache) scanAccounts() error {
|
||||
defer fd.Close()
|
||||
buf.Reset(fd)
|
||||
// Parse the address.
|
||||
keyJSON.Address = ""
|
||||
err = json.NewDecoder(buf).Decode(&keyJSON)
|
||||
addr := common.HexToAddress(keyJSON.Address)
|
||||
key.Address = ""
|
||||
err = json.NewDecoder(buf).Decode(&key)
|
||||
addr := common.HexToAddress(key.Address)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Debug("Failed to decode keystore key", "path", path, "err", err)
|
||||
@ -321,47 +269,30 @@ func (ac *accountCache) scanAccounts() error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Process all the file diffs
|
||||
start := time.Now()
|
||||
|
||||
for _, p := range newFiles.List() {
|
||||
path, _ := p.(string)
|
||||
a := readAccount(path)
|
||||
if a != nil {
|
||||
for _, p := range creates.List() {
|
||||
if a := readAccount(p.(string)); a != nil {
|
||||
ac.add(*a)
|
||||
}
|
||||
}
|
||||
for _, p := range missingFiles.List() {
|
||||
path, _ := p.(string)
|
||||
ac.deleteByFile(path)
|
||||
for _, p := range deletes.List() {
|
||||
ac.deleteByFile(p.(string))
|
||||
}
|
||||
|
||||
for _, p := range modified.List() {
|
||||
path, _ := p.(string)
|
||||
a := readAccount(path)
|
||||
for _, p := range updates.List() {
|
||||
path := p.(string)
|
||||
ac.deleteByFile(path)
|
||||
if a != nil {
|
||||
if a := readAccount(path); a != nil {
|
||||
ac.add(*a)
|
||||
}
|
||||
}
|
||||
|
||||
t2 := time.Now()
|
||||
end := time.Now()
|
||||
|
||||
select {
|
||||
case ac.notify <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
log.Trace("Handled keystore changes", "time", t2.Sub(t1))
|
||||
|
||||
log.Trace("Handled keystore changes", "time", end.Sub(start))
|
||||
return nil
|
||||
}
|
||||
|
||||
func skipKeyFile(fi os.FileInfo) bool {
|
||||
// Skip editor backups and UNIX-style hidden files.
|
||||
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
|
||||
return true
|
||||
}
|
||||
// Skip misc special files, directories (yes, symlinks too).
|
||||
if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -59,7 +59,7 @@ func TestWatchNewFile(t *testing.T) {
|
||||
|
||||
// Ensure the watcher is started before adding any files.
|
||||
ks.Accounts()
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
// Move in the files.
|
||||
wantAccounts := make([]accounts.Account, len(cachetestAccounts))
|
||||
@ -349,6 +349,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
|
||||
return
|
||||
}
|
||||
|
||||
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
// Now replace file contents
|
||||
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
|
||||
t.Fatal(err)
|
||||
@ -362,6 +365,9 @@ func TestUpdatedKeyfileContents(t *testing.T) {
|
||||
return
|
||||
}
|
||||
|
||||
// needed so that modTime of `file` is different to its current value after forceCopyFile
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
// Now replace file contents again
|
||||
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
|
||||
t.Fatal(err)
|
||||
@ -374,6 +380,10 @@ func TestUpdatedKeyfileContents(t *testing.T) {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// needed so that modTime of `file` is different to its current value after ioutil.WriteFile
|
||||
time.Sleep(1000 * time.Millisecond)
|
||||
|
||||
// Now replace file contents with crap
|
||||
if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
|
102 accounts/keystore/file_cache.go (new file)
@@ -0,0 +1,102 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package keystore
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
set "gopkg.in/fatih/set.v0"
|
||||
)
|
||||
|
||||
// fileCache is a cache of files seen during scan of keystore.
|
||||
type fileCache struct {
|
||||
all *set.SetNonTS // Set of all files from the keystore folder
|
||||
lastMod time.Time // Last time instance when a file was modified
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// scan performs a new scan on the given directory, compares against the already
|
||||
// cached filenames, and returns file sets: creates, deletes, updates.
|
||||
func (fc *fileCache) scan(keyDir string) (set.Interface, set.Interface, set.Interface, error) {
|
||||
t0 := time.Now()
|
||||
|
||||
// List all the failes from the keystore folder
|
||||
files, err := ioutil.ReadDir(keyDir)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
t1 := time.Now()
|
||||
|
||||
fc.mu.Lock()
|
||||
defer fc.mu.Unlock()
|
||||
|
||||
// Iterate all the files and gather their metadata
|
||||
all := set.NewNonTS()
|
||||
mods := set.NewNonTS()
|
||||
|
||||
var newLastMod time.Time
|
||||
for _, fi := range files {
|
||||
// Skip any non-key files from the folder
|
||||
path := filepath.Join(keyDir, fi.Name())
|
||||
if skipKeyFile(fi) {
|
||||
log.Trace("Ignoring file on account scan", "path", path)
|
||||
continue
|
||||
}
|
||||
// Gather the set of all and fresly modified files
|
||||
all.Add(path)
|
||||
|
||||
modified := fi.ModTime()
|
||||
if modified.After(fc.lastMod) {
|
||||
mods.Add(path)
|
||||
}
|
||||
if modified.After(newLastMod) {
|
||||
newLastMod = modified
|
||||
}
|
||||
}
|
||||
t2 := time.Now()
|
||||
|
||||
// Update the tracked files and return the three sets
|
||||
deletes := set.Difference(fc.all, all) // Deletes = previous - current
|
||||
creates := set.Difference(all, fc.all) // Creates = current - previous
|
||||
updates := set.Difference(mods, creates) // Updates = modified - creates
|
||||
|
||||
fc.all, fc.lastMod = all, newLastMod
|
||||
t3 := time.Now()
|
||||
|
||||
// Report on the scanning stats and return
|
||||
log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
|
||||
return creates, deletes, updates, nil
|
||||
}
|
||||
|
||||
// skipKeyFile ignores editor backups, hidden files and folders/symlinks.
|
||||
func skipKeyFile(fi os.FileInfo) bool {
|
||||
// Skip editor backups and UNIX-style hidden files.
|
||||
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
|
||||
return true
|
||||
}
|
||||
// Skip misc special files, directories (yes, symlinks too).
|
||||
if fi.IsDir() || fi.Mode()&os.ModeType != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
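As a side note, here is a minimal standalone sketch (hypothetical file names, not part of the diff) of how the three sets returned by the new fileCache.scan relate to each other, using the same set package the code imports:

```go
package main

import (
	"fmt"

	set "gopkg.in/fatih/set.v0"
)

func main() {
	// Hypothetical keystore file names, purely for illustration.
	previous := set.NewNonTS("a.json", "b.json") // tracked after the last scan
	current := set.NewNonTS("b.json", "c.json")  // present in the folder now
	modified := set.NewNonTS("b.json", "c.json") // mtime newer than the last scan

	deletes := set.Difference(previous, current) // a.json: disappeared since last scan
	creates := set.Difference(current, previous) // c.json: newly appeared
	updates := set.Difference(modified, creates) // b.json: changed, but not new

	fmt.Println(creates.List(), deletes.List(), updates.List())
}
```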
@ -28,6 +28,7 @@ package keystore
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
crand "crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
@ -90,6 +91,12 @@ func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string)
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
|
||||
func StoreKey(dir, auth string, scryptN, scryptP int) (common.Address, error) {
|
||||
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP}, crand.Reader, auth)
|
||||
return a.Address, err
|
||||
}
|
||||
|
||||
func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error {
|
||||
keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP)
|
||||
if err != nil {
|
||||
|
@ -81,10 +81,14 @@ func (w *watcher) loop() {
|
||||
// When an event occurs, the reload call is delayed a bit so that
|
||||
// multiple events arriving quickly only cause a single reload.
|
||||
var (
|
||||
debounce = time.NewTimer(0)
|
||||
debounceDuration = 500 * time.Millisecond
|
||||
rescanTriggered = false
|
||||
debounce = time.NewTimer(0)
|
||||
)
|
||||
// Ignore initial trigger
|
||||
if !debounce.Stop() {
|
||||
<-debounce.C
|
||||
}
|
||||
defer debounce.Stop()
|
||||
for {
|
||||
select {
|
||||
|
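For context, a minimal self-contained sketch of the timer-based debounce pattern this hunk rearranges — simplified and hypothetical, not the watcher's actual code:

```go
// Debounce sketch: collapse a burst of events into a single reload.
package main

import (
	"fmt"
	"time"
)

func main() {
	events := make(chan struct{}, 16)
	go func() { // simulate a quick burst of filesystem events
		for i := 0; i < 5; i++ {
			events <- struct{}{}
			time.Sleep(10 * time.Millisecond)
		}
	}()

	const debounceDuration = 500 * time.Millisecond
	debounce := time.NewTimer(0)
	if !debounce.Stop() { // ignore the initial trigger
		<-debounce.C
	}
	defer debounce.Stop()

	for {
		select {
		case <-events:
			debounce.Reset(debounceDuration) // restart the window on every event
		case <-debounce.C:
			fmt.Println("reload once for the whole burst")
			return
		}
	}
}
```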
@ -41,6 +41,11 @@ type Manager struct {
|
||||
// NewManager creates a generic account manager to sign transaction via various
|
||||
// supported backends.
|
||||
func NewManager(backends ...Backend) *Manager {
|
||||
// Retrieve the initial list of wallets from the backends and sort by URL
|
||||
var wallets []Wallet
|
||||
for _, backend := range backends {
|
||||
wallets = merge(wallets, backend.Wallets()...)
|
||||
}
|
||||
// Subscribe to wallet notifications from all backends
|
||||
updates := make(chan WalletEvent, 4*len(backends))
|
||||
|
||||
@ -48,11 +53,6 @@ func NewManager(backends ...Backend) *Manager {
|
||||
for i, backend := range backends {
|
||||
subs[i] = backend.Subscribe(updates)
|
||||
}
|
||||
// Retrieve the initial list of wallets from the backends and sort by URL
|
||||
var wallets []Wallet
|
||||
for _, backend := range backends {
|
||||
wallets = merge(wallets, backend.Wallets()...)
|
||||
}
|
||||
// Assemble the account manager and return
|
||||
am := &Manager{
|
||||
backends: make(map[reflect.Type][]Backend),
|
||||
|
16 build/ci.go
@ -19,7 +19,7 @@
|
||||
/*
|
||||
The ci command is called from Continuous Integration scripts.
|
||||
|
||||
Usage: go run ci.go <command> <command flags/arguments>
|
||||
Usage: go run build/ci.go <command> <command flags/arguments>
|
||||
|
||||
Available commands are:
|
||||
|
||||
@ -199,7 +199,7 @@ func doInstall(cmdline []string) {
|
||||
build.MustRun(goinstall)
|
||||
return
|
||||
}
|
||||
// If we are cross compiling to ARMv5 ARMv6 or ARMv7, clean any prvious builds
|
||||
// If we are cross compiling to ARMv5 ARMv6 or ARMv7, clean any previous builds
|
||||
if *arch == "arm" {
|
||||
os.RemoveAll(filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_arm"))
|
||||
for _, path := range filepath.SplitList(build.GOPATH()) {
|
||||
@ -323,11 +323,19 @@ func doLint(cmdline []string) {
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v1"), "--install")
|
||||
|
||||
// Run fast linters batched together
|
||||
configs := []string{"--vendor", "--disable-all", "--enable=vet", "--enable=gofmt", "--enable=misspell"}
|
||||
configs := []string{
|
||||
"--vendor",
|
||||
"--disable-all",
|
||||
"--enable=vet",
|
||||
"--enable=gofmt",
|
||||
"--enable=misspell",
|
||||
"--enable=goconst",
|
||||
"--min-occurrences=6", // for goconst
|
||||
}
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v1"), append(configs, packages...)...)
|
||||
|
||||
// Run slow linters one by one
|
||||
for _, linter := range []string{"unconvert"} {
|
||||
for _, linter := range []string{"unconvert", "gosimple"} {
|
||||
configs = []string{"--vendor", "--deadline=10m", "--disable-all", "--enable=" + linter}
|
||||
build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v1"), append(configs, packages...)...)
|
||||
}
|
||||
|
@ -83,7 +83,8 @@ var (
|
||||
captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
|
||||
captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
|
||||
|
||||
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
|
||||
noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication")
|
||||
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
|
||||
)
|
||||
|
||||
var (
|
||||
@ -132,6 +133,7 @@ func main() {
|
||||
"Amounts": amounts,
|
||||
"Periods": periods,
|
||||
"Recaptcha": *captchaToken,
|
||||
"NoAuth": *noauthFlag,
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("Failed to render the faucet template", "err", err)
|
||||
@ -374,7 +376,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
if err = websocket.JSON.Receive(conn, &msg); err != nil {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") &&
|
||||
if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") &&
|
||||
!strings.HasPrefix(msg.URL, "https://plus.google.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
|
||||
if err = sendError(conn, errors.New("URL doesn't link to supported services")); err != nil {
|
||||
log.Warn("Failed to send URL error to client", "err", err)
|
||||
@ -435,13 +437,19 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
)
|
||||
switch {
|
||||
case strings.HasPrefix(msg.URL, "https://gist.github.com/"):
|
||||
username, avatar, address, err = authGitHub(msg.URL)
|
||||
if err = sendError(conn, errors.New("GitHub authentication discontinued at the official request of GitHub")); err != nil {
|
||||
log.Warn("Failed to send GitHub deprecation to client", "err", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
case strings.HasPrefix(msg.URL, "https://twitter.com/"):
|
||||
username, avatar, address, err = authTwitter(msg.URL)
|
||||
case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
|
||||
username, avatar, address, err = authGooglePlus(msg.URL)
|
||||
case strings.HasPrefix(msg.URL, "https://www.facebook.com/"):
|
||||
username, avatar, address, err = authFacebook(msg.URL)
|
||||
case *noauthFlag:
|
||||
username, avatar, address, err = authNoAuth(msg.URL)
|
||||
default:
|
||||
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
|
||||
}
|
||||
@ -776,3 +784,14 @@ func authFacebook(url string) (string, string, common.Address, error) {
|
||||
}
|
||||
return username + "@facebook", avatar, address, nil
|
||||
}
|
||||
|
||||
// authNoAuth tries to interpret a faucet request as a plain Ethereum address,
|
||||
// without actually performing any remote authentication. This mode is prone to
|
||||
// Byzantine attack, so only ever use for truly private networks.
|
||||
func authNoAuth(url string) (string, string, common.Address, error) {
|
||||
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
|
||||
if address == (common.Address{}) {
|
||||
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
|
||||
}
|
||||
return address.Hex() + "@noauth", "", address, nil
|
||||
}
|
||||
|
@ -80,11 +80,8 @@
|
||||
<div class="row" style="margin-top: 32px;">
|
||||
<div class="col-lg-12">
|
||||
<h3>How does this work?</h3>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to certain common 3rd party accounts. Anyone having a GitHub, Twitter, Google+ or Facebook account may request funds within the permitted limits.</p>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to common 3rd party social network accounts. Anyone having a Twitter, Google+ or Facebook account may request funds within the permitted limits.</p>
|
||||
<dl class="dl-horizontal">
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-github-alt" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via GitHub, create a <a href="https://gist.github.com/" target="_about:blank">gist</a> with your Ethereum address embedded into the content (the file name doesn't matter).<br/>Copy-paste the gists URL into the above input box and fire away!</dd>
|
||||
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-twitter" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Twitter, make a <a href="https://twitter.com/intent/tweet?text=Requesting%20faucet%20funds%20into%200x0000000000000000000000000000000000000000%20on%20the%20%23{{.Network}}%20%23Ethereum%20test%20network." target="_about:blank">tweet</a> with your Ethereum address pasted into the contents (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://support.twitter.com/articles/80586" target="_about:blank">tweets URL</a> into the above input box and fire away!</dd>
|
||||
|
||||
@ -93,6 +90,11 @@
|
||||
|
||||
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-facebook" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Facebook, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://www.facebook.com/help/community/question/?id=282662498552845" target="_about:blank">posts URL</a> into the above input box and fire away!</dd>
|
||||
|
||||
{{if .NoAuth}}
|
||||
<dt class="text-danger" style="width: auto; margin-left: 40px;"><i class="fa fa-unlock-alt" aria-hidden="true" style="font-size: 36px;"></i></dt>
|
||||
<dd class="text-danger" style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds <strong>without authentication</strong>, simply copy-paste your Ethereum address into the above input box (surrounding text doesn't matter) and fire away.<br/>This mode is susceptible to Byzantine attacks. Only use for debugging or private networks!</dd>
|
||||
{{end}}
|
||||
</dl>
|
||||
<p>You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
|
||||
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
|
||||
@ -126,12 +128,7 @@
|
||||
};
|
||||
// Define a method to reconnect upon server loss
|
||||
var reconnect = function() {
|
||||
if (attempt % 2 == 0) {
|
||||
server = new WebSocket("wss://" + location.host + "/api");
|
||||
} else {
|
||||
server = new WebSocket("ws://" + location.host + "/api");
|
||||
}
|
||||
attempt++;
|
||||
server = new WebSocket(((window.location.protocol === "https:") ? "wss://" : "ws://") + window.location.host + "/api");
|
||||
|
||||
server.onmessage = function(event) {
|
||||
var msg = JSON.parse(event.data);
|
||||
|
File diff suppressed because one or more lines are too long
@ -291,15 +291,28 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr
|
||||
|
||||
// accountCreate creates a new account into the keystore defined by the CLI flags.
|
||||
func accountCreate(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
cfg := gethConfig{Node: defaultNodeConfig()}
|
||||
// Load config file.
|
||||
if file := ctx.GlobalString(configFileFlag.Name); file != "" {
|
||||
if err := loadConfig(file, &cfg); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
utils.SetNodeConfig(ctx, &cfg.Node)
|
||||
scryptN, scryptP, keydir, err := cfg.Node.AccountConfig()
|
||||
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read configuration: %v", err)
|
||||
}
|
||||
|
||||
password := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
|
||||
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
account, err := ks.NewAccount(password)
|
||||
address, err := keystore.StoreKey(keydir, password, scryptN, scryptP)
|
||||
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to create account: %v", err)
|
||||
}
|
||||
fmt.Printf("Address: {%x}\n", account.Address)
|
||||
fmt.Printf("Address: {%x}\n", address)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -17,8 +17,10 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
@ -112,7 +114,18 @@ func localConsole(ctx *cli.Context) error {
|
||||
// console to it.
|
||||
func remoteConsole(ctx *cli.Context) error {
|
||||
// Attach to a remotely running geth instance and start the JavaScript console
|
||||
client, err := dialRPC(ctx.Args().First())
|
||||
endpoint := ctx.Args().First()
|
||||
if endpoint == "" {
|
||||
path := node.DefaultDataDir()
|
||||
if ctx.GlobalIsSet(utils.DataDirFlag.Name) {
|
||||
path = ctx.GlobalString(utils.DataDirFlag.Name)
|
||||
}
|
||||
if path != "" && ctx.GlobalBool(utils.TestnetFlag.Name) {
|
||||
path = filepath.Join(path, "testnet")
|
||||
}
|
||||
endpoint = fmt.Sprintf("%s/geth.ipc", path)
|
||||
}
|
||||
client, err := dialRPC(endpoint)
|
||||
if err != nil {
|
||||
utils.Fatalf("Unable to attach to remote geth: %v", err)
|
||||
}
|
||||
|
379 cmd/puppeth/genesis.go (new file)
@@ -0,0 +1,379 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
// cppEthereumGenesisSpec represents the genesis specification format used by the
|
||||
// C++ Ethereum implementation.
|
||||
type cppEthereumGenesisSpec struct {
|
||||
SealEngine string `json:"sealEngine"`
|
||||
Params struct {
|
||||
AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"`
|
||||
HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"`
|
||||
EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"`
|
||||
EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"`
|
||||
ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"`
|
||||
ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"`
|
||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||
ChainID hexutil.Uint64 `json:"chainID"`
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
|
||||
MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"`
|
||||
GasLimitBoundDivisor *hexutil.Big `json:"gasLimitBoundDivisor"`
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
BlockReward *hexutil.Big `json:"blockReward"`
|
||||
} `json:"params"`
|
||||
|
||||
Genesis struct {
|
||||
Nonce hexutil.Bytes `json:"nonce"`
|
||||
Difficulty *hexutil.Big `json:"difficulty"`
|
||||
MixHash common.Hash `json:"mixHash"`
|
||||
Author common.Address `json:"author"`
|
||||
Timestamp hexutil.Uint64 `json:"timestamp"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
ExtraData hexutil.Bytes `json:"extraData"`
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||
} `json:"genesis"`
|
||||
|
||||
Accounts map[common.Address]*cppEthereumGenesisSpecAccount `json:"accounts"`
|
||||
}
|
||||
|
||||
// cppEthereumGenesisSpecAccount is the prefunded genesis account and/or precompiled
|
||||
// contract definition.
|
||||
type cppEthereumGenesisSpecAccount struct {
|
||||
Balance *hexutil.Big `json:"balance"`
|
||||
Nonce uint64 `json:"nonce,omitempty"`
|
||||
Precompiled *cppEthereumGenesisSpecBuiltin `json:"precompiled,omitempty"`
|
||||
}
|
||||
|
||||
// cppEthereumGenesisSpecBuiltin is the precompiled contract definition.
|
||||
type cppEthereumGenesisSpecBuiltin struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"`
|
||||
Linear *cppEthereumGenesisSpecLinearPricing `json:"linear,omitempty"`
|
||||
}
|
||||
|
||||
type cppEthereumGenesisSpecLinearPricing struct {
|
||||
Base uint64 `json:"base"`
|
||||
Word uint64 `json:"word"`
|
||||
}
|
||||
|
||||
// newCppEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
|
||||
// chain specification format.
|
||||
func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEthereumGenesisSpec, error) {
|
||||
// Only ethash is currently supported between go-ethereum and cpp-ethereum
|
||||
if genesis.Config.Ethash == nil {
|
||||
return nil, errors.New("unsupported consensus engine")
|
||||
}
|
||||
// Reconstruct the chain spec in Parity's format
|
||||
spec := &cppEthereumGenesisSpec{
|
||||
SealEngine: "Ethash",
|
||||
}
|
||||
spec.Params.AccountStartNonce = 0
|
||||
spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64())
|
||||
spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64())
|
||||
spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64())
|
||||
spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())
|
||||
spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64)
|
||||
|
||||
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
||||
spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
||||
|
||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit.Uint64())
|
||||
spec.Params.MaxGasLimit = (hexutil.Uint64)(math.MaxUint64)
|
||||
spec.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
||||
spec.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
||||
spec.Params.GasLimitBoundDivisor = (*hexutil.Big)(params.GasLimitBoundDivisor)
|
||||
spec.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
||||
spec.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
||||
|
||||
spec.Genesis.Nonce = (hexutil.Bytes)(make([]byte, 8))
|
||||
binary.LittleEndian.PutUint64(spec.Genesis.Nonce[:], genesis.Nonce)
|
||||
|
||||
spec.Genesis.MixHash = genesis.Mixhash
|
||||
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
|
||||
spec.Genesis.Author = genesis.Coinbase
|
||||
spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp)
|
||||
spec.Genesis.ParentHash = genesis.ParentHash
|
||||
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
|
||||
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
|
||||
|
||||
spec.Accounts = make(map[common.Address]*cppEthereumGenesisSpecAccount)
|
||||
for address, account := range genesis.Alloc {
|
||||
spec.Accounts[address] = &cppEthereumGenesisSpecAccount{
|
||||
Balance: (*hexutil.Big)(account.Balance),
|
||||
Nonce: account.Nonce,
|
||||
}
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{1})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "ecrecover", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 3000},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{2})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "sha256", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 60, Word: 12},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{3})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "ripemd160", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 600, Word: 120},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{4})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "identity", Linear: &cppEthereumGenesisSpecLinearPricing{Base: 15, Word: 3},
|
||||
}
|
||||
if genesis.Config.ByzantiumBlock != nil {
|
||||
spec.Accounts[common.BytesToAddress([]byte{5})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "modexp", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{6})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "alt_bn128_G1_add", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 500},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{7})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "alt_bn128_G1_mul", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), Linear: &cppEthereumGenesisSpecLinearPricing{Base: 40000},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{8})].Precompiled = &cppEthereumGenesisSpecBuiltin{
|
||||
Name: "alt_bn128_pairing_product", StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()),
|
||||
}
|
||||
}
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
// parityChainSpec is the chain specification format used by Parity.
|
||||
type parityChainSpec struct {
|
||||
Name string `json:"name"`
|
||||
Engine struct {
|
||||
Ethash struct {
|
||||
Params struct {
|
||||
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
|
||||
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
|
||||
GasLimitBoundDivisor *hexutil.Big `json:"gasLimitBoundDivisor"`
|
||||
DurationLimit *hexutil.Big `json:"durationLimit"`
|
||||
BlockReward *hexutil.Big `json:"blockReward"`
|
||||
HomesteadTransition uint64 `json:"homesteadTransition"`
|
||||
EIP150Transition uint64 `json:"eip150Transition"`
|
||||
EIP160Transition uint64 `json:"eip160Transition"`
|
||||
EIP161abcTransition uint64 `json:"eip161abcTransition"`
|
||||
EIP161dTransition uint64 `json:"eip161dTransition"`
|
||||
EIP649Reward *hexutil.Big `json:"eip649Reward"`
|
||||
EIP100bTransition uint64 `json:"eip100bTransition"`
|
||||
EIP649Transition uint64 `json:"eip649Transition"`
|
||||
} `json:"params"`
|
||||
} `json:"Ethash"`
|
||||
} `json:"engine"`
|
||||
|
||||
Params struct {
|
||||
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
|
||||
MinGasLimit *hexutil.Big `json:"minGasLimit"`
|
||||
NetworkID hexutil.Uint64 `json:"networkID"`
|
||||
MaxCodeSize uint64 `json:"maxCodeSize"`
|
||||
EIP155Transition uint64 `json:"eip155Transition"`
|
||||
EIP98Transition uint64 `json:"eip98Transition"`
|
||||
EIP86Transition uint64 `json:"eip86Transition"`
|
||||
EIP140Transition uint64 `json:"eip140Transition"`
|
||||
EIP211Transition uint64 `json:"eip211Transition"`
|
||||
EIP214Transition uint64 `json:"eip214Transition"`
|
||||
EIP658Transition uint64 `json:"eip658Transition"`
|
||||
} `json:"params"`
|
||||
|
||||
Genesis struct {
|
||||
Seal struct {
|
||||
Ethereum struct {
|
||||
Nonce hexutil.Bytes `json:"nonce"`
|
||||
MixHash hexutil.Bytes `json:"mixHash"`
|
||||
} `json:"ethereum"`
|
||||
} `json:"seal"`
|
||||
|
||||
Difficulty *hexutil.Big `json:"difficulty"`
|
||||
Author common.Address `json:"author"`
|
||||
Timestamp hexutil.Uint64 `json:"timestamp"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
ExtraData hexutil.Bytes `json:"extraData"`
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||
} `json:"genesis"`
|
||||
|
||||
Nodes []string `json:"nodes"`
|
||||
Accounts map[common.Address]*parityChainSpecAccount `json:"accounts"`
|
||||
}
|
||||
|
||||
// parityChainSpecAccount is the prefunded genesis account and/or precompiled
|
||||
// contract definition.
|
||||
type parityChainSpecAccount struct {
|
||||
Balance *hexutil.Big `json:"balance"`
|
||||
Nonce uint64 `json:"nonce,omitempty"`
|
||||
Builtin *parityChainSpecBuiltin `json:"builtin,omitempty"`
|
||||
}
|
||||
|
||||
// parityChainSpecBuiltin is the precompiled contract definition.
|
||||
type parityChainSpecBuiltin struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
ActivateAt uint64 `json:"activate_at,omitempty"`
|
||||
Pricing *parityChainSpecPricing `json:"pricing,omitempty"`
|
||||
}
|
||||
|
||||
// parityChainSpecPricing represents the different pricing models that builtin
|
||||
// contracts might advertise using.
|
||||
type parityChainSpecPricing struct {
|
||||
Linear *parityChainSpecLinearPricing `json:"linear,omitempty"`
|
||||
ModExp *parityChainSpecModExpPricing `json:"modexp,omitempty"`
|
||||
AltBnPairing *parityChainSpecAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"`
|
||||
}
|
||||
|
||||
type parityChainSpecLinearPricing struct {
|
||||
Base uint64 `json:"base"`
|
||||
Word uint64 `json:"word"`
|
||||
}
|
||||
|
||||
type parityChainSpecModExpPricing struct {
|
||||
Divisor uint64 `json:"divisor"`
|
||||
}
|
||||
|
||||
type parityChainSpecAltBnPairingPricing struct {
|
||||
Base uint64 `json:"base"`
|
||||
Pair uint64 `json:"pair"`
|
||||
}
|
||||
|
||||
// newParityChainSpec converts a go-ethereum genesis block into a Parity specific
|
||||
// chain specification format.
|
||||
func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []string) (*parityChainSpec, error) {
|
||||
// Only ethash is currently supported between go-ethereum and Parity
|
||||
if genesis.Config.Ethash == nil {
|
||||
return nil, errors.New("unsupported consensus engine")
|
||||
}
|
||||
// Reconstruct the chain spec in Parity's format
|
||||
spec := &parityChainSpec{
|
||||
Name: network,
|
||||
Nodes: bootnodes,
|
||||
}
|
||||
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
|
||||
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
|
||||
spec.Engine.Ethash.Params.GasLimitBoundDivisor = (*hexutil.Big)(params.GasLimitBoundDivisor)
|
||||
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
|
||||
spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
|
||||
spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP150Transition = genesis.Config.EIP150Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP160Transition = genesis.Config.EIP155Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP161abcTransition = genesis.Config.EIP158Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP161dTransition = genesis.Config.EIP158Block.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP649Reward = (*hexutil.Big)(ethash.ByzantiumBlockReward)
|
||||
spec.Engine.Ethash.Params.EIP100bTransition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Engine.Ethash.Params.EIP649Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
|
||||
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
|
||||
spec.Params.MinGasLimit = (*hexutil.Big)(params.MinGasLimit)
|
||||
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
|
||||
spec.Params.MaxCodeSize = params.MaxCodeSize
|
||||
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()
|
||||
spec.Params.EIP98Transition = math.MaxUint64
|
||||
spec.Params.EIP86Transition = math.MaxUint64
|
||||
spec.Params.EIP140Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Params.EIP211Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Params.EIP214Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
spec.Params.EIP658Transition = genesis.Config.ByzantiumBlock.Uint64()
|
||||
|
||||
spec.Genesis.Seal.Ethereum.Nonce = (hexutil.Bytes)(make([]byte, 8))
|
||||
binary.LittleEndian.PutUint64(spec.Genesis.Seal.Ethereum.Nonce[:], genesis.Nonce)
|
||||
|
||||
spec.Genesis.Seal.Ethereum.MixHash = (hexutil.Bytes)(genesis.Mixhash[:])
|
||||
spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty)
|
||||
spec.Genesis.Author = genesis.Coinbase
|
||||
spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp)
|
||||
spec.Genesis.ParentHash = genesis.ParentHash
|
||||
spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData)
|
||||
spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit)
|
||||
|
||||
spec.Accounts = make(map[common.Address]*parityChainSpecAccount)
|
||||
for address, account := range genesis.Alloc {
|
||||
spec.Accounts[address] = &parityChainSpecAccount{
|
||||
Balance: (*hexutil.Big)(account.Balance),
|
||||
Nonce: account.Nonce,
|
||||
}
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{1})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "ecrecover", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 3000}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{2})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "sha256", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 60, Word: 12}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{3})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "ripemd160", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 600, Word: 120}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{4})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}},
|
||||
}
|
||||
if genesis.Config.ByzantiumBlock != nil {
|
||||
spec.Accounts[common.BytesToAddress([]byte{5})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "modexp", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{6})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_add", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{7})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_mul", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}},
|
||||
}
|
||||
spec.Accounts[common.BytesToAddress([]byte{8})].Builtin = &parityChainSpecBuiltin{
|
||||
Name: "alt_bn128_pairing", ActivateAt: genesis.Config.ByzantiumBlock.Uint64(), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}},
|
||||
}
|
||||
}
|
||||
return spec, nil
|
||||
}
|
||||
|
||||
// pyEthereumGenesisSpec represents the genesis specification format used by the
|
||||
// Python Ethereum implementation.
|
||||
type pyEthereumGenesisSpec struct {
|
||||
Nonce hexutil.Bytes `json:"nonce"`
|
||||
Timestamp hexutil.Uint64 `json:"timestamp"`
|
||||
ExtraData hexutil.Bytes `json:"extraData"`
|
||||
GasLimit hexutil.Uint64 `json:"gasLimit"`
|
||||
Difficulty *hexutil.Big `json:"difficulty"`
|
||||
Mixhash common.Hash `json:"mixhash"`
|
||||
Coinbase common.Address `json:"coinbase"`
|
||||
Alloc core.GenesisAlloc `json:"alloc"`
|
||||
ParentHash common.Hash `json:"parentHash"`
|
||||
}
|
||||
|
||||
// newPyEthereumGenesisSpec converts a go-ethereum genesis block into a Parity specific
|
||||
// chain specification format.
|
||||
func newPyEthereumGenesisSpec(network string, genesis *core.Genesis) (*pyEthereumGenesisSpec, error) {
|
||||
// Only ethash is currently supported between go-ethereum and pyethereum
|
||||
if genesis.Config.Ethash == nil {
|
||||
return nil, errors.New("unsupported consensus engine")
|
||||
}
|
||||
spec := &pyEthereumGenesisSpec{
|
||||
Timestamp: (hexutil.Uint64)(genesis.Timestamp),
|
||||
ExtraData: genesis.ExtraData,
|
||||
GasLimit: (hexutil.Uint64)(genesis.GasLimit),
|
||||
Difficulty: (*hexutil.Big)(genesis.Difficulty),
|
||||
Mixhash: genesis.Mixhash,
|
||||
Coinbase: genesis.Coinbase,
|
||||
Alloc: genesis.Alloc,
|
||||
ParentHash: genesis.ParentHash,
|
||||
}
|
||||
spec.Nonce = (hexutil.Bytes)(make([]byte, 8))
|
||||
binary.LittleEndian.PutUint64(spec.Nonce[:], genesis.Nonce)
|
||||
|
||||
return spec, nil
|
||||
}
|
@ -18,10 +18,12 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@ -76,25 +78,26 @@ var dashboardContent = `
|
||||
<div id="sidebar-menu" class="main_menu_side hidden-print main_menu">
|
||||
<div class="menu_section">
|
||||
<ul class="nav side-menu">
|
||||
{{if .EthstatsPage}}<li><a onclick="load('//{{.EthstatsPage}}')"><i class="fa fa-tachometer"></i> Network Stats</a></li>{{end}}
|
||||
{{if .ExplorerPage}}<li><a onclick="load('//{{.ExplorerPage}}')"><i class="fa fa-database"></i> Block Explorer</a></li>{{end}}
|
||||
{{if .WalletPage}}<li><a onclick="load('//{{.WalletPage}}')"><i class="fa fa-address-book-o"></i> Browser Wallet</a></li>{{end}}
|
||||
{{if .FaucetPage}}<li><a onclick="load('//{{.FaucetPage}}')"><i class="fa fa-bath"></i> Crypto Faucet</a></li>{{end}}
|
||||
<li id="connect"><a><i class="fa fa-plug"></i> Connect Yourself</a>
|
||||
{{if .EthstatsPage}}<li id="stats_menu"><a onclick="load('#stats')"><i class="fa fa-tachometer"></i> Network Stats</a></li>{{end}}
|
||||
{{if .ExplorerPage}}<li id="explorer_menu"><a onclick="load('#explorer')"><i class="fa fa-database"></i> Block Explorer</a></li>{{end}}
|
||||
{{if .WalletPage}}<li id="wallet_menu"><a onclick="load('#wallet')"><i class="fa fa-address-book-o"></i> Browser Wallet</a></li>{{end}}
|
||||
{{if .FaucetPage}}<li id="faucet_menu"><a onclick="load('#faucet')"><i class="fa fa-bath"></i> Crypto Faucet</a></li>{{end}}
|
||||
<li id="connect_menu"><a><i class="fa fa-plug"></i> Connect Yourself</a>
|
||||
<ul id="connect_list" class="nav child_menu">
|
||||
<li><a onclick="$('#connect').removeClass('active'); $('#connect_list').toggle(); load('#connect-go-ethereum-geth')">Go Ethereum: Geth</a></li>
|
||||
<li><a onclick="$('#connect').removeClass('active'); $('#connect_list').toggle(); load('#connect-go-ethereum-mist')">Go Ethereum: Wallet & Mist</a></li>
|
||||
<li><a onclick="$('#connect').removeClass('active'); $('#connect_list').toggle(); load('#connect-go-ethereum-mobile')">Go Ethereum: Android & iOS</a></li>
|
||||
<li><a onclick="$('#connect_menu').removeClass('active'); $('#connect_list').toggle(); load('#geth')">Go Ethereum: Geth</a></li>
|
||||
<li><a onclick="$('#connect_menu').removeClass('active'); $('#connect_list').toggle(); load('#mist')">Go Ethereum: Wallet & Mist</a></li>
|
||||
<li><a onclick="$('#connect_menu').removeClass('active'); $('#connect_list').toggle(); load('#mobile')">Go Ethereum: Android & iOS</a></li>{{if .Ethash}}
|
||||
<li><a onclick="$('#connect_menu').removeClass('active'); $('#connect_list').toggle(); load('#other')">Other Ethereum Clients</a></li>{{end}}
|
||||
</ul>
|
||||
</li>
|
||||
<li><a onclick="load('#about')"><i class="fa fa-heartbeat"></i> About Puppeth</a></li>
|
||||
<li id="about_menu"><a onclick="load('#about')"><i class="fa fa-heartbeat"></i> About Puppeth</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="right_col" role="main" style="padding: 0">
|
||||
<div id="connect-go-ethereum-geth" hidden style="padding: 16px;">
|
||||
<div class="right_col" role="main" style="padding: 0 !important">
|
||||
<div id="geth" hidden style="padding: 16px;">
|
||||
<div class="page-title">
|
||||
<div class="title_left">
|
||||
<h3>Connect Yourself – Go Ethereum: Geth</h3>
|
||||
@ -154,7 +157,7 @@ var dashboardContent = `
|
||||
<p>Initial processing required to synchronize is light, as it only verifies the validity of the headers; similarly required disk capacity is small, tallying around 500 bytes per header. Low end machines with arbitrary storage, weak CPUs and 512MB+ RAM should cope well.</p>
|
||||
<br/>
|
||||
<p>To run a light node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
||||
<pre>geth --datadir=$HOME/.{{.Network}} --light init {{.GethGenesis}}</pre>
|
||||
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
||||
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesLightFlat}}</pre>
|
||||
</p>
|
||||
<br/>
|
||||
@ -173,8 +176,8 @@ var dashboardContent = `
|
||||
<p>Initial processing required to synchronize is light, as it only verifies the validity of the headers; similarly required disk capacity is small, tallying around 500 bytes per header. Embedded machines with arbitrary storage, low power CPUs and 128MB+ RAM may work.</p>
|
||||
<br/>
|
||||
<p>To run an embedded node, download <a href="/{{.GethGenesis}}"><code>{{.GethGenesis}}</code></a> and start Geth with:
|
||||
<pre>geth --datadir=$HOME/.{{.Network}} --light init {{.GethGenesis}}</pre>
|
||||
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=32 --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesLightFlat}}</pre>
|
||||
<pre>geth --datadir=$HOME/.{{.Network}} init {{.GethGenesis}}</pre>
|
||||
<pre>geth --networkid={{.NetworkID}} --datadir=$HOME/.{{.Network}} --cache=16 --ethash.cachesinmem=1 --syncmode=light{{if .Ethstats}} --ethstats='{{.Ethstats}}'{{end}} --bootnodes={{.BootnodesLightFlat}}</pre>
|
||||
</p>
|
||||
<br/>
|
||||
<p>You can download Geth from <a href="https://geth.ethereum.org/downloads/" target="about:blank">https://geth.ethereum.org/downloads/</a>.</p>
|
||||
@ -183,7 +186,7 @@ var dashboardContent = `
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="connect-go-ethereum-mist" hidden style="padding: 16px;">
|
||||
<div id="mist" hidden style="padding: 16px;">
|
||||
<div class="page-title">
|
||||
<div class="title_left">
|
||||
<h3>Connect Yourself – Go Ethereum: Wallet & Mist</h3>
|
||||
@ -235,7 +238,7 @@ var dashboardContent = `
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="connect-go-ethereum-mobile" hidden style="padding: 16px;">
|
||||
<div id="mobile" hidden style="padding: 16px;">
|
||||
<div class="page-title">
|
||||
<div class="title_left">
|
||||
<h3>Connect Yourself – Go Ethereum: Android & iOS</h3>
|
||||
@ -309,7 +312,101 @@ try! node?.start();
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>{{if .Ethash}}
|
||||
<div id="other" hidden style="padding: 16px;">
|
||||
<div class="page-title">
|
||||
<div class="title_left">
|
||||
<h3>Connect Yourself – Other Ethereum Clients</h3>
|
||||
</div>
|
||||
</div>
|
||||
<div class="clearfix"></div>
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<div class="x_panel">
|
||||
<div class="x_title">
|
||||
<h2>
|
||||
<svg height="14px" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 115 115"><path fill="#5C8DBC" d="M9.7 83.3V35.5s0-3.4 3.3-5.2c3.3-1.8 39.6-23.5 39.6-23.5s4.6-3.1 9.4 0c0 0 43.1 23.9 42.4 25.3L85.3 43.3s-3.6-8.4-13.1-13c-11.3-5.5-29.7-6.2-42.9 13.3 0 0-8.6 13.5.3 31.6l-19 10.7s-.9-.6-.9-2.6z"/><path fill="#5C8DBC" d="M71 51.3c-2.8-4.7-7.9-7.9-13.8-7.9-8.8 0-16 7.2-16 16 0 2.8.7 5.4 2 7.7L71 51.3z"/><path fill="#194674" d="M43.1 67c2.8 4.7 7.9 7.9 13.8 7.9 8.8 0 16-7.2 16-16 0-2.8-.7-5.4-2-7.7L43.1 67z"/><path fill="#1B598E" d="M104.4 32.1s1.3 52.6-.3 53.6L58 58.6l46.4-26.5z"/><path fill="#FFF" d="M90 57h-3.9v-4.1h-4.2V57h-4v4.1h4V65h4.2v-3.9H90zm13.6 0h-3.9v-4.1h-4.2V57h-4v4.1h4V65h4.2v-3.9h3.9z"/><path fill="#194674" d="M29.5 75.1s9.2 17 28.5 16.1 27.3-16.6 27.3-16.6L104 85.4s4.1.8-41.6 25.7c0 0-4.9 3.3-10.2 0 0 0-41.3-23.1-41.6-25.3l18.9-10.7z"/></svg>
|
||||
C++ Ethereum <small>Official C++ client from the Ethereum Foundation</small>
|
||||
</h2>
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
<div class="x_content">
|
||||
<p>C++ Ethereum is the third most popular Ethereum client, focusing on code portability across a broad range of operating systems and hardware. The client is currently a full node with transaction-processing-based synchronization.</p>
|
||||
<br/>
|
||||
<p>To run a cpp-ethereum node, download <a href="/{{.CppGenesis}}"><code>{{.CppGenesis}}</code></a> and start the node with:
|
||||
<pre>eth --config {{.CppGenesis}} --datadir $HOME/.{{.Network}} --peerset "{{.CppBootnodes}}"</pre>
|
||||
</p>
|
||||
<br/>
|
||||
<p>You can find cpp-ethereum at <a href="https://github.com/ethereum/cpp-ethereum/" target="about:blank">https://github.com/ethereum/cpp-ethereum/</a>.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-6">
|
||||
<div class="x_panel">
|
||||
<div class="x_title">
|
||||
<h2>
|
||||
<svg height="14px" version="1.1" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64"><path d="M46.42,13.07S24.51,18.54,35,30.6c3.09,3.55-.81,6.75-0.81,6.75s7.84-4,4.24-9.11C35,23.51,32.46,21.17,46.42,13.07ZM32.1,16.88C45.05,6.65,38.4,0,38.4,0c2.68,10.57-9.46,13.76-13.84,20.34-3,4.48,1.46,9.3,7.53,14.77C29.73,29.77,21.71,25.09,32.1,16.88Z" transform="translate(-8.4)" fill="#e57125"/><path d="M23.6,49.49c-9.84,2.75,6,8.43,18.51,3.06a23.06,23.06,0,0,1-3.52-1.72,36.62,36.62,0,0,1-13.25.56C21.16,50.92,23.6,49.49,23.6,49.49Zm17-5.36a51.7,51.7,0,0,1-17.1.82c-4.19-.43-1.45-2.46-1.45-2.46-10.84,3.6,6,7.68,21.18,3.25A7.59,7.59,0,0,1,40.62,44.13ZM51.55,54.68s1.81,1.49-2,2.64c-7.23,2.19-30.1,2.85-36.45.09-2.28-1,2-2.37,3.35-2.66a8.69,8.69,0,0,1,2.21-.25c-2.54-1.79-16.41,3.51-7,5C37.15,63.67,58.17,57.67,51.55,54.68ZM42.77,39.12a20.42,20.42,0,0,1,2.93-1.57s-4.83.86-9.65,1.27A87.37,87.37,0,0,1,20.66,39c-7.51-1,4.12-3.77,4.12-3.77A22,22,0,0,0,14.7,37.61C8.14,40.79,31,42.23,42.77,39.12Zm2.88,7.77a1,1,0,0,1-.24.31C61.44,43,55.54,32.35,47.88,35a2.19,2.19,0,0,0-1,.79,9,9,0,0,1,1.37-.37C52.1,34.66,57.65,40.65,45.64,46.89Zm0.43,14.75a94.76,94.76,0,0,1-29.17.45s1.47,1.22,9,1.7c11.53,0.74,29.22-.41,29.64-5.86C55.6,57.94,54.79,60,46.08,61.65Z" transform="translate(-8.4)" fill="#5482a2"/></svg>
|
||||
Ethereum Harmony <small>Third-party Java client from EtherCamp</small>
|
||||
</h2>
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
<div class="x_content">
|
||||
<p>Ethereum Harmony is a graphical Ethereum client with a web-based user interface, built on top of the EthereumJ Java implementation of the Ethereum protocol. The client is currently a full node with state-download-based synchronization.</p>
|
||||
<br/>
|
||||
<p>To run an Ethereum Harmony node, download <a href="/{{.HarmonyGenesis}}"><code>{{.HarmonyGenesis}}</code></a> and start the node with:
|
||||
<pre>./gradlew runCustom -DgenesisFile={{.HarmonyGenesis}} -Dpeer.networkId={{.NetworkID}} -Ddatabase.dir=$HOME/.harmony/{{.Network}} {{.HarmonyBootnodes}} </pre>
|
||||
</p>
|
||||
<br/>
|
||||
<p>You can find Ethereum Harmony at <a href="https://github.com/ether-camp/ethereum-harmony/" target="about:blank">https://github.com/ether-camp/ethereum-harmony/</a>.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="clearfix"></div>
|
||||
<div class="row">
|
||||
<div class="col-md-6">
|
||||
<div class="x_panel">
|
||||
<div class="x_title">
|
||||
<h2>
|
||||
<svg height="14px" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 104.56749 104.56675" version="1.1" viewbox="0 0 144 144" y="0px" x="0px"><metadata id="metadata10"><rdf:RDF><cc:Work rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs id="defs8" /><path style="fill:#676767;" id="path2" d="m 49.0125,12.3195 a 3.108,3.108 0 0 1 6.216,0 3.108,3.108 0 0 1 -6.216,0 m -37.077,28.14 a 3.108,3.108 0 0 1 6.216,0 3.108,3.108 0 0 1 -6.216,0 m 74.153,0.145 a 3.108,3.108 0 0 1 6.216,0 3.108,3.108 0 0 1 -6.216,0 m -65.156,4.258 c 1.43,-0.635 2.076,-2.311 1.441,-3.744 l -1.379,-3.118 h 5.423 v 24.444 h -10.941 a 38.265,38.265 0 0 1 -1.239,-14.607 z m 22.685,0.601 v -7.205 h 12.914 c 0.667,0 4.71,0.771 4.71,3.794 0,2.51 -3.101,3.41 -5.651,3.41 z m -17.631,38.793 a 3.108,3.108 0 0 1 6.216,0 3.108,3.108 0 0 1 -6.216,0 m 46.051,0.145 a 3.108,3.108 0 0 1 6.216,0 3.108,3.108 0 0 1 -6.216,0 m 0.961,-7.048 c -1.531,-0.328 -3.037,0.646 -3.365,2.18 l -1.56,7.28 a 38.265,38.265 0 0 1 -31.911,-0.153 l -1.559,-7.28 c -0.328,-1.532 -1.834,-2.508 -3.364,-2.179 l -6.427,1.38 a 38.265,38.265 0 0 1 -3.323,-3.917 h 31.272 c 0.354,0 0.59,-0.064 0.59,-0.386 v -11.062 c 0,-0.322 -0.236,-0.386 -0.59,-0.386 h -9.146 v -7.012 h 9.892 c 0.903,0 4.828,0.258 6.083,5.275 0.393,1.543 1.256,6.562 1.846,8.169 0.588,1.802 2.982,5.402 5.533,5.402 h 16.146 a 38.265,38.265 0 0 1 -3.544,4.102 z m 17.365,-29.207 a 38.265,38.265 0 0 1 0.081,6.643 h -3.926 c -0.393,0 -0.551,0.258 -0.551,0.643 v 1.803 c 0,4.244 -2.393,5.167 -4.49,5.402 -1.997,0.225 -4.211,-0.836 -4.484,-2.058 -1.178,-6.626 -3.141,-8.041 -6.241,-10.486 3.847,-2.443 7.85,-6.047 7.85,-10.871 0,-5.209 -3.571,-8.49 -6.005,-10.099 -3.415,-2.251 -7.196,-2.702 -8.216,-2.702 h -40.603 a 38.265,38.265 0 0 1 21.408,-12.082 l 4.786,5.021 c 1.082,1.133 2.874,1.175 4.006,0.092 l 5.355,-5.122 a 38.265,38.265 0 0 1 26.196,18.657 l -3.666,8.28 c -0.633,1.433 0.013,3.109 1.442,3.744 z m 9.143,0.134 -0.125,-1.28 3.776,-3.522 c 0.768,-0.716 0.481,-2.157 -0.501,-2.523 l -4.827,-1.805 -0.378,-1.246 3.011,-4.182 c 0.614,-0.85 0.05,-2.207 -0.984,-2.377 l -5.09,-0.828 -0.612,-1.143 2.139,-4.695 c 0.438,-0.956 -0.376,-2.179 -1.428,-2.139 l -5.166,0.18 -0.816,-0.99 1.187,-5.032 c 0.24,-1.022 -0.797,-2.06 -1.819,-1.82 l -5.031,1.186 -0.992,-0.816 0.181,-5.166 c 0.04,-1.046 -1.184,-1.863 -2.138,-1.429 l -4.694,2.14 -1.143,-0.613 -0.83,-5.091 c -0.168,-1.032 -1.526,-1.596 -2.376,-0.984 l -4.185,3.011 -1.244,-0.377 -1.805,-4.828 c -0.366,-0.984 -1.808,-1.267 -2.522,-0.503 l -3.522,3.779 -1.28,-0.125 -2.72,-4.395 c -0.55,-0.89 -2.023,-0.89 -2.571,0 l -2.72,4.395 -1.281,0.125 -3.523,-3.779 c -0.714,-0.764 -2.156,-0.481 -2.522,0.503 l -1.805,4.828 -1.245,0.377 -4.184,-3.011 c -0.85,-0.614 -2.209,-0.048 -2.377,0.984 l -0.83,5.091 -1.143,0.613 -4.694,-2.14 c -0.954,-0.436 -2.178,0.383 -2.138,1.429 l 0.18,5.166 -0.992,0.816 -5.031,-1.186 c -1.022,-0.238 -2.06,0.798 -1.82,1.82 l 1.185,5.032 -0.814,0.99 -5.166,-0.18 c -1.042,-0.03 -1.863,1.183 -1.429,2.139 l 2.14,4.695 -0.613,1.143 -5.09,0.828 c -1.034,0.168 -1.594,1.527 -0.984,2.377 l 3.011,4.182 -0.378,1.246 -4.828,1.805 c -0.98,0.366 -1.267,1.807 -0.501,2.523 l 3.777,3.522 -0.125,1.28 -4.394,2.72 c -0.89,0.55 -0.89,2.023 0,2.571 l 4.394,2.72 0.125,1.28 -3.777,3.523 c -0.766,0.714 
-0.479,2.154 0.501,2.522 l 4.828,1.805 0.378,1.246 -3.011,4.183 c -0.612,0.852 -0.049,2.21 0.985,2.376 l 5.089,0.828 0.613,1.145 -2.14,4.693 c -0.436,0.954 0.387,2.181 1.429,2.139 l 5.164,-0.181 0.816,0.992 -1.185,5.033 c -0.24,1.02 0.798,2.056 1.82,1.816 l 5.031,-1.185 0.992,0.814 -0.18,5.167 c -0.04,1.046 1.184,1.864 2.138,1.428 l 4.694,-2.139 1.143,0.613 0.83,5.088 c 0.168,1.036 1.527,1.596 2.377,0.986 l 4.182,-3.013 1.246,0.379 1.805,4.826 c 0.366,0.98 1.808,1.269 2.522,0.501 l 3.523,-3.777 1.281,0.128 2.72,4.394 c 0.548,0.886 2.021,0.888 2.571,0 l 2.72,-4.394 1.28,-0.128 3.522,3.777 c 0.714,0.768 2.156,0.479 2.522,-0.501 l 1.805,-4.826 1.246,-0.379 4.183,3.013 c 0.85,0.61 2.208,0.048 2.376,-0.986 l 0.83,-5.088 1.143,-0.613 4.694,2.139 c 0.954,0.436 2.176,-0.38 2.138,-1.428 l -0.18,-5.167 0.991,-0.814 5.031,1.185 c 1.022,0.24 2.059,-0.796 1.819,-1.816 l -1.185,-5.033 0.814,-0.992 5.166,0.181 c 1.042,0.042 1.866,-1.185 1.428,-2.139 l -2.139,-4.693 0.612,-1.145 5.09,-0.828 c 1.036,-0.166 1.598,-1.524 0.984,-2.376 l -3.011,-4.183 0.378,-1.246 4.827,-1.805 c 0.982,-0.368 1.269,-1.808 0.501,-2.522 l -3.776,-3.523 0.125,-1.28 4.394,-2.72 c 0.89,-0.548 0.891,-2.021 10e-4,-2.571 z" /></svg>
|
||||
Parity <small>Third-party Rust client from Parity Technologies</small>
|
||||
</h2>
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
<div class="x_content">
|
||||
<p>Parity is a fast, light and secure Ethereum client, supporting both headless operation and a web user interface for direct manual interaction. The client is currently a full node with transaction-processing-based synchronization and state pruning enabled.</p>
|
||||
<br/>
|
||||
<p>To run a Parity node, download <a href="/{{.ParityGenesis}}"><code>{{.ParityGenesis}}</code></a> and start the node with:
|
||||
<pre>parity --chain={{.ParityGenesis}}</pre>
|
||||
</p>
|
||||
<br/>
|
||||
<p>You can find Parity at <a href="https://parity.io/" target="about:blank">https://parity.io/</a>.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-md-6">
|
||||
<div class="x_panel">
|
||||
<div class="x_title">
|
||||
<h2>
|
||||
<svg height="14px" version="1.1" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64"><defs><linearGradient id="a" x1="13.79" y1="38.21" x2="75.87" y2="-15.2" gradientTransform="matrix(0.56, 0, 0, -0.57, -8.96, 23.53)" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#5c9fd3"/><stop offset="1" stop-color="#316a99"/></linearGradient><linearGradient id="b" x1="99.87" y1="-47.53" x2="77.7" y2="-16.16" gradientTransform="matrix(0.56, 0, 0, -0.57, -8.96, 23.53)" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ffd43d"/><stop offset="1" stop-color="#fee875"/></linearGradient></defs><g><path d="M31.62,0a43.6,43.6,0,0,0-7.3.62c-6.46,1.14-7.63,3.53-7.63,7.94v5.82H32v1.94H11a9.53,9.53,0,0,0-9.54,7.74,28.54,28.54,0,0,0,0,15.52c1.09,4.52,3.68,7.74,8.11,7.74h5.25v-7a9.7,9.7,0,0,1,9.54-9.48H39.58a7.69,7.69,0,0,0,7.63-7.76V8.56c0-4.14-3.49-7.25-7.63-7.94A47.62,47.62,0,0,0,31.62,0ZM23.37,4.68A2.91,2.91,0,1,1,20.5,7.6,2.9,2.9,0,0,1,23.37,4.68Z" transform="translate(-0.35)" fill="url(#a)"/><path d="M49.12,16.32V23.1a9.79,9.79,0,0,1-9.54,9.68H24.33a7.79,7.79,0,0,0-7.63,7.76V55.08c0,4.14,3.6,6.57,7.63,7.76a25.55,25.55,0,0,0,15.25,0c3.84-1.11,7.63-3.35,7.63-7.76V49.26H32V47.32H54.85c4.44,0,6.09-3.1,7.63-7.74s1.53-9.38,0-15.52c-1.1-4.42-3.19-7.74-7.63-7.74H49.12ZM40.54,53.14A2.91,2.91,0,1,1,37.67,56,2.88,2.88,0,0,1,40.54,53.14Z" transform="translate(-0.35)" fill="url(#b)"/></g></svg>
|
||||
PyEthApp <small>Official Python client from the Ethereum Foundation</small>
|
||||
</h2>
|
||||
<div class="clearfix"></div>
|
||||
</div>
|
||||
<div class="x_content">
|
||||
<p>Pyethapp is the Ethereum Foundation's research client, aiming to provide an easily hackable and extensible codebase. The client is currently a full node with transaction-processing-based synchronization and state pruning enabled.</p>
|
||||
<br/>
|
||||
<p>To run a pyethapp node, download <a href="/{{.PythonGenesis}}"><code>{{.PythonGenesis}}</code></a> and start the node with:
|
||||
<pre>mkdir -p $HOME/.config/pyethapp/{{.Network}}</pre>
|
||||
<pre>pyethapp -c eth.genesis="$(cat {{.PythonGenesis}})" -c eth.network_id={{.NetworkID}} -c data_dir=$HOME/.config/pyethapp/{{.Network}} -c discovery.bootstrap_nodes="[{{.PythonBootnodes}}]" -c eth.block.HOMESTEAD_FORK_BLKNUM={{.Homestead}} -c eth.block.ANTI_DOS_FORK_BLKNUM={{.Tangerine}} -c eth.block.SPURIOUS_DRAGON_FORK_BLKNUM={{.Spurious}} -c eth.block.METROPOLIS_FORK_BLKNUM={{.Byzantium}} -c eth.block.DAO_FORK_BLKNUM=18446744073709551615 run --console</pre>
|
||||
</p>
|
||||
<br/>
|
||||
<p>You can find pyethapp at <a href="https://github.com/ethereum/pyethapp/" target="about:blank">https://github.com/ethereum/pyethapp/</a>.</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>{{end}}
|
||||
<div id="about" hidden>
|
||||
<div class="row vertical-center">
|
||||
<div style="margin: 0 auto;">
|
||||
@ -344,13 +441,33 @@ try! node?.start();
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/gentelella/1.3.0/js/custom.min.js"></script>
|
||||
<script>
|
||||
var load = function(url) {
|
||||
$("#connect-go-ethereum-geth").fadeOut(300)
|
||||
$("#connect-go-ethereum-mist").fadeOut(300)
|
||||
$("#connect-go-ethereum-mobile").fadeOut(300)
|
||||
var load = function(hash) {
|
||||
window.location.hash = hash;
|
||||
|
||||
// Fade out all possible pages (yes, ugly, no, don't care)
|
||||
$("#geth").fadeOut(300)
|
||||
$("#mist").fadeOut(300)
|
||||
$("#mobile").fadeOut(300)
|
||||
$("#other").fadeOut(300)
|
||||
$("#about").fadeOut(300)
|
||||
$("#frame-wrapper").fadeOut(300);
|
||||
|
||||
// Depending on the hash, resolve it into a local or remote URL
|
||||
var url = hash;
|
||||
switch (hash) {
|
||||
case "#stats":
|
||||
url = "//{{.EthstatsPage}}";
|
||||
break;
|
||||
case "#explorer":
|
||||
url = "//{{.ExplorerPage}}";
|
||||
break;
|
||||
case "#wallet":
|
||||
url = "//{{.WalletPage}}";
|
||||
break;
|
||||
case "#faucet":
|
||||
url = "//{{.FaucetPage}}";
|
||||
break;
|
||||
}
|
||||
setTimeout(function() {
|
||||
if (url.substring(0, 1) == "#") {
|
||||
$('.body').css({overflowY: 'auto'});
|
||||
@ -364,13 +481,10 @@ try! node?.start();
|
||||
}
|
||||
var resize = function() {
|
||||
var sidebar = $($(".navbar")[0]).width();
|
||||
var content = 1920;
|
||||
var limit = document.body.clientWidth - sidebar;
|
||||
var scale = limit / content;
|
||||
var scale = limit / 1920;
|
||||
|
||||
console.log(document.body.clientHeight);
|
||||
|
||||
$("#frame-wrapper").width(content / scale);
|
||||
$("#frame-wrapper").width(limit);
|
||||
$("#frame-wrapper").height(document.body.clientHeight / scale);
|
||||
$("#frame-wrapper").css({
|
||||
transform: 'scale(' + (scale) + ')',
|
||||
@ -379,9 +493,17 @@ try! node?.start();
|
||||
};
|
||||
$(window).resize(resize);
|
||||
|
||||
var item = $(".side-menu").children()[0];
|
||||
$(item).children()[0].click();
|
||||
$(item).addClass("active");
|
||||
if (window.location.hash == "") {
|
||||
var item = $(".side-menu").children()[0];
|
||||
$(item).children()[0].click();
|
||||
$(item).addClass("active");
|
||||
} else {
|
||||
load(window.location.hash);
|
||||
var menu = $(window.location.hash + "_menu");
|
||||
if (menu !== undefined) {
|
||||
$(menu).addClass("active");
|
||||
}
|
||||
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
@ -405,6 +527,10 @@ RUN \
|
||||
echo '});' >> server.js
|
||||
|
||||
ADD {{.Network}}.json /dashboard/{{.Network}}.json
|
||||
ADD {{.Network}}-cpp.json /dashboard/{{.Network}}-cpp.json
|
||||
ADD {{.Network}}-harmony.json /dashboard/{{.Network}}-harmony.json
|
||||
ADD {{.Network}}-parity.json /dashboard/{{.Network}}-parity.json
|
||||
ADD {{.Network}}-python.json /dashboard/{{.Network}}-python.json
|
||||
ADD index.html /dashboard/index.html
|
||||
ADD puppeth.png /dashboard/puppeth.png
|
||||
|
||||
@ -422,8 +548,12 @@ services:
|
||||
build: .
|
||||
image: {{.Network}}/dashboard{{if not .VHost}}
|
||||
ports:
|
||||
- "{{.Port}}:80"{{else}}
|
||||
- "{{.Port}}:80"{{end}}
|
||||
environment:
|
||||
- ETHSTATS_PAGE={{.EthstatsPage}}
|
||||
- EXPLORER_PAGE={{.ExplorerPage}}
|
||||
- WALLET_PAGE={{.WalletPage}}
|
||||
- FAUCET_PAGE={{.FaucetPage}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}{{end}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
@ -436,7 +566,7 @@ services:
|
||||
// deployDashboard deploys a new dashboard container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployDashboard(client *sshClient, network string, port int, vhost string, services map[string]string, conf *config, ethstats bool) ([]byte, error) {
|
||||
func deployDashboard(client *sshClient, network string, conf *config, config *dashboardInfos, nocache bool) ([]byte, error) {
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
files := make(map[string][]byte)
|
||||
@ -449,37 +579,95 @@ func deployDashboard(client *sshClient, network string, port int, vhost string,
|
||||
|
||||
composefile := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(dashboardComposefile)).Execute(composefile, map[string]interface{}{
|
||||
"Network": network,
|
||||
"Port": port,
|
||||
"VHost": vhost,
|
||||
"Network": network,
|
||||
"Port": config.port,
|
||||
"VHost": config.host,
|
||||
"EthstatsPage": config.ethstats,
|
||||
"ExplorerPage": config.explorer,
|
||||
"WalletPage": config.wallet,
|
||||
"FaucetPage": config.faucet,
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
statsLogin := fmt.Sprintf("yournode:%s", conf.ethstats)
|
||||
if !ethstats {
|
||||
if !config.trusted {
|
||||
statsLogin = ""
|
||||
}
|
||||
indexfile := new(bytes.Buffer)
|
||||
bootCpp := make([]string, len(conf.bootFull))
|
||||
for i, boot := range conf.bootFull {
|
||||
bootCpp[i] = "required:" + strings.TrimPrefix(boot, "enode://")
|
||||
}
|
||||
bootHarmony := make([]string, len(conf.bootFull))
|
||||
for i, boot := range conf.bootFull {
|
||||
bootHarmony[i] = fmt.Sprintf("-Dpeer.active.%d.url=%s", i, boot)
|
||||
}
|
||||
bootPython := make([]string, len(conf.bootFull))
|
||||
for i, boot := range conf.bootFull {
|
||||
bootPython[i] = "'" + boot + "'"
|
||||
}
|
||||
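// For illustration: a hypothetical (shortened) bootnode "enode://abcd@10.0.0.1:30303" is
// rendered by the loops above as "required:abcd@10.0.0.1:30303" for cpp-ethereum, as
// "-Dpeer.active.0.url=enode://abcd@10.0.0.1:30303" for Ethereum Harmony, and as
// "'enode://abcd@10.0.0.1:30303'" for pyethapp.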
template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{
|
||||
"Network": network,
|
||||
"NetworkID": conf.genesis.Config.ChainId,
|
||||
"NetworkID": conf.Genesis.Config.ChainId,
|
||||
"NetworkTitle": strings.Title(network),
|
||||
"EthstatsPage": services["ethstats"],
|
||||
"ExplorerPage": services["explorer"],
|
||||
"WalletPage": services["wallet"],
|
||||
"FaucetPage": services["faucet"],
|
||||
"EthstatsPage": config.ethstats,
|
||||
"ExplorerPage": config.explorer,
|
||||
"WalletPage": config.wallet,
|
||||
"FaucetPage": config.faucet,
|
||||
"GethGenesis": network + ".json",
|
||||
"BootnodesFull": conf.bootFull,
|
||||
"BootnodesLight": conf.bootLight,
|
||||
"BootnodesFullFlat": strings.Join(conf.bootFull, ","),
|
||||
"BootnodesLightFlat": strings.Join(conf.bootLight, ","),
|
||||
"Ethstats": statsLogin,
|
||||
"Ethash": conf.Genesis.Config.Ethash != nil,
|
||||
"CppGenesis": network + "-cpp.json",
|
||||
"CppBootnodes": strings.Join(bootCpp, " "),
|
||||
"HarmonyGenesis": network + "-harmony.json",
|
||||
"HarmonyBootnodes": strings.Join(bootHarmony, " "),
|
||||
"ParityGenesis": network + "-parity.json",
|
||||
"PythonGenesis": network + "-python.json",
|
||||
"PythonBootnodes": strings.Join(bootPython, ","),
|
||||
"Homestead": conf.Genesis.Config.HomesteadBlock,
|
||||
"Tangerine": conf.Genesis.Config.EIP150Block,
|
||||
"Spurious": conf.Genesis.Config.EIP155Block,
|
||||
"Byzantium": conf.Genesis.Config.ByzantiumBlock,
|
||||
})
|
||||
files[filepath.Join(workdir, "index.html")] = indexfile.Bytes()
|
||||
|
||||
genesis, _ := conf.genesis.MarshalJSON()
|
||||
// Marshal the genesis spec files for go-ethereum and all the other clients
|
||||
genesis, _ := conf.Genesis.MarshalJSON()
|
||||
files[filepath.Join(workdir, network+".json")] = genesis
|
||||
|
||||
if conf.Genesis.Config.Ethash != nil {
|
||||
cppSpec, err := newCppEthereumGenesisSpec(network, conf.Genesis)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cppSpecJSON, _ := json.Marshal(cppSpec)
|
||||
files[filepath.Join(workdir, network+"-cpp.json")] = cppSpecJSON
|
||||
|
||||
harmonySpecJSON, _ := conf.Genesis.MarshalJSON()
|
||||
files[filepath.Join(workdir, network+"-harmony.json")] = harmonySpecJSON
|
||||
|
||||
paritySpec, err := newParityChainSpec(network, conf.Genesis, conf.bootFull)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
paritySpecJSON, _ := json.Marshal(paritySpec)
|
||||
files[filepath.Join(workdir, network+"-parity.json")] = paritySpecJSON
|
||||
|
||||
pyethSpec, err := newPyEthereumGenesisSpec(network, conf.Genesis)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pyethSpecJSON, _ := json.Marshal(pyethSpec)
|
||||
files[filepath.Join(workdir, network+"-python.json")] = pyethSpecJSON
|
||||
} else {
|
||||
for _, client := range []string{"cpp", "harmony", "parity", "python"} {
|
||||
files[filepath.Join(workdir, network+"-"+client+".json")] = []byte{}
|
||||
}
|
||||
}
|
||||
files[filepath.Join(workdir, "puppeth.png")] = dashboardMascot
|
||||
|
||||
// Upload the deployment files to the remote server (and clean up afterwards)
|
||||
@ -489,19 +677,36 @@ func deployDashboard(client *sshClient, network string, port int, vhost string,
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the dashboard service
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
}
|
||||
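// The cache-busting build path above is repeated verbatim in every deploy* function touched
// by this change. Purely as an illustrative sketch, the shared logic could be factored into
// one helper (composeUp is a hypothetical name, not used anywhere in this diff):
func composeUp(client *sshClient, workdir, network string, nocache bool) ([]byte, error) {
	// Rebuild the images from scratch and recreate the containers when cache busting is requested.
	if nocache {
		return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
	}
	// Otherwise reuse cached layers, but still force-recreate the running containers.
	return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
}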
|
||||
// dashboardInfos is returned from a dashboard status check to allow reporting
|
||||
// various configuration parameters.
|
||||
type dashboardInfos struct {
|
||||
host string
|
||||
port int
|
||||
host string
|
||||
port int
|
||||
trusted bool
|
||||
|
||||
ethstats string
|
||||
explorer string
|
||||
wallet string
|
||||
faucet string
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (info *dashboardInfos) String() string {
|
||||
return fmt.Sprintf("host=%s, port=%d", info.host, info.port)
|
||||
// Report converts the typed struct into a plain string->string map, containing
|
||||
// most - but not all - fields for reporting to the user.
|
||||
func (info *dashboardInfos) Report() map[string]string {
|
||||
return map[string]string{
|
||||
"Website address": info.host,
|
||||
"Website listener port": strconv.Itoa(info.port),
|
||||
"Ethstats service": info.ethstats,
|
||||
"Explorer service": info.explorer,
|
||||
"Wallet service": info.wallet,
|
||||
"Faucet service": info.faucet,
|
||||
}
|
||||
}
|
||||
|
||||
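// Illustrative sketch only: one simple way the Report maps introduced by this change could
// be rendered back to the user (printReport is a hypothetical helper and assumes the fmt and
// sort packages are imported):
func printReport(report map[string]string) {
	// Sort the keys so the settings are always printed in a stable order.
	keys := make([]string, 0, len(report))
	for key := range report {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		fmt.Printf("%-30s %s\n", key+":", report[key])
	}
}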
// checkDashboard does a health-check against a dashboard container to verify if
|
||||
@ -536,7 +741,11 @@ func checkDashboard(client *sshClient, network string) (*dashboardInfos, error)
|
||||
}
|
||||
// Container available, assemble and return the useful infos
|
||||
return &dashboardInfos{
|
||||
host: host,
|
||||
port: port,
|
||||
host: host,
|
||||
port: port,
|
||||
ethstats: infos.envvars["ETHSTATS_PAGE"],
|
||||
explorer: infos.envvars["EXPLORER_PAGE"],
|
||||
wallet: infos.envvars["WALLET_PAGE"],
|
||||
faucet: infos.envvars["FAUCET_PAGE"],
|
||||
}, nil
|
||||
}
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
@ -30,21 +31,9 @@ import (
|
||||
// ethstatsDockerfile is the Dockerfile required to build an ethstats backend
|
||||
// and associated monitoring site.
|
||||
var ethstatsDockerfile = `
|
||||
FROM mhart/alpine-node:latest
|
||||
|
||||
RUN \
|
||||
apk add --update git && \
|
||||
git clone --depth=1 https://github.com/karalabe/eth-netstats && \
|
||||
apk del git && rm -rf /var/cache/apk/* && \
|
||||
\
|
||||
cd /eth-netstats && npm install && npm install -g grunt-cli && grunt
|
||||
|
||||
WORKDIR /eth-netstats
|
||||
EXPOSE 3000
|
||||
FROM puppeth/ethstats:latest
|
||||
|
||||
RUN echo 'module.exports = {trusted: [{{.Trusted}}], banned: [{{.Banned}}], reserved: ["yournode"]};' > lib/utils/config.js
|
||||
|
||||
CMD ["npm", "start"]
|
||||
`
|
||||
|
||||
// ethstatsComposefile is the docker-compose.yml file required to deploy and
|
||||
@ -72,7 +61,7 @@ services:
|
||||
// deployEthstats deploys a new ethstats container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string, banned []string) ([]byte, error) {
|
||||
func deployEthstats(client *sshClient, network string, port int, secret string, vhost string, trusted []string, banned []string, nocache bool) ([]byte, error) {
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
files := make(map[string][]byte)
|
||||
@ -110,7 +99,10 @@ func deployEthstats(client *sshClient, network string, port int, secret string,
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the ethstats service
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
}
|
||||
|
||||
// ethstatsInfos is returned from an ethstats status check to allow reporting
|
||||
@ -123,9 +115,15 @@ type ethstatsInfos struct {
|
||||
banned []string
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (info *ethstatsInfos) String() string {
|
||||
return fmt.Sprintf("host=%s, port=%d, secret=%s, banned=%v", info.host, info.port, info.secret, info.banned)
|
||||
// Report converts the typed struct into a plain string->string map, containing
|
||||
// most - but not all - fields for reporting to the user.
|
||||
func (info *ethstatsInfos) Report() map[string]string {
|
||||
return map[string]string{
|
||||
"Website address": info.host,
|
||||
"Website listener port": strconv.Itoa(info.port),
|
||||
"Login secret": info.secret,
|
||||
"Banned addresses": fmt.Sprintf("%v", info.banned),
|
||||
}
|
||||
}
|
||||
|
||||
// checkEthstats does a health-check against an ethstats server to verify whether
|
||||
|
211
cmd/puppeth/module_explorer.go
Normal file
@ -0,0 +1,211 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// explorerDockerfile is the Dockerfile required to run a block explorer.
|
||||
var explorerDockerfile = `
|
||||
FROM puppeth/explorer:latest
|
||||
|
||||
ADD ethstats.json /ethstats.json
|
||||
ADD chain.json /chain.json
|
||||
|
||||
RUN \
|
||||
echo '(cd ../eth-net-intelligence-api && pm2 start /ethstats.json)' > explorer.sh && \
|
||||
echo '(cd ../etherchain-light && npm start &)' >> explorer.sh && \
|
||||
echo '/parity/parity --chain=/chain.json --port={{.NodePort}} --tracing=on --fat-db=on --pruning=archive' >> explorer.sh
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "explorer.sh"]
|
||||
`
|
||||
|
||||
// explorerEthstats is the configuration file for the ethstats javascript client.
|
||||
var explorerEthstats = `[
|
||||
{
|
||||
"name" : "node-app",
|
||||
"script" : "app.js",
|
||||
"log_date_format" : "YYYY-MM-DD HH:mm Z",
|
||||
"merge_logs" : false,
|
||||
"watch" : false,
|
||||
"max_restarts" : 10,
|
||||
"exec_interpreter" : "node",
|
||||
"exec_mode" : "fork_mode",
|
||||
"env":
|
||||
{
|
||||
"NODE_ENV" : "production",
|
||||
"RPC_HOST" : "localhost",
|
||||
"RPC_PORT" : "8545",
|
||||
"LISTENING_PORT" : "{{.Port}}",
|
||||
"INSTANCE_NAME" : "{{.Name}}",
|
||||
"CONTACT_DETAILS" : "",
|
||||
"WS_SERVER" : "{{.Host}}",
|
||||
"WS_SECRET" : "{{.Secret}}",
|
||||
"VERBOSITY" : 2
|
||||
}
|
||||
}
|
||||
]`
|
||||
|
||||
// explorerComposefile is the docker-compose.yml file required to deploy and
|
||||
// maintain a block explorer.
|
||||
var explorerComposefile = `
|
||||
version: '2'
|
||||
services:
|
||||
explorer:
|
||||
build: .
|
||||
image: {{.Network}}/explorer
|
||||
ports:
|
||||
- "{{.NodePort}}:{{.NodePort}}"
|
||||
- "{{.NodePort}}:{{.NodePort}}/udp"{{if not .VHost}}
|
||||
- "{{.WebPort}}:3000"{{end}}
|
||||
volumes:
|
||||
- {{.Datadir}}:/root/.local/share/io.parity.ethereum
|
||||
environment:
|
||||
- NODE_PORT={{.NodePort}}/tcp
|
||||
- STATS={{.Ethstats}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}
|
||||
- VIRTUAL_PORT=3000{{end}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
// deployExplorer deploys a new block explorer container to a remote machine via
|
||||
// SSH, docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployExplorer(client *sshClient, network string, chainspec []byte, config *explorerInfos, nocache bool) ([]byte, error) {
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
files := make(map[string][]byte)
|
||||
|
||||
dockerfile := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(explorerDockerfile)).Execute(dockerfile, map[string]interface{}{
|
||||
"NodePort": config.nodePort,
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
|
||||
ethstats := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(explorerEthstats)).Execute(ethstats, map[string]interface{}{
|
||||
"Port": config.nodePort,
|
||||
"Name": config.ethstats[:strings.Index(config.ethstats, ":")],
|
||||
"Secret": config.ethstats[strings.Index(config.ethstats, ":")+1 : strings.Index(config.ethstats, "@")],
|
||||
"Host": config.ethstats[strings.Index(config.ethstats, "@")+1:],
|
||||
})
|
||||
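// The ethstats credential is assumed to follow the usual "name:secret@host" convention, so a
// hypothetical "yournode:s3cr3t@stats.example.com" splits into Name "yournode", Secret "s3cr3t"
// and Host "stats.example.com" in the template data above.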
files[filepath.Join(workdir, "ethstats.json")] = ethstats.Bytes()
|
||||
|
||||
composefile := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(explorerComposefile)).Execute(composefile, map[string]interface{}{
|
||||
"Datadir": config.datadir,
|
||||
"Network": network,
|
||||
"NodePort": config.nodePort,
|
||||
"VHost": config.webHost,
|
||||
"WebPort": config.webPort,
|
||||
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
files[filepath.Join(workdir, "chain.json")] = chainspec
|
||||
|
||||
// Upload the deployment files to the remote server (and clean up afterwards)
|
||||
if out, err := client.Upload(files); err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the block explorer service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
}
|
||||
|
||||
// explorerInfos is returned from a block explorer status check to allow reporting
|
||||
// various configuration parameters.
|
||||
type explorerInfos struct {
|
||||
datadir string
|
||||
ethstats string
|
||||
nodePort int
|
||||
webHost string
|
||||
webPort int
|
||||
}
|
||||
|
||||
// Report converts the typed struct into a plain string->string map, containing
|
||||
// most - but not all - fields for reporting to the user.
|
||||
func (info *explorerInfos) Report() map[string]string {
|
||||
report := map[string]string{
|
||||
"Data directory": info.datadir,
|
||||
"Node listener port ": strconv.Itoa(info.nodePort),
|
||||
"Ethstats username": info.ethstats,
|
||||
"Website address ": info.webHost,
|
||||
"Website listener port ": strconv.Itoa(info.webPort),
|
||||
}
|
||||
return report
|
||||
}
|
||||
|
||||
// checkExplorer does a health-check against a block explorer server to verify
|
||||
// whether it's running, and if yes, whether it's responsive.
|
||||
func checkExplorer(client *sshClient, network string) (*explorerInfos, error) {
|
||||
// Inspect a possible block explorer container on the host
|
||||
infos, err := inspectContainer(client, fmt.Sprintf("%s_explorer_1", network))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !infos.running {
|
||||
return nil, ErrServiceOffline
|
||||
}
|
||||
// Resolve the port from the host, or the reverse proxy
|
||||
webPort := infos.portmap["3000/tcp"]
|
||||
if webPort == 0 {
|
||||
if proxy, _ := checkNginx(client, network); proxy != nil {
|
||||
webPort = proxy.port
|
||||
}
|
||||
}
|
||||
if webPort == 0 {
|
||||
return nil, ErrNotExposed
|
||||
}
|
||||
// Resolve the host from the reverse-proxy and the config values
|
||||
host := infos.envvars["VIRTUAL_HOST"]
|
||||
if host == "" {
|
||||
host = client.server
|
||||
}
|
||||
// Run a sanity check to see if the devp2p is reachable
|
||||
nodePort := infos.portmap[infos.envvars["NODE_PORT"]]
|
||||
if err = checkPort(client.server, nodePort); err != nil {
|
||||
log.Warn(fmt.Sprintf("Explorer devp2p port seems unreachable"), "server", client.server, "port", nodePort, "err", err)
|
||||
}
|
||||
// Assemble and return the useful infos
|
||||
stats := &explorerInfos{
|
||||
datadir: infos.volumes["/root/.local/share/io.parity.ethereum"],
|
||||
nodePort: nodePort,
|
||||
webHost: host,
|
||||
webPort: webPort,
|
||||
ethstats: infos.envvars["STATS"],
|
||||
}
|
||||
return stats, nil
|
||||
}
|
@ -18,6 +18,7 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"math/rand"
|
||||
@ -25,36 +26,24 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// faucetDockerfile is the Dockerfile required to build a faucet container to
|
||||
// grant crypto tokens based on GitHub authentications.
|
||||
var faucetDockerfile = `
|
||||
FROM alpine:latest
|
||||
|
||||
RUN mkdir /go
|
||||
ENV GOPATH /go
|
||||
|
||||
RUN \
|
||||
apk add --update git go make gcc musl-dev ca-certificates linux-headers && \
|
||||
mkdir -p $GOPATH/src/github.com/ethereum && \
|
||||
(cd $GOPATH/src/github.com/ethereum && git clone --depth=1 https://github.com/ethereum/go-ethereum) && \
|
||||
go build -v github.com/ethereum/go-ethereum/cmd/faucet && \
|
||||
apk del git go make gcc musl-dev linux-headers && \
|
||||
rm -rf $GOPATH && rm -rf /var/cache/apk/*
|
||||
FROM ethereum/client-go:alltools-latest
|
||||
|
||||
ADD genesis.json /genesis.json
|
||||
ADD account.json /account.json
|
||||
ADD account.pass /account.pass
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
CMD [ \
|
||||
"/faucet", "--genesis", "/genesis.json", "--network", "{{.NetworkID}}", "--bootnodes", "{{.Bootnodes}}", "--ethstats", "{{.Ethstats}}", "--ethport", "{{.EthPort}}", \
|
||||
"--faucet.name", "{{.FaucetName}}", "--faucet.amount", "{{.FaucetAmount}}", "--faucet.minutes", "{{.FaucetMinutes}}", "--faucet.tiers", "{{.FaucetTiers}}", \
|
||||
"--github.user", "{{.GitHubUser}}", "--github.token", "{{.GitHubToken}}", "--account.json", "/account.json", "--account.pass", "/account.pass" \
|
||||
{{if .CaptchaToken}}, "--captcha.token", "{{.CaptchaToken}}", "--captcha.secret", "{{.CaptchaSecret}}"{{end}} \
|
||||
ENTRYPOINT [ \
|
||||
"faucet", "--genesis", "/genesis.json", "--network", "{{.NetworkID}}", "--bootnodes", "{{.Bootnodes}}", "--ethstats", "{{.Ethstats}}", "--ethport", "{{.EthPort}}", \
|
||||
"--faucet.name", "{{.FaucetName}}", "--faucet.amount", "{{.FaucetAmount}}", "--faucet.minutes", "{{.FaucetMinutes}}", "--faucet.tiers", "{{.FaucetTiers}}", \
|
||||
"--account.json", "/account.json", "--account.pass", "/account.pass" \
|
||||
{{if .CaptchaToken}}, "--captcha.token", "{{.CaptchaToken}}", "--captcha.secret", "{{.CaptchaSecret}}"{{end}}{{if .NoAuth}}, "--noauth"{{end}} \
|
||||
]`
|
||||
|
||||
// faucetComposefile is the docker-compose.yml file required to deploy and maintain
|
||||
@ -76,10 +65,9 @@ services:
|
||||
- FAUCET_AMOUNT={{.FaucetAmount}}
|
||||
- FAUCET_MINUTES={{.FaucetMinutes}}
|
||||
- FAUCET_TIERS={{.FaucetTiers}}
|
||||
- GITHUB_USER={{.GitHubUser}}
|
||||
- GITHUB_TOKEN={{.GitHubToken}}
|
||||
- CAPTCHA_TOKEN={{.CaptchaToken}}
|
||||
- CAPTCHA_SECRET={{.CaptchaSecret}}{{if .VHost}}
|
||||
- CAPTCHA_SECRET={{.CaptchaSecret}}
|
||||
- NO_AUTH={{.NoAuth}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}
|
||||
- VIRTUAL_PORT=8080{{end}}
|
||||
logging:
|
||||
@ -93,7 +81,7 @@ services:
|
||||
// deployFaucet deploys a new faucet container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployFaucet(client *sshClient, network string, bootnodes []string, config *faucetInfos) ([]byte, error) {
|
||||
func deployFaucet(client *sshClient, network string, bootnodes []string, config *faucetInfos, nocache bool) ([]byte, error) {
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
files := make(map[string][]byte)
|
||||
@ -104,14 +92,13 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
"Bootnodes": strings.Join(bootnodes, ","),
|
||||
"Ethstats": config.node.ethstats,
|
||||
"EthPort": config.node.portFull,
|
||||
"GitHubUser": config.githubUser,
|
||||
"GitHubToken": config.githubToken,
|
||||
"CaptchaToken": config.captchaToken,
|
||||
"CaptchaSecret": config.captchaSecret,
|
||||
"FaucetName": strings.Title(network),
|
||||
"FaucetAmount": config.amount,
|
||||
"FaucetMinutes": config.minutes,
|
||||
"FaucetTiers": config.tiers,
|
||||
"NoAuth": config.noauth,
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
|
||||
@ -123,13 +110,12 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
"ApiPort": config.port,
|
||||
"EthPort": config.node.portFull,
|
||||
"EthName": config.node.ethstats[:strings.Index(config.node.ethstats, ":")],
|
||||
"GitHubUser": config.githubUser,
|
||||
"GitHubToken": config.githubToken,
|
||||
"CaptchaToken": config.captchaToken,
|
||||
"CaptchaSecret": config.captchaSecret,
|
||||
"FaucetAmount": config.amount,
|
||||
"FaucetMinutes": config.minutes,
|
||||
"FaucetTiers": config.tiers,
|
||||
"NoAuth": config.noauth,
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
@ -144,7 +130,10 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the faucet service
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
}
|
||||
|
||||
// faucetInfos is returned from a faucet status check to allow reporting various
|
||||
@ -156,15 +145,38 @@ type faucetInfos struct {
|
||||
amount int
|
||||
minutes int
|
||||
tiers int
|
||||
githubUser string
|
||||
githubToken string
|
||||
noauth bool
|
||||
captchaToken string
|
||||
captchaSecret string
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (info *faucetInfos) String() string {
|
||||
return fmt.Sprintf("host=%s, api=%d, eth=%d, amount=%d, minutes=%d, tiers=%d, github=%s, captcha=%v, ethstats=%s", info.host, info.port, info.node.portFull, info.amount, info.minutes, info.tiers, info.githubUser, info.captchaToken != "", info.node.ethstats)
|
||||
// Report converts the typed struct into a plain string->string map, containing
|
||||
// most - but not all - fields for reporting to the user.
|
||||
func (info *faucetInfos) Report() map[string]string {
|
||||
report := map[string]string{
|
||||
"Website address": info.host,
|
||||
"Website listener port": strconv.Itoa(info.port),
|
||||
"Ethereum listener port": strconv.Itoa(info.node.portFull),
|
||||
"Funding amount (base tier)": fmt.Sprintf("%d Ethers", info.amount),
|
||||
"Funding cooldown (base tier)": fmt.Sprintf("%d mins", info.minutes),
|
||||
"Funding tiers": strconv.Itoa(info.tiers),
|
||||
"Captha protection": fmt.Sprintf("%v", info.captchaToken != ""),
|
||||
"Ethstats username": info.node.ethstats,
|
||||
}
|
||||
if info.noauth {
|
||||
report["Debug mode (no auth)"] = "enabled"
|
||||
}
|
||||
if info.node.keyJSON != "" {
|
||||
var key struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(info.node.keyJSON), &key); err == nil {
|
||||
report["Funding account"] = common.HexToAddress(key.Address).Hex()
|
||||
} else {
|
||||
log.Error("Failed to retrieve signer address", "err", err)
|
||||
}
|
||||
}
|
||||
return report
|
||||
}
|
||||
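// For illustration: a hypothetical keystore blob such as {"address":"9c98e8f343e66...","crypto":{...}}
// would surface above as a "Funding account" entry containing the 0x-prefixed form of that
// address (decoded and re-encoded via common.HexToAddress).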
|
||||
// checkFaucet does a health-check against a faucet server to verify whether
|
||||
@ -224,9 +236,8 @@ func checkFaucet(client *sshClient, network string) (*faucetInfos, error) {
|
||||
amount: amount,
|
||||
minutes: minutes,
|
||||
tiers: tiers,
|
||||
githubUser: infos.envvars["GITHUB_USER"],
|
||||
githubToken: infos.envvars["GITHUB_TOKEN"],
|
||||
captchaToken: infos.envvars["CAPTCHA_TOKEN"],
|
||||
captchaSecret: infos.envvars["CAPTCHA_SECRET"],
|
||||
noauth: infos.envvars["NO_AUTH"] == "true",
|
||||
}, nil
|
||||
}
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"html/template"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
@ -54,7 +55,7 @@ services:
|
||||
// deployNginx deploys a new nginx reverse-proxy container to expose one or more
|
||||
// HTTP services running on a single host. If an instance with the specified
|
||||
// network name already exists there, it will be overwritten!
|
||||
func deployNginx(client *sshClient, network string, port int) ([]byte, error) {
|
||||
func deployNginx(client *sshClient, network string, port int, nocache bool) ([]byte, error) {
|
||||
log.Info("Deploying nginx reverse-proxy", "server", client.server, "port", port)
|
||||
|
||||
// Generate the content to upload to the server
|
||||
@ -78,8 +79,11 @@ func deployNginx(client *sshClient, network string, port int) ([]byte, error) {
|
||||
}
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the ethstats service
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
|
||||
// Build and deploy the reverse-proxy service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
}
|
||||
|
||||
// nginxInfos is returned from an nginx reverse-proxy status check to allow
|
||||
@ -88,9 +92,12 @@ type nginxInfos struct {
|
||||
port int
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (info *nginxInfos) String() string {
|
||||
return fmt.Sprintf("port=%d", info.port)
|
||||
// Report converts the typed struct into a plain string->string map, containing
|
||||
// most - but not all - fields for reporting to the user.
|
||||
func (info *nginxInfos) Report() map[string]string {
|
||||
return map[string]string{
|
||||
"Shared listener port": strconv.Itoa(info.port),
|
||||
}
|
||||
}
|
||||
|
||||
// checkNginx does a health-check against an nginx reverse-proxy to verify whether
|
||||
|
@ -18,6 +18,7 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
@ -25,6 +26,7 @@ import (
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
@ -38,9 +40,9 @@ ADD genesis.json /genesis.json
|
||||
ADD signer.pass /signer.pass
|
||||
{{end}}
|
||||
RUN \
|
||||
echo 'geth init /genesis.json' > geth.sh && \{{if .Unlock}}
|
||||
echo 'geth --cache 512 init /genesis.json' > geth.sh && \{{if .Unlock}}
|
||||
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
|
||||
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
|
||||
echo $'geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine --minerthreads 1{{end}} {{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "geth.sh"]
|
||||
`
|
||||
@ -58,7 +60,8 @@ services:
|
||||
- "{{.FullPort}}:{{.FullPort}}/udp"{{if .Light}}
|
||||
- "{{.LightPort}}:{{.LightPort}}/udp"{{end}}
|
||||
volumes:
|
||||
- {{.Datadir}}:/root/.ethereum
|
||||
- {{.Datadir}}:/root/.ethereum{{if .Ethashdir}}
|
||||
- {{.Ethashdir}}:/root/.ethash{{end}}
|
||||
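    # The extra ethash volume above is only emitted when an ethash DAG directory is configured
    # for the node; the template guard omits it otherwise.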
environment:
|
||||
- FULL_PORT={{.FullPort}}/tcp
|
||||
- LIGHT_PORT={{.LightPort}}/udp
|
||||
@ -79,7 +82,7 @@ services:
|
||||
// deployNode deploys a new Ethereum node container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployNode(client *sshClient, network string, bootv4, bootv5 []string, config *nodeInfos) ([]byte, error) {
|
||||
func deployNode(client *sshClient, network string, bootv4, bootv5 []string, config *nodeInfos, nocache bool) ([]byte, error) {
|
||||
kind := "sealnode"
|
||||
if config.keyJSON == "" && config.etherbase == "" {
|
||||
kind = "bootnode"
|
||||
@ -114,6 +117,7 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
|
||||
template.Must(template.New("").Parse(nodeComposefile)).Execute(composefile, map[string]interface{}{
|
||||
"Type": kind,
|
||||
"Datadir": config.datadir,
|
||||
"Ethashdir": config.ethashdir,
|
||||
"Network": network,
|
||||
"FullPort": config.portFull,
|
||||
"TotalPeers": config.peersTotal,
|
||||
@ -127,9 +131,7 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
//genesisfile, _ := json.MarshalIndent(config.genesis, "", " ")
|
||||
files[filepath.Join(workdir, "genesis.json")] = config.genesis
|
||||
|
||||
if config.keyJSON != "" {
|
||||
files[filepath.Join(workdir, "signer.json")] = []byte(config.keyJSON)
|
||||
files[filepath.Join(workdir, "signer.pass")] = []byte(config.keyPass)
|
||||
@ -141,7 +143,10 @@ func deployNode(client *sshClient, network string, bootv4, bootv5 []string, conf
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the boot or seal node service
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
}
|
||||
|
||||
// nodeInfos is returned from a boot or seal node status check to allow reporting
|
||||
@ -150,6 +155,7 @@ type nodeInfos struct {
|
||||
genesis []byte
|
||||
network int64
|
||||
datadir string
|
||||
ethashdir string
|
||||
ethstats string
|
||||
portFull int
|
||||
portLight int
|
||||
@ -164,14 +170,43 @@ type nodeInfos struct {
|
||||
gasPrice float64
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (info *nodeInfos) String() string {
|
||||
discv5 := ""
|
||||
if info.peersLight > 0 {
|
||||
discv5 = fmt.Sprintf(", portv5=%d", info.portLight)
|
||||
// Report converts the typed struct into a plain string->string map, containing
|
||||
// most - but not all - fields for reporting to the user.
|
||||
func (info *nodeInfos) Report() map[string]string {
|
||||
report := map[string]string{
|
||||
"Data directory": info.datadir,
|
||||
"Listener port (full nodes)": strconv.Itoa(info.portFull),
|
||||
"Peer count (all total)": strconv.Itoa(info.peersTotal),
|
||||
"Peer count (light nodes)": strconv.Itoa(info.peersLight),
|
||||
"Ethstats username": info.ethstats,
|
||||
}
|
||||
return fmt.Sprintf("port=%d%s, datadir=%s, peers=%d, lights=%d, ethstats=%s, gastarget=%0.3f MGas, gasprice=%0.3f GWei",
|
||||
info.portFull, discv5, info.datadir, info.peersTotal, info.peersLight, info.ethstats, info.gasTarget, info.gasPrice)
|
||||
if info.peersLight > 0 {
|
||||
// Light server enabled
|
||||
report["Listener port (light nodes)"] = strconv.Itoa(info.portLight)
|
||||
}
|
||||
if info.gasTarget > 0 {
|
||||
// Miner or signer node
|
||||
report["Gas limit (baseline target)"] = fmt.Sprintf("%0.3f MGas", info.gasTarget)
|
||||
report["Gas price (minimum accepted)"] = fmt.Sprintf("%0.3f GWei", info.gasPrice)
|
||||
|
||||
if info.etherbase != "" {
|
||||
// Ethash proof-of-work miner
|
||||
report["Ethash directory"] = info.ethashdir
|
||||
report["Miner account"] = info.etherbase
|
||||
}
|
||||
if info.keyJSON != "" {
|
||||
// Clique proof-of-authority signer
|
||||
var key struct {
|
||||
Address string `json:"address"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(info.keyJSON), &key); err == nil {
|
||||
report["Signer account"] = common.HexToAddress(key.Address).Hex()
|
||||
} else {
|
||||
log.Error("Failed to retrieve signer address", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return report
|
||||
}
|
||||
|
||||
// checkNode does a health-check against a boot or seal node server to verify
|
||||
@ -223,6 +258,7 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
stats := &nodeInfos{
|
||||
genesis: genesis,
|
||||
datadir: infos.volumes["/root/.ethereum"],
|
||||
ethashdir: infos.volumes["/root/.ethash"],
|
||||
portFull: infos.portmap[infos.envvars["FULL_PORT"]],
|
||||
portLight: infos.portmap[infos.envvars["LIGHT_PORT"]],
|
||||
peersTotal: totalPeers,
|
||||
|
200
cmd/puppeth/module_wallet.go
Normal file
@ -0,0 +1,200 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// walletDockerfile is the Dockerfile required to run a web wallet.
|
||||
var walletDockerfile = `
|
||||
FROM puppeth/wallet:latest
|
||||
|
||||
ADD genesis.json /genesis.json
|
||||
|
||||
RUN \
|
||||
echo 'node server.js &' > wallet.sh && \
|
||||
echo 'geth --cache 512 init /genesis.json' >> wallet.sh && \
|
||||
echo $'geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*"' >> wallet.sh
|
||||
|
||||
RUN \
|
||||
sed -i 's/PuppethNetworkID/{{.NetworkID}}/g' dist/js/etherwallet-master.js && \
|
||||
sed -i 's/PuppethNetwork/{{.Network}}/g' dist/js/etherwallet-master.js && \
|
||||
sed -i 's/PuppethDenom/{{.Denom}}/g' dist/js/etherwallet-master.js && \
|
||||
sed -i 's/PuppethHost/{{.Host}}/g' dist/js/etherwallet-master.js && \
|
||||
sed -i 's/PuppethRPCPort/{{.RPCPort}}/g' dist/js/etherwallet-master.js
|
||||
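# Note: the sed substitutions above bake the network parameters (id, name, denomination, host
# and RPC port) into the wallet's prebuilt JavaScript bundle at image-build time, matching the
# Puppeth* placeholders expected by the puppeth/wallet base image.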
|
||||
ENTRYPOINT ["/bin/sh", "wallet.sh"]
|
||||
`
|
||||
|
||||
// walletComposefile is the docker-compose.yml file required to deploy and
|
||||
// maintain a web wallet.
|
||||
var walletComposefile = `
|
||||
version: '2'
|
||||
services:
|
||||
wallet:
|
||||
build: .
|
||||
image: {{.Network}}/wallet
|
||||
ports:
|
||||
- "{{.NodePort}}:{{.NodePort}}"
|
||||
- "{{.NodePort}}:{{.NodePort}}/udp"
|
||||
- "{{.RPCPort}}:8545"{{if not .VHost}}
|
||||
- "{{.WebPort}}:80"{{end}}
|
||||
volumes:
|
||||
- {{.Datadir}}:/root/.ethereum
|
||||
environment:
|
||||
- NODE_PORT={{.NodePort}}/tcp
|
||||
- STATS={{.Ethstats}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}
|
||||
- VIRTUAL_PORT=80{{end}}
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "10"
|
||||
restart: always
|
||||
`
|
||||
|
||||
// deployWallet deploys a new web wallet container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployWallet(client *sshClient, network string, bootnodes []string, config *walletInfos, nocache bool) ([]byte, error) {
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
files := make(map[string][]byte)
|
||||
|
||||
dockerfile := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(walletDockerfile)).Execute(dockerfile, map[string]interface{}{
|
||||
"Network": strings.ToTitle(network),
|
||||
"Denom": strings.ToUpper(network),
|
||||
"NetworkID": config.network,
|
||||
"NodePort": config.nodePort,
|
||||
"RPCPort": config.rpcPort,
|
||||
"Bootnodes": strings.Join(bootnodes, ","),
|
||||
"Ethstats": config.ethstats,
|
||||
"Host": client.address,
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
|
||||
composefile := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(walletComposefile)).Execute(composefile, map[string]interface{}{
|
||||
"Datadir": config.datadir,
|
||||
"Network": network,
|
||||
"NodePort": config.nodePort,
|
||||
"RPCPort": config.rpcPort,
|
||||
"VHost": config.webHost,
|
||||
"WebPort": config.webPort,
|
||||
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
files[filepath.Join(workdir, "genesis.json")] = config.genesis
|
||||
|
||||
// Upload the deployment files to the remote server (and clean up afterwards)
|
||||
if out, err := client.Upload(files); err != nil {
|
||||
return out, err
|
||||
}
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the boot or seal node service
|
||||
if nocache {
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s build --pull --no-cache && docker-compose -p %s up -d --force-recreate", workdir, network, network))
|
||||
}
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build --force-recreate", workdir, network))
|
||||
}
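
The template rendering above is plain Go templating: deployWallet parses walletDockerfile and walletComposefile and substitutes the {{.Field}} placeholders from a string-keyed map before uploading the rendered files. A minimal standalone sketch of that substitution, with invented field values (the real code executes into a bytes.Buffer and uses the html/template package imported above):

package main

import (
	"os"
	"text/template"
)

// Trimmed-down stand-in for walletDockerfile, only to show the placeholder
// substitution; the field names mirror those fed in by deployWallet, the
// concrete values below are made up for illustration.
const demoDockerfile = `FROM puppeth/wallet:latest
RUN echo 'geth --networkid {{.NetworkID}} --port {{.NodePort}} --ethstats "{{.Ethstats}}"' >> wallet.sh
`

func main() {
	fields := map[string]interface{}{
		"NetworkID": 63791,
		"NodePort":  30303,
		"Ethstats":  "wallet:secret@stats.example.org",
	}
	template.Must(template.New("demo").Parse(demoDockerfile)).Execute(os.Stdout, fields)
}
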
|
||||
|
||||
// walletInfos is returned from a web wallet status check to allow reporting
|
||||
// various configuration parameters.
|
||||
type walletInfos struct {
|
||||
genesis []byte
|
||||
network int64
|
||||
datadir string
|
||||
ethstats string
|
||||
nodePort int
|
||||
rpcPort int
|
||||
webHost string
|
||||
webPort int
|
||||
}
|
||||
|
||||
// Report converts the typed struct into a plain string->string map, containing
|
||||
// most - but not all - fields for reporting to the user.
|
||||
func (info *walletInfos) Report() map[string]string {
|
||||
report := map[string]string{
|
||||
"Data directory": info.datadir,
|
||||
"Ethstats username": info.ethstats,
|
||||
"Node listener port ": strconv.Itoa(info.nodePort),
|
||||
"RPC listener port ": strconv.Itoa(info.rpcPort),
|
||||
"Website address ": info.webHost,
|
||||
"Website listener port ": strconv.Itoa(info.webPort),
|
||||
}
|
||||
return report
|
||||
}
|
||||
|
||||
// checkWallet does a health-check against the web wallet server to verify whether
|
||||
// it's running, and if yes, whether it's responsive.
|
||||
func checkWallet(client *sshClient, network string) (*walletInfos, error) {
|
||||
// Inspect a possible web wallet container on the host
|
||||
infos, err := inspectContainer(client, fmt.Sprintf("%s_wallet_1", network))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !infos.running {
|
||||
return nil, ErrServiceOffline
|
||||
}
|
||||
// Resolve the port from the host, or the reverse proxy
|
||||
webPort := infos.portmap["80/tcp"]
|
||||
if webPort == 0 {
|
||||
if proxy, _ := checkNginx(client, network); proxy != nil {
|
||||
webPort = proxy.port
|
||||
}
|
||||
}
|
||||
if webPort == 0 {
|
||||
return nil, ErrNotExposed
|
||||
}
|
||||
// Resolve the host from the reverse-proxy and the config values
|
||||
host := infos.envvars["VIRTUAL_HOST"]
|
||||
if host == "" {
|
||||
host = client.server
|
||||
}
|
||||
// Run a sanity check to see if the devp2p and RPC ports are reachable
|
||||
nodePort := infos.portmap[infos.envvars["NODE_PORT"]]
|
||||
if err = checkPort(client.server, nodePort); err != nil {
|
||||
log.Warn(fmt.Sprintf("Wallet devp2p port seems unreachable"), "server", client.server, "port", nodePort, "err", err)
|
||||
}
|
||||
rpcPort := infos.portmap["8545/tcp"]
|
||||
if err = checkPort(client.server, rpcPort); err != nil {
|
||||
log.Warn(fmt.Sprintf("Wallet RPC port seems unreachable"), "server", client.server, "port", rpcPort, "err", err)
|
||||
}
|
||||
// Assemble and return the useful infos
|
||||
stats := &walletInfos{
|
||||
datadir: infos.volumes["/root/.ethereum"],
|
||||
nodePort: nodePort,
|
||||
rpcPort: rpcPort,
|
||||
webHost: host,
|
||||
webPort: webPort,
|
||||
ethstats: infos.envvars["STATS"],
|
||||
}
|
||||
return stats, nil
|
||||
}
|
@ -38,7 +38,7 @@ func main() {
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "loglevel",
|
||||
Value: 4,
|
||||
Value: 3,
|
||||
Usage: "log level to emit to the screen",
|
||||
},
|
||||
}
|
||||
|
@ -116,6 +116,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
|
||||
keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
|
||||
// If no public key is known for SSH, ask the user to confirm
|
||||
if pubkey == nil {
|
||||
fmt.Println()
|
||||
fmt.Printf("The authenticity of host '%s (%s)' can't be established.\n", hostname, remote)
|
||||
fmt.Printf("SSH key fingerprint is %s [MD5]\n", ssh.FingerprintLegacyMD5(key))
|
||||
fmt.Printf("Are you sure you want to continue connecting (yes/no)? ")
|
||||
@ -215,8 +216,8 @@ func (client *sshClient) Stream(cmd string) error {
|
||||
return session.Run(cmd)
|
||||
}
|
||||
|
||||
// Upload copied the set of files to a remote server via SCP, creating any non-
|
||||
// existing folder in te mean time.
|
||||
// Upload copies the set of files to a remote server via SCP, creating any non-
|
||||
// existing folders in the mean time.
|
||||
func (client *sshClient) Upload(files map[string][]byte) ([]byte, error) {
|
||||
// Establish a single command session
|
||||
session, err := client.client.NewSession()
|
||||
|
@ -28,6 +28,7 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
@ -38,12 +39,12 @@ import (
|
||||
// config contains all the configurations needed by puppeth that should be saved
|
||||
// between sessions.
|
||||
type config struct {
|
||||
path string // File containing the configuration values
|
||||
genesis *core.Genesis // Genesis block to cache for node deploys
|
||||
bootFull []string // Bootnodes to always connect to by full nodes
|
||||
bootLight []string // Bootnodes to always connect to by light nodes
|
||||
ethstats string // Ethstats settings to cache for node deploys
|
||||
path string // File containing the configuration values
|
||||
bootFull []string // Bootnodes to always connect to by full nodes
|
||||
bootLight []string // Bootnodes to always connect to by light nodes
|
||||
ethstats string // Ethstats settings to cache for node deploys
|
||||
|
||||
Genesis *core.Genesis `json:"genesis,omitempty"` // Genesis block to cache for node deploys
|
||||
Servers map[string][]byte `json:"servers,omitempty"`
|
||||
}
|
||||
|
||||
@ -75,7 +76,8 @@ type wizard struct {
|
||||
servers map[string]*sshClient // SSH connections to servers to administer
|
||||
services map[string][]string // Ethereum services known to be running on servers
|
||||
|
||||
in *bufio.Reader // Wrapper around stdin to allow reading user input
|
||||
in *bufio.Reader // Wrapper around stdin to allow reading user input
|
||||
lock sync.Mutex // Lock to protect configs during concurrent service discovery
|
||||
}
|
||||
|
||||
// read reads a single line from stdin, trimming if from spaces.
|
||||
|
@ -40,6 +40,8 @@ func (w *wizard) deployDashboard() {
|
||||
host: client.server,
|
||||
}
|
||||
}
|
||||
existed := err == nil
|
||||
|
||||
// Figure out which port to listen on
|
||||
fmt.Println()
|
||||
fmt.Printf("Which port should the dashboard listen on? (default = %d)\n", infos.port)
|
||||
@ -58,7 +60,6 @@ func (w *wizard) deployDashboard() {
|
||||
available[service] = append(available[service], server)
|
||||
}
|
||||
}
|
||||
listing := make(map[string]string)
|
||||
for _, service := range []string{"ethstats", "explorer", "wallet", "faucet"} {
|
||||
// Gather all the locally hosted pages of this type
|
||||
var pages []string
|
||||
@ -74,6 +75,14 @@ func (w *wizard) deployDashboard() {
|
||||
if infos, err := checkEthstats(client, w.network); err == nil {
|
||||
port = infos.port
|
||||
}
|
||||
case "explorer":
|
||||
if infos, err := checkExplorer(client, w.network); err == nil {
|
||||
port = infos.webPort
|
||||
}
|
||||
case "wallet":
|
||||
if infos, err := checkWallet(client, w.network); err == nil {
|
||||
port = infos.webPort
|
||||
}
|
||||
case "faucet":
|
||||
if infos, err := checkFaucet(client, w.network); err == nil {
|
||||
port = infos.port
|
||||
@ -101,26 +110,43 @@ func (w *wizard) deployDashboard() {
|
||||
log.Error("Invalid listing choice, aborting")
|
||||
return
|
||||
}
|
||||
var page string
|
||||
switch {
|
||||
case choice <= len(pages):
|
||||
listing[service] = pages[choice-1]
|
||||
page = pages[choice-1]
|
||||
case choice == len(pages)+1:
|
||||
fmt.Println()
|
||||
fmt.Printf("Which address is the external %s service at?\n", service)
|
||||
listing[service] = w.readString()
|
||||
page = w.readString()
|
||||
default:
|
||||
// No service hosting for this
|
||||
}
|
||||
// Save the users choice
|
||||
switch service {
|
||||
case "ethstats":
|
||||
infos.ethstats = page
|
||||
case "explorer":
|
||||
infos.explorer = page
|
||||
case "wallet":
|
||||
infos.wallet = page
|
||||
case "faucet":
|
||||
infos.faucet = page
|
||||
}
|
||||
}
|
||||
// If we have ethstats running, ask whether to make the secret public or not
|
||||
var ethstats bool
|
||||
if w.conf.ethstats != "" {
|
||||
fmt.Println()
|
||||
fmt.Println("Include ethstats secret on dashboard (y/n)? (default = yes)")
|
||||
ethstats = w.readDefaultString("y") == "y"
|
||||
infos.trusted = w.readDefaultString("y") == "y"
|
||||
}
|
||||
// Try to deploy the dashboard container on the host
|
||||
if out, err := deployDashboard(client, w.network, infos.port, infos.host, listing, &w.conf, ethstats); err != nil {
|
||||
nocache := false
|
||||
if existed {
|
||||
fmt.Println()
|
||||
fmt.Printf("Should the dashboard be built from scratch (y/n)? (default = no)\n")
|
||||
nocache = w.readDefaultString("n") != "n"
|
||||
}
|
||||
if out, err := deployDashboard(client, w.network, &w.conf, infos, nocache); err != nil {
|
||||
log.Error("Failed to deploy dashboard container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
@ -128,5 +154,5 @@ func (w *wizard) deployDashboard() {
|
||||
return
|
||||
}
|
||||
// All ok, run a network scan to pick any changes up
|
||||
w.networkStats(false)
|
||||
w.networkStats()
|
||||
}
|
||||
|
@ -42,6 +42,8 @@ func (w *wizard) deployEthstats() {
|
||||
secret: "",
|
||||
}
|
||||
}
|
||||
existed := err == nil
|
||||
|
||||
// Figure out which port to listen on
|
||||
fmt.Println()
|
||||
fmt.Printf("Which port should ethstats listen on? (default = %d)\n", infos.port)
|
||||
@ -62,49 +64,57 @@ func (w *wizard) deployEthstats() {
|
||||
infos.secret = w.readDefaultString(infos.secret)
|
||||
}
|
||||
// Gather any blacklists to ban from reporting
|
||||
fmt.Println()
|
||||
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
|
||||
if w.readDefaultString("y") != "y" {
|
||||
// The user might want to clear the entire list, although generally probably not
|
||||
if existed {
|
||||
fmt.Println()
|
||||
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
|
||||
if w.readDefaultString("n") != "n" {
|
||||
infos.banned = nil
|
||||
}
|
||||
// Offer the user to explicitly add/remove certain IP addresses
|
||||
fmt.Println()
|
||||
fmt.Println("Which additional IP addresses should be blacklisted?")
|
||||
for {
|
||||
if ip := w.readIPAddress(); ip != "" {
|
||||
infos.banned = append(infos.banned, ip)
|
||||
continue
|
||||
fmt.Printf("Keep existing IP %v blacklist (y/n)? (default = yes)\n", infos.banned)
|
||||
if w.readDefaultString("y") != "y" {
|
||||
// The user might want to clear the entire list, although generally probably not
|
||||
fmt.Println()
|
||||
fmt.Printf("Clear out blacklist and start over (y/n)? (default = no)\n")
|
||||
if w.readDefaultString("n") != "n" {
|
||||
infos.banned = nil
|
||||
}
|
||||
break
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Println("Which IP addresses should not be blacklisted?")
|
||||
for {
|
||||
if ip := w.readIPAddress(); ip != "" {
|
||||
for i, addr := range infos.banned {
|
||||
if ip == addr {
|
||||
infos.banned = append(infos.banned[:i], infos.banned[i+1:]...)
|
||||
break
|
||||
}
|
||||
// Offer the user to explicitly add/remove certain IP addresses
|
||||
fmt.Println()
|
||||
fmt.Println("Which additional IP addresses should be blacklisted?")
|
||||
for {
|
||||
if ip := w.readIPAddress(); ip != "" {
|
||||
infos.banned = append(infos.banned, ip)
|
||||
continue
|
||||
}
|
||||
continue
|
||||
break
|
||||
}
|
||||
break
|
||||
fmt.Println()
|
||||
fmt.Println("Which IP addresses should not be blacklisted?")
|
||||
for {
|
||||
if ip := w.readIPAddress(); ip != "" {
|
||||
for i, addr := range infos.banned {
|
||||
if ip == addr {
|
||||
infos.banned = append(infos.banned[:i], infos.banned[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
sort.Strings(infos.banned)
|
||||
}
|
||||
sort.Strings(infos.banned)
|
||||
}
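
The blacklist editing above relies on the usual Go idiom for deleting an element from a slice in place; the same append trick in isolation:

package main

import "fmt"

func main() {
	banned := []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"}

	// Drop "10.0.0.2" by re-appending the tail over it, exactly as done for
	// infos.banned above.
	remove := "10.0.0.2"
	for i, addr := range banned {
		if addr == remove {
			banned = append(banned[:i], banned[i+1:]...)
			break
		}
	}
	fmt.Println(banned) // [10.0.0.1 10.0.0.3]
}
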
|
||||
// Try to deploy the ethstats server on the host
|
||||
nocache := false
|
||||
if existed {
|
||||
fmt.Println()
|
||||
fmt.Printf("Should the ethstats be built from scratch (y/n)? (default = no)\n")
|
||||
nocache = w.readDefaultString("n") != "n"
|
||||
}
|
||||
trusted := make([]string, 0, len(w.servers))
|
||||
for _, client := range w.servers {
|
||||
if client != nil {
|
||||
trusted = append(trusted, client.address)
|
||||
}
|
||||
}
|
||||
if out, err := deployEthstats(client, w.network, infos.port, infos.secret, infos.host, trusted, infos.banned); err != nil {
|
||||
if out, err := deployEthstats(client, w.network, infos.port, infos.secret, infos.host, trusted, infos.banned, nocache); err != nil {
|
||||
log.Error("Failed to deploy ethstats container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
@ -112,5 +122,5 @@ func (w *wizard) deployEthstats() {
|
||||
return
|
||||
}
|
||||
// All ok, run a network scan to pick any changes up
|
||||
w.networkStats(false)
|
||||
w.networkStats()
|
||||
}
|
||||
|
117
cmd/puppeth/wizard_explorer.go
Normal file
@ -0,0 +1,117 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// deployExplorer creates a new block explorer based on some user input.
|
||||
func (w *wizard) deployExplorer() {
|
||||
// Do some sanity check before the user wastes time on input
|
||||
if w.conf.Genesis == nil {
|
||||
log.Error("No genesis block configured")
|
||||
return
|
||||
}
|
||||
if w.conf.ethstats == "" {
|
||||
log.Error("No ethstats server configured")
|
||||
return
|
||||
}
|
||||
if w.conf.Genesis.Config.Ethash == nil {
|
||||
log.Error("Only ethash network supported")
|
||||
return
|
||||
}
|
||||
// Select the server to interact with
|
||||
server := w.selectServer()
|
||||
if server == "" {
|
||||
return
|
||||
}
|
||||
client := w.servers[server]
|
||||
|
||||
// Retrieve any active node configurations from the server
|
||||
infos, err := checkExplorer(client, w.network)
|
||||
if err != nil {
|
||||
infos = &explorerInfos{
|
||||
nodePort: 30303, webPort: 80, webHost: client.server,
|
||||
}
|
||||
}
|
||||
existed := err == nil
|
||||
|
||||
chainspec, err := newParityChainSpec(w.network, w.conf.Genesis, w.conf.bootFull)
|
||||
if err != nil {
|
||||
log.Error("Failed to create chain spec for explorer", "err", err)
|
||||
return
|
||||
}
|
||||
chain, _ := json.MarshalIndent(chainspec, "", " ")
|
||||
|
||||
// Figure out which port to listen on
|
||||
fmt.Println()
|
||||
fmt.Printf("Which port should the explorer listen on? (default = %d)\n", infos.webPort)
|
||||
infos.webPort = w.readDefaultInt(infos.webPort)
|
||||
|
||||
// Figure out which virtual-host to deploy the explorer on
|
||||
if infos.webHost, err = w.ensureVirtualHost(client, infos.webPort, infos.webHost); err != nil {
|
||||
log.Error("Failed to decide on explorer host", "err", err)
|
||||
return
|
||||
}
|
||||
// Figure out where the user wants to store the persistent data
|
||||
fmt.Println()
|
||||
if infos.datadir == "" {
|
||||
fmt.Printf("Where should data be stored on the remote machine?\n")
|
||||
infos.datadir = w.readString()
|
||||
} else {
|
||||
fmt.Printf("Where should data be stored on the remote machine? (default = %s)\n", infos.datadir)
|
||||
infos.datadir = w.readDefaultString(infos.datadir)
|
||||
}
|
||||
// Figure out which port to listen on
|
||||
fmt.Println()
|
||||
fmt.Printf("Which TCP/UDP port should the archive node listen on? (default = %d)\n", infos.nodePort)
|
||||
infos.nodePort = w.readDefaultInt(infos.nodePort)
|
||||
|
||||
// Set a proper name to report on the stats page
|
||||
fmt.Println()
|
||||
if infos.ethstats == "" {
|
||||
fmt.Printf("What should the explorer be called on the stats page?\n")
|
||||
infos.ethstats = w.readString() + ":" + w.conf.ethstats
|
||||
} else {
|
||||
fmt.Printf("What should the explorer be called on the stats page? (default = %s)\n", infos.ethstats)
|
||||
infos.ethstats = w.readDefaultString(infos.ethstats) + ":" + w.conf.ethstats
|
||||
}
|
||||
// Try to deploy the explorer on the host
|
||||
nocache := false
|
||||
if existed {
|
||||
fmt.Println()
|
||||
fmt.Printf("Should the explorer be built from scratch (y/n)? (default = no)\n")
|
||||
nocache = w.readDefaultString("n") != "n"
|
||||
}
|
||||
if out, err := deployExplorer(client, w.network, chain, infos, nocache); err != nil {
|
||||
log.Error("Failed to deploy explorer container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
}
|
||||
return
|
||||
}
|
||||
// All ok, run a network scan to pick any changes up
|
||||
log.Info("Waiting for node to finish booting")
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
w.networkStats()
|
||||
}
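
deployExplorer, like the other wizard flows, leans on the readDefaultInt/readDefaultString helpers to print a default and treat an empty line as "keep it". A hedged, illustrative stand-in for that pattern (not the puppeth implementation itself):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// readDefaultString mimics the wizard behaviour: read one line and fall back
// to the supplied default when the user just presses enter.
func readDefaultString(in *bufio.Reader, def string) string {
	text, _ := in.ReadString('\n')
	if text = strings.TrimSpace(text); text != "" {
		return text
	}
	return def
}

func main() {
	in := bufio.NewReader(os.Stdin)
	def := "/data/explorer"
	fmt.Printf("Where should data be stored on the remote machine? (default = %s)\n", def)
	fmt.Println("Using:", readDefaultString(in, def))
}
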
|
@ -19,7 +19,6 @@ package main
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@ -47,8 +46,10 @@ func (w *wizard) deployFaucet() {
|
||||
tiers: 3,
|
||||
}
|
||||
}
|
||||
infos.node.genesis, _ = json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
infos.node.network = w.conf.genesis.Config.ChainId.Int64()
|
||||
existed := err == nil
|
||||
|
||||
infos.node.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
|
||||
infos.node.network = w.conf.Genesis.Config.ChainId.Int64()
|
||||
|
||||
// Figure out which port to listen on
|
||||
fmt.Println()
|
||||
@ -60,7 +61,7 @@ func (w *wizard) deployFaucet() {
|
||||
log.Error("Failed to decide on faucet host", "err", err)
|
||||
return
|
||||
}
|
||||
// Port and proxy settings retrieved, figure out the funcing amount per perdion configurations
|
||||
// Port and proxy settings retrieved, figure out the funding amount per period configurations
|
||||
fmt.Println()
|
||||
fmt.Printf("How many Ethers to release per request? (default = %d)\n", infos.amount)
|
||||
infos.amount = w.readDefaultInt(infos.amount)
|
||||
@ -76,47 +77,6 @@ func (w *wizard) deployFaucet() {
|
||||
log.Error("At least one funding tier must be set")
|
||||
return
|
||||
}
|
||||
// Accessing GitHub gists requires API authorization, retrieve it
|
||||
if infos.githubUser != "" {
|
||||
fmt.Println()
|
||||
fmt.Printf("Reuse previous (%s) GitHub API authorization (y/n)? (default = yes)\n", infos.githubUser)
|
||||
if w.readDefaultString("y") != "y" {
|
||||
infos.githubUser, infos.githubToken = "", ""
|
||||
}
|
||||
}
|
||||
if infos.githubUser == "" {
|
||||
// No previous authorization (or new one requested)
|
||||
fmt.Println()
|
||||
fmt.Println("Which GitHub user to verify Gists through?")
|
||||
infos.githubUser = w.readString()
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("What is the GitHub personal access token of the user? (won't be echoed)")
|
||||
infos.githubToken = w.readPassword()
|
||||
|
||||
// Do a sanity check query against github to ensure it's valid
|
||||
req, _ := http.NewRequest("GET", "https://api.github.com/user", nil)
|
||||
req.SetBasicAuth(infos.githubUser, infos.githubToken)
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
log.Error("Failed to verify GitHub authentication", "err", err)
|
||||
return
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
var msg struct {
|
||||
Login string `json:"login"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
if err = json.NewDecoder(res.Body).Decode(&msg); err != nil {
|
||||
log.Error("Failed to decode authorization response", "err", err)
|
||||
return
|
||||
}
|
||||
if msg.Login != infos.githubUser {
|
||||
log.Error("GitHub authorization failed", "user", infos.githubUser, "message", msg.Message)
|
||||
return
|
||||
}
|
||||
}
|
||||
// Accessing the reCaptcha service requires API authorizations, request it
|
||||
if infos.captchaToken != "" {
|
||||
fmt.Println()
|
||||
@ -129,7 +89,9 @@ func (w *wizard) deployFaucet() {
|
||||
// No previous authorization (or old one discarded)
|
||||
fmt.Println()
|
||||
fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
|
||||
if w.readDefaultString("n") == "y" {
|
||||
if w.readDefaultString("n") == "n" {
|
||||
log.Warn("Users will be able to requests funds via automated scripts")
|
||||
} else {
|
||||
// Captcha protection explicitly requested, read the site and secret keys
|
||||
fmt.Println()
|
||||
fmt.Printf("What is the reCaptcha site key to authenticate human users?\n")
|
||||
@ -175,7 +137,7 @@ func (w *wizard) deployFaucet() {
|
||||
}
|
||||
}
|
||||
}
|
||||
if infos.node.keyJSON == "" {
|
||||
for i := 0; i < 3 && infos.node.keyJSON == ""; i++ {
|
||||
fmt.Println()
|
||||
fmt.Println("Please paste the faucet's funding account key JSON:")
|
||||
infos.node.keyJSON = w.readJSON()
|
||||
@ -186,11 +148,27 @@ func (w *wizard) deployFaucet() {
|
||||
|
||||
if _, err := keystore.DecryptKey([]byte(infos.node.keyJSON), infos.node.keyPass); err != nil {
|
||||
log.Error("Failed to decrypt key with given passphrase")
|
||||
return
|
||||
infos.node.keyJSON = ""
|
||||
infos.node.keyPass = ""
|
||||
}
|
||||
}
|
||||
// Check if the user wants to run the faucet in debug mode (noauth)
|
||||
noauth := "n"
|
||||
if infos.noauth {
|
||||
noauth = "y"
|
||||
}
|
||||
fmt.Println()
|
||||
fmt.Printf("Permit non-authenticated funding requests (y/n)? (default = %v)\n", infos.noauth)
|
||||
infos.noauth = w.readDefaultString(noauth) != "n"
|
||||
|
||||
// Try to deploy the faucet server on the host
|
||||
if out, err := deployFaucet(client, w.network, w.conf.bootLight, infos); err != nil {
|
||||
nocache := false
|
||||
if existed {
|
||||
fmt.Println()
|
||||
fmt.Printf("Should the faucet be built from scratch (y/n)? (default = no)\n")
|
||||
nocache = w.readDefaultString("n") != "n"
|
||||
}
|
||||
if out, err := deployFaucet(client, w.network, w.conf.bootLight, infos, nocache); err != nil {
|
||||
log.Error("Failed to deploy faucet container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
@ -198,5 +176,5 @@ func (w *wizard) deployFaucet() {
|
||||
return
|
||||
}
|
||||
// All ok, run a network scan to pick any changes up
|
||||
w.networkStats(false)
|
||||
w.networkStats()
|
||||
}
|
||||
|
@ -37,7 +37,7 @@ func (w *wizard) makeGenesis() {
|
||||
genesis := &core.Genesis{
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
GasLimit: 4700000,
|
||||
Difficulty: big.NewInt(1048576),
|
||||
Difficulty: big.NewInt(524288),
|
||||
Alloc: make(core.GenesisAlloc),
|
||||
Config: ¶ms.ChainConfig{
|
||||
HomesteadBlock: big.NewInt(1),
|
||||
@ -118,24 +118,16 @@ func (w *wizard) makeGenesis() {
|
||||
for i := int64(0); i < 256; i++ {
|
||||
genesis.Alloc[common.BigToAddress(big.NewInt(i))] = core.GenesisAccount{Balance: big.NewInt(1)}
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Query the user for some custom extras
|
||||
fmt.Println()
|
||||
fmt.Println("Specify your chain/network ID if you want an explicit one (default = random)")
|
||||
genesis.Config.ChainId = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("Anything fun to embed into the genesis block? (max 32 bytes)")
|
||||
|
||||
extra := w.read()
|
||||
if len(extra) > 32 {
|
||||
extra = extra[:32]
|
||||
}
|
||||
genesis.ExtraData = append([]byte(extra), genesis.ExtraData[len(extra):]...)
|
||||
|
||||
// All done, store the genesis and flush to disk
|
||||
w.conf.genesis = genesis
|
||||
log.Info("Configured new genesis block")
|
||||
|
||||
w.conf.Genesis = genesis
|
||||
w.conf.flush()
|
||||
}
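
The extra-data handling in makeGenesis overlays the user supplied text (capped at 32 bytes) onto the front of the existing buffer while preserving its length; a minimal sketch of that splice:

package main

import "fmt"

func main() {
	extraData := make([]byte, 32) // fixed-size field, as in the genesis block

	extra := "hello puppeth"
	if len(extra) > 32 {
		extra = extra[:32]
	}
	extraData = append([]byte(extra), extraData[len(extra):]...)

	fmt.Printf("%d bytes: %q\n", len(extraData), extraData)
}
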
|
||||
|
||||
// manageGenesis permits the modification of chain configuration parameters in
|
||||
@ -145,44 +137,56 @@ func (w *wizard) manageGenesis() {
|
||||
fmt.Println()
|
||||
fmt.Println(" 1. Modify existing fork rules")
|
||||
fmt.Println(" 2. Export genesis configuration")
|
||||
fmt.Println(" 3. Remove genesis configuration")
|
||||
|
||||
choice := w.read()
|
||||
switch {
|
||||
case choice == "1":
|
||||
// Fork rule updating requested, iterate over each fork
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.genesis.Config.HomesteadBlock)
|
||||
w.conf.genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.genesis.Config.HomesteadBlock)
|
||||
fmt.Printf("Which block should Homestead come into effect? (default = %v)\n", w.conf.Genesis.Config.HomesteadBlock)
|
||||
w.conf.Genesis.Config.HomesteadBlock = w.readDefaultBigInt(w.conf.Genesis.Config.HomesteadBlock)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP150Block)
|
||||
w.conf.genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP150Block)
|
||||
fmt.Printf("Which block should EIP150 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP150Block)
|
||||
w.conf.Genesis.Config.EIP150Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP150Block)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP155Block)
|
||||
w.conf.genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP155Block)
|
||||
fmt.Printf("Which block should EIP155 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP155Block)
|
||||
w.conf.Genesis.Config.EIP155Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP155Block)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.genesis.Config.EIP158Block)
|
||||
w.conf.genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.genesis.Config.EIP158Block)
|
||||
fmt.Printf("Which block should EIP158 come into effect? (default = %v)\n", w.conf.Genesis.Config.EIP158Block)
|
||||
w.conf.Genesis.Config.EIP158Block = w.readDefaultBigInt(w.conf.Genesis.Config.EIP158Block)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.genesis.Config.ByzantiumBlock)
|
||||
w.conf.genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.genesis.Config.ByzantiumBlock)
|
||||
fmt.Printf("Which block should Byzantium come into effect? (default = %v)\n", w.conf.Genesis.Config.ByzantiumBlock)
|
||||
w.conf.Genesis.Config.ByzantiumBlock = w.readDefaultBigInt(w.conf.Genesis.Config.ByzantiumBlock)
|
||||
|
||||
out, _ := json.MarshalIndent(w.conf.genesis.Config, "", " ")
|
||||
out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ")
|
||||
fmt.Printf("Chain configuration updated:\n\n%s\n", out)
|
||||
|
||||
case choice == "2":
|
||||
// Save whatever genesis configuration we currently have
|
||||
fmt.Println()
|
||||
fmt.Printf("Which file to save the genesis into? (default = %s.json)\n", w.network)
|
||||
out, _ := json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
out, _ := json.MarshalIndent(w.conf.Genesis, "", " ")
|
||||
if err := ioutil.WriteFile(w.readDefaultString(fmt.Sprintf("%s.json", w.network)), out, 0644); err != nil {
|
||||
log.Error("Failed to save genesis file", "err", err)
|
||||
}
|
||||
log.Info("Exported existing genesis block")
|
||||
|
||||
case choice == "3":
|
||||
// Make sure we don't have any services running
|
||||
if len(w.conf.servers()) > 0 {
|
||||
log.Error("Genesis reset requires all services and servers torn down")
|
||||
return
|
||||
}
|
||||
log.Info("Genesis block destroyed")
|
||||
|
||||
w.conf.Genesis = nil
|
||||
w.conf.flush()
|
||||
|
||||
default:
|
||||
log.Error("That's not something I can do")
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
@ -63,7 +64,7 @@ func (w *wizard) run() {
|
||||
for {
|
||||
w.network = w.readString()
|
||||
if !strings.Contains(w.network, " ") {
|
||||
fmt.Printf("Sweet, you can set this via --network=%s next time!\n\n", w.network)
|
||||
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
|
||||
break
|
||||
}
|
||||
log.Error("I also like to live dangerously, still no spaces")
|
||||
@ -80,22 +81,33 @@ func (w *wizard) run() {
|
||||
} else if err := json.Unmarshal(blob, &w.conf); err != nil {
|
||||
log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err)
|
||||
} else {
|
||||
// Dial all previously known servers concurrently
|
||||
var pend sync.WaitGroup
|
||||
for server, pubkey := range w.conf.Servers {
|
||||
log.Info("Dialing previously configured server", "server", server)
|
||||
client, err := dial(server, pubkey)
|
||||
if err != nil {
|
||||
log.Error("Previous server unreachable", "server", server, "err", err)
|
||||
}
|
||||
w.servers[server] = client
|
||||
pend.Add(1)
|
||||
|
||||
go func(server string, pubkey []byte) {
|
||||
defer pend.Done()
|
||||
|
||||
log.Info("Dialing previously configured server", "server", server)
|
||||
client, err := dial(server, pubkey)
|
||||
if err != nil {
|
||||
log.Error("Previous server unreachable", "server", server, "err", err)
|
||||
}
|
||||
w.lock.Lock()
|
||||
w.servers[server] = client
|
||||
w.lock.Unlock()
|
||||
}(server, pubkey)
|
||||
}
|
||||
w.networkStats(false)
|
||||
pend.Wait()
|
||||
w.networkStats()
|
||||
}
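
The change above turns the sequential server dials into concurrent ones: each dial runs in its own goroutine, results are written into the shared map under the wizard's lock, and a sync.WaitGroup blocks until every attempt has finished. A minimal sketch of the same pattern with stand-in names (dialServer here is hypothetical, not puppeth's dial):

package main

import (
	"fmt"
	"sync"
)

// dialServer is an illustrative stand-in for an expensive, fallible dial.
func dialServer(addr string) (string, error) {
	return "client-for-" + addr, nil
}

func main() {
	servers := []string{"host-a:22", "host-b:22"}
	clients := make(map[string]string)

	var (
		pend sync.WaitGroup
		lock sync.Mutex
	)
	for _, addr := range servers {
		pend.Add(1)
		go func(addr string) {
			defer pend.Done()

			client, err := dialServer(addr)
			if err != nil {
				return // a real caller would log the failure
			}
			lock.Lock()
			clients[addr] = client
			lock.Unlock()
		}(addr)
	}
	pend.Wait()
	fmt.Println(clients)
}
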
|
||||
// Basics done, loop ad infinitum about what to do
|
||||
for {
|
||||
fmt.Println()
|
||||
fmt.Println("What would you like to do? (default = stats)")
|
||||
fmt.Println(" 1. Show network stats")
|
||||
if w.conf.genesis == nil {
|
||||
if w.conf.Genesis == nil {
|
||||
fmt.Println(" 2. Configure new genesis")
|
||||
} else {
|
||||
fmt.Println(" 2. Manage existing genesis")
|
||||
@ -110,15 +122,14 @@ func (w *wizard) run() {
|
||||
} else {
|
||||
fmt.Println(" 4. Manage network components")
|
||||
}
|
||||
//fmt.Println(" 5. ProTips for common usecases")
|
||||
|
||||
choice := w.read()
|
||||
switch {
|
||||
case choice == "" || choice == "1":
|
||||
w.networkStats(false)
|
||||
w.networkStats()
|
||||
|
||||
case choice == "2":
|
||||
if w.conf.genesis == nil {
|
||||
if w.conf.Genesis == nil {
|
||||
w.makeGenesis()
|
||||
} else {
|
||||
w.manageGenesis()
|
||||
@ -126,7 +137,7 @@ func (w *wizard) run() {
|
||||
case choice == "3":
|
||||
if len(w.servers) == 0 {
|
||||
if w.makeServer() != "" {
|
||||
w.networkStats(false)
|
||||
w.networkStats()
|
||||
}
|
||||
} else {
|
||||
w.manageServers()
|
||||
@ -138,9 +149,6 @@ func (w *wizard) run() {
|
||||
w.manageComponents()
|
||||
}
|
||||
|
||||
case choice == "5":
|
||||
w.networkStats(true)
|
||||
|
||||
default:
|
||||
log.Error("That's not something I can do")
|
||||
}
|
||||
|
@ -18,9 +18,10 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@ -29,127 +30,257 @@ import (
|
||||
|
||||
// networkStats verifies the status of network components and generates a protip
|
||||
// configuration set to give users hints on how to do various tasks.
|
||||
func (w *wizard) networkStats(tips bool) {
|
||||
func (w *wizard) networkStats() {
|
||||
if len(w.servers) == 0 {
|
||||
log.Error("No remote machines to gather stats from")
|
||||
log.Info("No remote machines to gather stats from")
|
||||
return
|
||||
}
|
||||
protips := new(protips)
|
||||
// Clear out some previous configs to refill from current scan
|
||||
w.conf.ethstats = ""
|
||||
w.conf.bootFull = w.conf.bootFull[:0]
|
||||
w.conf.bootLight = w.conf.bootLight[:0]
|
||||
|
||||
// Iterate over all the specified hosts and check their status
|
||||
stats := tablewriter.NewWriter(os.Stdout)
|
||||
stats.SetHeader([]string{"Server", "IP", "Status", "Service", "Details"})
|
||||
stats.SetColWidth(100)
|
||||
var pend sync.WaitGroup
|
||||
|
||||
stats := make(serverStats)
|
||||
for server, pubkey := range w.conf.Servers {
|
||||
client := w.servers[server]
|
||||
logger := log.New("server", server)
|
||||
logger.Info("Starting remote server health-check")
|
||||
pend.Add(1)
|
||||
|
||||
// If the server is not connected, try to connect again
|
||||
if client == nil {
|
||||
conn, err := dial(server, pubkey)
|
||||
if err != nil {
|
||||
logger.Error("Failed to establish remote connection", "err", err)
|
||||
stats.Append([]string{server, "", err.Error(), "", ""})
|
||||
continue
|
||||
}
|
||||
client = conn
|
||||
}
|
||||
// Client connected one way or another, run health-checks
|
||||
services := make(map[string]string)
|
||||
logger.Debug("Checking for nginx availability")
|
||||
if infos, err := checkNginx(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
services["nginx"] = err.Error()
|
||||
}
|
||||
} else {
|
||||
services["nginx"] = infos.String()
|
||||
}
|
||||
logger.Debug("Checking for ethstats availability")
|
||||
if infos, err := checkEthstats(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
services["ethstats"] = err.Error()
|
||||
}
|
||||
} else {
|
||||
services["ethstats"] = infos.String()
|
||||
protips.ethstats = infos.config
|
||||
}
|
||||
logger.Debug("Checking for bootnode availability")
|
||||
if infos, err := checkNode(client, w.network, true); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
services["bootnode"] = err.Error()
|
||||
}
|
||||
} else {
|
||||
services["bootnode"] = infos.String()
|
||||
// Gather the service stats for each server concurrently
|
||||
go func(server string, pubkey []byte) {
|
||||
defer pend.Done()
|
||||
|
||||
protips.genesis = string(infos.genesis)
|
||||
protips.bootFull = append(protips.bootFull, infos.enodeFull)
|
||||
if infos.enodeLight != "" {
|
||||
protips.bootLight = append(protips.bootLight, infos.enodeLight)
|
||||
stat := w.gatherStats(server, pubkey, w.servers[server])
|
||||
|
||||
// All status checks complete, report and check next server
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
delete(w.services, server)
|
||||
for service := range stat.services {
|
||||
w.services[server] = append(w.services[server], service)
|
||||
}
|
||||
}
|
||||
logger.Debug("Checking for sealnode availability")
|
||||
if infos, err := checkNode(client, w.network, false); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
services["sealnode"] = err.Error()
|
||||
}
|
||||
} else {
|
||||
services["sealnode"] = infos.String()
|
||||
protips.genesis = string(infos.genesis)
|
||||
}
|
||||
logger.Debug("Checking for faucet availability")
|
||||
if infos, err := checkFaucet(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
services["faucet"] = err.Error()
|
||||
}
|
||||
} else {
|
||||
services["faucet"] = infos.String()
|
||||
}
|
||||
logger.Debug("Checking for dashboard availability")
|
||||
if infos, err := checkDashboard(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
services["dashboard"] = err.Error()
|
||||
}
|
||||
} else {
|
||||
services["dashboard"] = infos.String()
|
||||
}
|
||||
// All status checks complete, report and check next server
|
||||
delete(w.services, server)
|
||||
for service := range services {
|
||||
w.services[server] = append(w.services[server], service)
|
||||
}
|
||||
server, address := client.server, client.address
|
||||
for service, status := range services {
|
||||
stats.Append([]string{server, address, "online", service, status})
|
||||
server, address = "", ""
|
||||
}
|
||||
if len(services) == 0 {
|
||||
stats.Append([]string{server, address, "online", "", ""})
|
||||
}
|
||||
stats[server] = stat
|
||||
}(server, pubkey)
|
||||
}
|
||||
// If a genesis block was found, load it into our configs
|
||||
if protips.genesis != "" && w.conf.genesis == nil {
|
||||
genesis := new(core.Genesis)
|
||||
if err := json.Unmarshal([]byte(protips.genesis), genesis); err != nil {
|
||||
log.Error("Failed to parse remote genesis", "err", err)
|
||||
} else {
|
||||
w.conf.genesis = genesis
|
||||
protips.network = genesis.Config.ChainId.Int64()
|
||||
}
|
||||
}
|
||||
if protips.ethstats != "" {
|
||||
w.conf.ethstats = protips.ethstats
|
||||
}
|
||||
w.conf.bootFull = protips.bootFull
|
||||
w.conf.bootLight = protips.bootLight
|
||||
pend.Wait()
|
||||
|
||||
// Print any collected stats and return
|
||||
if !tips {
|
||||
stats.Render()
|
||||
} else {
|
||||
protips.print(w.network)
|
||||
stats.render()
|
||||
}
|
||||
|
||||
// gatherStats gathers service statistics for a particular remote server.
|
||||
func (w *wizard) gatherStats(server string, pubkey []byte, client *sshClient) *serverStat {
|
||||
// Gather some global stats to feed into the wizard
|
||||
var (
|
||||
genesis string
|
||||
ethstats string
|
||||
bootFull []string
|
||||
bootLight []string
|
||||
)
|
||||
// Ensure a valid SSH connection to the remote server
|
||||
logger := log.New("server", server)
|
||||
logger.Info("Starting remote server health-check")
|
||||
|
||||
stat := &serverStat{
|
||||
address: client.address,
|
||||
services: make(map[string]map[string]string),
|
||||
}
|
||||
if client == nil {
|
||||
conn, err := dial(server, pubkey)
|
||||
if err != nil {
|
||||
logger.Error("Failed to establish remote connection", "err", err)
|
||||
stat.failure = err.Error()
|
||||
return stat
|
||||
}
|
||||
client = conn
|
||||
}
|
||||
// Client connected one way or another, run health-checks
|
||||
logger.Debug("Checking for nginx availability")
|
||||
if infos, err := checkNginx(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
stat.services["nginx"] = map[string]string{"offline": err.Error()}
|
||||
}
|
||||
} else {
|
||||
stat.services["nginx"] = infos.Report()
|
||||
}
|
||||
logger.Debug("Checking for ethstats availability")
|
||||
if infos, err := checkEthstats(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
stat.services["ethstats"] = map[string]string{"offline": err.Error()}
|
||||
}
|
||||
} else {
|
||||
stat.services["ethstats"] = infos.Report()
|
||||
ethstats = infos.config
|
||||
}
|
||||
logger.Debug("Checking for bootnode availability")
|
||||
if infos, err := checkNode(client, w.network, true); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
stat.services["bootnode"] = map[string]string{"offline": err.Error()}
|
||||
}
|
||||
} else {
|
||||
stat.services["bootnode"] = infos.Report()
|
||||
|
||||
genesis = string(infos.genesis)
|
||||
bootFull = append(bootFull, infos.enodeFull)
|
||||
if infos.enodeLight != "" {
|
||||
bootLight = append(bootLight, infos.enodeLight)
|
||||
}
|
||||
}
|
||||
logger.Debug("Checking for sealnode availability")
|
||||
if infos, err := checkNode(client, w.network, false); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
stat.services["sealnode"] = map[string]string{"offline": err.Error()}
|
||||
}
|
||||
} else {
|
||||
stat.services["sealnode"] = infos.Report()
|
||||
genesis = string(infos.genesis)
|
||||
}
|
||||
logger.Debug("Checking for explorer availability")
|
||||
if infos, err := checkExplorer(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
stat.services["explorer"] = map[string]string{"offline": err.Error()}
|
||||
}
|
||||
} else {
|
||||
stat.services["explorer"] = infos.Report()
|
||||
}
|
||||
logger.Debug("Checking for wallet availability")
|
||||
if infos, err := checkWallet(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
stat.services["wallet"] = map[string]string{"offline": err.Error()}
|
||||
}
|
||||
} else {
|
||||
stat.services["wallet"] = infos.Report()
|
||||
}
|
||||
logger.Debug("Checking for faucet availability")
|
||||
if infos, err := checkFaucet(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
stat.services["faucet"] = map[string]string{"offline": err.Error()}
|
||||
}
|
||||
} else {
|
||||
stat.services["faucet"] = infos.Report()
|
||||
}
|
||||
logger.Debug("Checking for dashboard availability")
|
||||
if infos, err := checkDashboard(client, w.network); err != nil {
|
||||
if err != ErrServiceUnknown {
|
||||
stat.services["dashboard"] = map[string]string{"offline": err.Error()}
|
||||
}
|
||||
} else {
|
||||
stat.services["dashboard"] = infos.Report()
|
||||
}
|
||||
// Feed any newly discovered information into the wizard
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
if genesis != "" && w.conf.Genesis == nil {
|
||||
g := new(core.Genesis)
|
||||
if err := json.Unmarshal([]byte(genesis), g); err != nil {
|
||||
log.Error("Failed to parse remote genesis", "err", err)
|
||||
} else {
|
||||
w.conf.Genesis = g
|
||||
}
|
||||
}
|
||||
if ethstats != "" {
|
||||
w.conf.ethstats = ethstats
|
||||
}
|
||||
w.conf.bootFull = append(w.conf.bootFull, bootFull...)
|
||||
w.conf.bootLight = append(w.conf.bootLight, bootLight...)
|
||||
|
||||
return stat
|
||||
}
|
||||
|
||||
// serverStat is a collection of service configuration parameters and health
|
||||
// check reports to print to the user.
|
||||
type serverStat struct {
|
||||
address string
|
||||
failure string
|
||||
services map[string]map[string]string
|
||||
}
|
||||
|
||||
// serverStats is a collection of server stats for multiple hosts.
|
||||
type serverStats map[string]*serverStat
|
||||
|
||||
// render converts the gathered statistics into a user friendly tabular report
|
||||
// and prints it to the standard output.
|
||||
func (stats serverStats) render() {
|
||||
// Start gathering service statistics and config parameters
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
|
||||
table.SetHeader([]string{"Server", "Address", "Service", "Config", "Value"})
|
||||
table.SetAlignment(tablewriter.ALIGN_LEFT)
|
||||
table.SetColWidth(100)
|
||||
|
||||
// Find the longest lines for all columns for the hacked separator
|
||||
separator := make([]string, 5)
|
||||
for server, stat := range stats {
|
||||
if len(server) > len(separator[0]) {
|
||||
separator[0] = strings.Repeat("-", len(server))
|
||||
}
|
||||
if len(stat.address) > len(separator[1]) {
|
||||
separator[1] = strings.Repeat("-", len(stat.address))
|
||||
}
|
||||
for service, configs := range stat.services {
|
||||
if len(service) > len(separator[2]) {
|
||||
separator[2] = strings.Repeat("-", len(service))
|
||||
}
|
||||
for config, value := range configs {
|
||||
if len(config) > len(separator[3]) {
|
||||
separator[3] = strings.Repeat("-", len(config))
|
||||
}
|
||||
if len(value) > len(separator[4]) {
|
||||
separator[4] = strings.Repeat("-", len(value))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Fill up the server report in alphabetical order
|
||||
servers := make([]string, 0, len(stats))
|
||||
for server := range stats {
|
||||
servers = append(servers, server)
|
||||
}
|
||||
sort.Strings(servers)
|
||||
|
||||
for i, server := range servers {
|
||||
// Add a separator between all servers
|
||||
if i > 0 {
|
||||
table.Append(separator)
|
||||
}
|
||||
// Fill up the service report in alphabetical order
|
||||
services := make([]string, 0, len(stats[server].services))
|
||||
for service := range stats[server].services {
|
||||
services = append(services, service)
|
||||
}
|
||||
sort.Strings(services)
|
||||
|
||||
if len(services) == 0 {
|
||||
table.Append([]string{server, stats[server].address, "", "", ""})
|
||||
}
|
||||
for j, service := range services {
|
||||
// Add an empty line between all services
|
||||
if j > 0 {
|
||||
table.Append([]string{"", "", "", separator[3], separator[4]})
|
||||
}
|
||||
// Fill up the config report in alphabetical order
|
||||
configs := make([]string, 0, len(stats[server].services[service]))
|
||||
for service := range stats[server].services[service] {
|
||||
configs = append(configs, service)
|
||||
}
|
||||
sort.Strings(configs)
|
||||
|
||||
for k, config := range configs {
|
||||
switch {
|
||||
case j == 0 && k == 0:
|
||||
table.Append([]string{server, stats[server].address, service, config, stats[server].services[service][config]})
|
||||
case k == 0:
|
||||
table.Append([]string{"", "", service, config, stats[server].services[service][config]})
|
||||
default:
|
||||
table.Append([]string{"", "", "", config, stats[server].services[service][config]})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
table.Render()
|
||||
}
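
render sorts the server, service and config names before printing because Go map iteration order is randomized; without the sort the table rows would shuffle between runs. The idiom in isolation:

package main

import (
	"fmt"
	"sort"
)

func main() {
	stats := map[string]string{"server-b": "online", "server-a": "offline"}

	// Collect and sort the keys, then iterate in the sorted order.
	names := make([]string, 0, len(stats))
	for name := range stats {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		fmt.Printf("%-10s %s\n", name, stats[name])
	}
}
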
|
||||
|
||||
// protips contains a collection of network infos to report pro-tips
|
||||
@ -161,75 +292,3 @@ type protips struct {
|
||||
bootLight []string
|
||||
ethstats string
|
||||
}
|
||||
|
||||
// print analyzes the network information available and prints a collection of
|
||||
// pro tips for the user's consideration.
|
||||
func (p *protips) print(network string) {
|
||||
// If a known genesis block is available, display it and prepend an init command
|
||||
fullinit, lightinit := "", ""
|
||||
if p.genesis != "" {
|
||||
fullinit = fmt.Sprintf("geth --datadir=$HOME/.%s init %s.json && ", network, network)
|
||||
lightinit = fmt.Sprintf("geth --datadir=$HOME/.%s --light init %s.json && ", network, network)
|
||||
}
|
||||
// If an ethstats server is available, add the ethstats flag
|
||||
statsflag := ""
|
||||
if p.ethstats != "" {
|
||||
if strings.Contains(p.ethstats, " ") {
|
||||
statsflag = fmt.Sprintf(` --ethstats="yournode:%s"`, p.ethstats)
|
||||
} else {
|
||||
statsflag = fmt.Sprintf(` --ethstats=yournode:%s`, p.ethstats)
|
||||
}
|
||||
}
|
||||
// If bootnodes have been specified, add the bootnode flag
|
||||
bootflagFull := ""
|
||||
if len(p.bootFull) > 0 {
|
||||
bootflagFull = fmt.Sprintf(` --bootnodes %s`, strings.Join(p.bootFull, ","))
|
||||
}
|
||||
bootflagLight := ""
|
||||
if len(p.bootLight) > 0 {
|
||||
bootflagLight = fmt.Sprintf(` --bootnodes %s`, strings.Join(p.bootLight, ","))
|
||||
}
|
||||
// Assemble all the known pro-tips
|
||||
var tasks, tips []string
|
||||
|
||||
tasks = append(tasks, "Run an archive node with historical data")
|
||||
tips = append(tips, fmt.Sprintf("%sgeth --networkid=%d --datadir=$HOME/.%s --cache=1024%s%s", fullinit, p.network, network, statsflag, bootflagFull))
|
||||
|
||||
tasks = append(tasks, "Run a full node with recent data only")
|
||||
tips = append(tips, fmt.Sprintf("%sgeth --networkid=%d --datadir=$HOME/.%s --cache=512 --fast%s%s", fullinit, p.network, network, statsflag, bootflagFull))
|
||||
|
||||
tasks = append(tasks, "Run a light node with on demand retrievals")
|
||||
tips = append(tips, fmt.Sprintf("%sgeth --networkid=%d --datadir=$HOME/.%s --light%s%s", lightinit, p.network, network, statsflag, bootflagLight))
|
||||
|
||||
tasks = append(tasks, "Run an embedded node with constrained memory")
|
||||
tips = append(tips, fmt.Sprintf("%sgeth --networkid=%d --datadir=$HOME/.%s --cache=32 --light%s%s", lightinit, p.network, network, statsflag, bootflagLight))
|
||||
|
||||
// If the tips are short, display in a table
|
||||
short := true
|
||||
for _, tip := range tips {
|
||||
if len(tip) > 100 {
|
||||
short = false
|
||||
break
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
if short {
|
||||
howto := tablewriter.NewWriter(os.Stdout)
|
||||
howto.SetHeader([]string{"Fun tasks for you", "Tips on how to"})
|
||||
howto.SetColWidth(100)
|
||||
|
||||
for i := 0; i < len(tasks); i++ {
|
||||
howto.Append([]string{tasks[i], tips[i]})
|
||||
}
|
||||
howto.Render()
|
||||
return
|
||||
}
|
||||
// Meh, tips got ugly, split into many lines
|
||||
for i := 0; i < len(tasks); i++ {
|
||||
fmt.Println(tasks[i])
|
||||
fmt.Println(strings.Repeat("-", len(tasks[i])))
|
||||
fmt.Println(tips[i])
|
||||
fmt.Println()
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
@ -53,12 +53,12 @@ func (w *wizard) manageServers() {
|
||||
w.conf.flush()
|
||||
|
||||
log.Info("Disconnected existing server", "server", server)
|
||||
w.networkStats(false)
|
||||
w.networkStats()
|
||||
return
|
||||
}
|
||||
// If the user requested connecting a new server, do it
|
||||
if w.makeServer() != "" {
|
||||
w.networkStats(false)
|
||||
w.networkStats()
|
||||
}
|
||||
}
|
||||
|
||||
@ -174,9 +174,10 @@ func (w *wizard) deployComponent() {
|
||||
fmt.Println(" 1. Ethstats - Network monitoring tool")
|
||||
fmt.Println(" 2. Bootnode - Entry point of the network")
|
||||
fmt.Println(" 3. Sealer - Full node minting new blocks")
|
||||
fmt.Println(" 4. Wallet - Browser wallet for quick sends (todo)")
|
||||
fmt.Println(" 5. Faucet - Crypto faucet to give away funds")
|
||||
fmt.Println(" 6. Dashboard - Website listing above web-services")
|
||||
fmt.Println(" 4. Explorer - Chain analysis webservice (ethash only)")
|
||||
fmt.Println(" 5. Wallet - Browser wallet for quick sends")
|
||||
fmt.Println(" 6. Faucet - Crypto faucet to give away funds")
|
||||
fmt.Println(" 7. Dashboard - Website listing above web-services")
|
||||
|
||||
switch w.read() {
|
||||
case "1":
|
||||
@ -186,9 +187,12 @@ func (w *wizard) deployComponent() {
|
||||
case "3":
|
||||
w.deployNode(false)
|
||||
case "4":
|
||||
w.deployExplorer()
|
||||
case "5":
|
||||
w.deployFaucet()
|
||||
w.deployWallet()
|
||||
case "6":
|
||||
w.deployFaucet()
|
||||
case "7":
|
||||
w.deployDashboard()
|
||||
default:
|
||||
log.Error("That's not something I can do")
|
||||
|
@ -29,7 +29,8 @@ import (
|
||||
//
|
||||
// If the user elects not to use a reverse proxy, an empty hostname is returned!
|
||||
func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (string, error) {
|
||||
if proxy, _ := checkNginx(client, w.network); proxy != nil {
|
||||
proxy, _ := checkNginx(client, w.network)
|
||||
if proxy != nil {
|
||||
// Reverse proxy is running, if ports match, we need a virtual host
|
||||
if proxy.port == port {
|
||||
fmt.Println()
|
||||
@ -41,7 +42,13 @@ func (w *wizard) ensureVirtualHost(client *sshClient, port int, def string) (str
|
||||
fmt.Println()
|
||||
fmt.Println("Allow sharing the port with other services (y/n)? (default = yes)")
|
||||
if w.readDefaultString("y") == "y" {
|
||||
if out, err := deployNginx(client, w.network, port); err != nil {
|
||||
nocache := false
|
||||
if proxy != nil {
|
||||
fmt.Println()
|
||||
fmt.Printf("Should the reverse-proxy be rebuilt from scratch (y/n)? (default = no)\n")
|
||||
nocache = w.readDefaultString("n") != "n"
|
||||
}
|
||||
if out, err := deployNginx(client, w.network, port, nocache); err != nil {
|
||||
log.Error("Failed to deploy reverse-proxy", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
|
@ -29,7 +29,7 @@ import (
|
||||
// deployNode creates a new node configuration based on some user input.
|
||||
func (w *wizard) deployNode(boot bool) {
|
||||
// Do some sanity check before the user wastes time on input
|
||||
if w.conf.genesis == nil {
|
||||
if w.conf.Genesis == nil {
|
||||
log.Error("No genesis block configured")
|
||||
return
|
||||
}
|
||||
@ -44,7 +44,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
}
|
||||
client := w.servers[server]
|
||||
|
||||
// Retrieve any active ethstats configurations from the server
|
||||
// Retrieve any active node configurations from the server
|
||||
infos, err := checkNode(client, w.network, boot)
|
||||
if err != nil {
|
||||
if boot {
|
||||
@ -53,8 +53,10 @@ func (w *wizard) deployNode(boot bool) {
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
|
||||
}
|
||||
}
|
||||
infos.genesis, _ = json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
infos.network = w.conf.genesis.Config.ChainId.Int64()
|
||||
existed := err == nil
|
||||
|
||||
infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
|
||||
infos.network = w.conf.Genesis.Config.ChainId.Int64()
|
||||
|
||||
// Figure out where the user wants to store the persistent data
|
||||
fmt.Println()
|
||||
@ -65,6 +67,16 @@ func (w *wizard) deployNode(boot bool) {
|
||||
fmt.Printf("Where should data be stored on the remote machine? (default = %s)\n", infos.datadir)
|
||||
infos.datadir = w.readDefaultString(infos.datadir)
|
||||
}
|
||||
if w.conf.Genesis.Config.Ethash != nil && !boot {
|
||||
fmt.Println()
|
||||
if infos.ethashdir == "" {
|
||||
fmt.Printf("Where should the ethash mining DAGs be stored on the remote machine?\n")
|
||||
infos.ethashdir = w.readString()
|
||||
} else {
|
||||
fmt.Printf("Where should the ethash mining DAGs be stored on the remote machine? (default = %s)\n", infos.ethashdir)
|
||||
infos.ethashdir = w.readDefaultString(infos.ethashdir)
|
||||
}
|
||||
}
|
||||
// Figure out which port to listen on
|
||||
fmt.Println()
|
||||
fmt.Printf("Which TCP/UDP port to listen on? (default = %d)\n", infos.portFull)
|
||||
@ -91,7 +103,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
}
|
||||
// If the node is a miner/signer, load up needed credentials
|
||||
if !boot {
|
||||
if w.conf.genesis.Config.Ethash != nil {
|
||||
if w.conf.Genesis.Config.Ethash != nil {
|
||||
// Ethash based miners only need an etherbase to mine against
|
||||
fmt.Println()
|
||||
if infos.etherbase == "" {
|
||||
@ -106,7 +118,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
fmt.Printf("What address should the miner user? (default = %s)\n", infos.etherbase)
|
||||
infos.etherbase = w.readDefaultAddress(common.HexToAddress(infos.etherbase)).Hex()
|
||||
}
|
||||
} else if w.conf.genesis.Config.Clique != nil {
|
||||
} else if w.conf.Genesis.Config.Clique != nil {
|
||||
// If a previous signer was already set, offer to reuse it
|
||||
if infos.keyJSON != "" {
|
||||
if key, err := keystore.DecryptKey([]byte(infos.keyJSON), infos.keyPass); err != nil {
|
||||
@ -145,7 +157,13 @@ func (w *wizard) deployNode(boot bool) {
|
||||
infos.gasPrice = w.readDefaultFloat(infos.gasPrice)
|
||||
}
|
||||
// Try to deploy the full node on the host
|
||||
if out, err := deployNode(client, w.network, w.conf.bootFull, w.conf.bootLight, infos); err != nil {
|
||||
nocache := false
|
||||
if existed {
|
||||
fmt.Println()
|
||||
fmt.Printf("Should the node be built from scratch (y/n)? (default = no)\n")
|
||||
nocache = w.readDefaultString("n") != "n"
|
||||
}
|
||||
if out, err := deployNode(client, w.network, w.conf.bootFull, w.conf.bootLight, infos, nocache); err != nil {
|
||||
log.Error("Failed to deploy Ethereum node container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
@ -156,5 +174,5 @@ func (w *wizard) deployNode(boot bool) {
|
||||
log.Info("Waiting for node to finish booting")
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
w.networkStats(false)
|
||||
w.networkStats()
|
||||
}
|
||||
|
113
cmd/puppeth/wizard_wallet.go
Normal file
@ -0,0 +1,113 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// deployWallet creates a new web wallet based on some user input.
|
||||
func (w *wizard) deployWallet() {
|
||||
// Do some sanity check before the user wastes time on input
|
||||
if w.conf.Genesis == nil {
|
||||
log.Error("No genesis block configured")
|
||||
return
|
||||
}
|
||||
if w.conf.ethstats == "" {
|
||||
log.Error("No ethstats server configured")
|
||||
return
|
||||
}
|
||||
// Select the server to interact with
|
||||
server := w.selectServer()
|
||||
if server == "" {
|
||||
return
|
||||
}
|
||||
client := w.servers[server]
|
||||
|
||||
// Retrieve any active node configurations from the server
|
||||
infos, err := checkWallet(client, w.network)
|
||||
if err != nil {
|
||||
infos = &walletInfos{
|
||||
nodePort: 30303, rpcPort: 8545, webPort: 80, webHost: client.server,
|
||||
}
|
||||
}
|
||||
existed := err == nil
|
||||
|
||||
infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ")
|
||||
infos.network = w.conf.Genesis.Config.ChainId.Int64()
|
||||
|
||||
// Figure out which port to listen on
|
||||
fmt.Println()
|
||||
fmt.Printf("Which port should the wallet listen on? (default = %d)\n", infos.webPort)
|
||||
infos.webPort = w.readDefaultInt(infos.webPort)
|
||||
|
||||
// Figure which virtual-host to deploy ethstats on
|
||||
if infos.webHost, err = w.ensureVirtualHost(client, infos.webPort, infos.webHost); err != nil {
|
||||
log.Error("Failed to decide on wallet host", "err", err)
|
||||
return
|
||||
}
|
||||
// Figure out where the user wants to store the persistent data
|
||||
fmt.Println()
|
||||
if infos.datadir == "" {
|
||||
fmt.Printf("Where should data be stored on the remote machine?\n")
|
||||
infos.datadir = w.readString()
|
||||
} else {
|
||||
fmt.Printf("Where should data be stored on the remote machine? (default = %s)\n", infos.datadir)
|
||||
infos.datadir = w.readDefaultString(infos.datadir)
|
||||
}
|
||||
// Figure out which port to listen on
|
||||
fmt.Println()
|
||||
fmt.Printf("Which TCP/UDP port should the backing node listen on? (default = %d)\n", infos.nodePort)
|
||||
infos.nodePort = w.readDefaultInt(infos.nodePort)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("Which port should the backing RPC API listen on? (default = %d)\n", infos.rpcPort)
|
||||
infos.rpcPort = w.readDefaultInt(infos.rpcPort)
|
||||
|
||||
// Set a proper name to report on the stats page
|
||||
fmt.Println()
|
||||
if infos.ethstats == "" {
|
||||
fmt.Printf("What should the wallet be called on the stats page?\n")
|
||||
infos.ethstats = w.readString() + ":" + w.conf.ethstats
|
||||
} else {
|
||||
fmt.Printf("What should the wallet be called on the stats page? (default = %s)\n", infos.ethstats)
|
||||
infos.ethstats = w.readDefaultString(infos.ethstats) + ":" + w.conf.ethstats
|
||||
}
|
||||
// Try to deploy the wallet on the host
|
||||
nocache := false
|
||||
if existed {
|
||||
fmt.Println()
|
||||
fmt.Printf("Should the wallet be built from scratch (y/n)? (default = no)\n")
|
||||
nocache = w.readDefaultString("n") != "n"
|
||||
}
|
||||
if out, err := deployWallet(client, w.network, w.conf.bootFull, infos, nocache); err != nil {
|
||||
log.Error("Failed to deploy wallet container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
}
|
||||
return
|
||||
}
|
||||
// All ok, run a network scan to pick any changes up
|
||||
log.Info("Waiting for node to finish booting")
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
w.networkStats()
|
||||
}
|
342
cmd/swarm/config.go
Normal file
@ -0,0 +1,342 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/naoina/toml"
|
||||
|
||||
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
|
||||
)
|
||||
|
||||
var (
|
||||
//flag definition for the dumpconfig command
|
||||
DumpConfigCommand = cli.Command{
|
||||
Action: utils.MigrateFlags(dumpConfig),
|
||||
Name: "dumpconfig",
|
||||
Usage: "Show configuration values",
|
||||
ArgsUsage: "",
|
||||
Flags: app.Flags,
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `The dumpconfig command shows configuration values.`,
|
||||
}
|
||||
|
||||
//flag definition for the config file command
|
||||
SwarmTomlConfigPathFlag = cli.StringFlag{
|
||||
Name: "config",
|
||||
Usage: "TOML configuration file",
|
||||
}
|
||||
)
|
||||
|
||||
//constants for environment variables
|
||||
const (
|
||||
SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
|
||||
SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
|
||||
SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
|
||||
SWARM_ENV_PORT = "SWARM_PORT"
|
||||
SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
|
||||
SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
|
||||
SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
|
||||
SWARM_ENV_SYNC_ENABLE = "SWARM_SYNC_ENABLE"
|
||||
SWARM_ENV_ENS_API = "SWARM_ENS_API"
|
||||
SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
|
||||
SWARM_ENV_CORS = "SWARM_CORS"
|
||||
SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
|
||||
GETH_ENV_DATADIR = "GETH_DATADIR"
|
||||
)
|
||||
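Each of these variables is tied to its cli flag further down in this diff through the flags' new EnvVar fields (for example SwarmPortFlag gets EnvVar: SWARM_ENV_PORT), and envVarsOverride below reads the same names directly, so the environment and the command line feed one shared override path.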
|
||||
// These settings ensure that TOML keys use the same names as Go struct fields.
|
||||
var tomlSettings = toml.Config{
|
||||
NormFieldName: func(rt reflect.Type, key string) string {
|
||||
return key
|
||||
},
|
||||
FieldToKey: func(rt reflect.Type, field string) string {
|
||||
return field
|
||||
},
|
||||
MissingField: func(rt reflect.Type, field string) error {
|
||||
link := ""
|
||||
if unicode.IsUpper(rune(rt.Name()[0])) && rt.PkgPath() != "main" {
|
||||
link = fmt.Sprintf(", check github.com/ethereum/go-ethereum/swarm/api/config.go for available fields")
|
||||
}
|
||||
return fmt.Errorf("field '%s' is not defined in %s%s", field, rt.String(), link)
|
||||
},
|
||||
}
|
||||
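To make the MissingField hook concrete, here is a small hypothetical sketch (not part of the commit; it assumes it sits in the same package as this file so tomlSettings, bzzapi and the strings import are in scope): an unknown key in the TOML input is rejected with a pointer to the list of valid fields instead of being silently ignored.

// Hypothetical sketch: decoding a fragment with a key that does not exist on
// bzzapi.Config should fail with the descriptive MissingField error, roughly
// "field 'NotAField' is not defined in api.Config, check .../swarm/api/config.go for available fields".
func exampleStrictDecode() error {
    var cfg bzzapi.Config
    src := "Port = \"8500\"\nNotAField = 1\n"
    return tomlSettings.NewDecoder(strings.NewReader(src)).Decode(&cfg)
}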
|
||||
//before booting the swarm node, build the configuration
|
||||
func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
|
||||
//check for deprecated flags
|
||||
checkDeprecated(ctx)
|
||||
//start by creating a default config
|
||||
config = bzzapi.NewDefaultConfig()
|
||||
//first load settings from config file (if provided)
|
||||
config, err = configFileOverride(config, ctx)
|
||||
//override settings provided by environment variables
|
||||
config = envVarsOverride(config)
|
||||
//override settings provided by command line
|
||||
config = cmdLineOverride(config, ctx)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
//finally, after the configuration build phase is finished, initialize
|
||||
func initSwarmNode(config *bzzapi.Config, stack *node.Node, ctx *cli.Context) {
|
||||
//at this point, all vars should be set in the Config
|
||||
//get the account for the provided swarm account
|
||||
prvkey := getAccount(config.BzzAccount, ctx, stack)
|
||||
//set the resolved config path (geth --datadir)
|
||||
config.Path = stack.InstanceDir()
|
||||
//finally, initialize the configuration
|
||||
config.Init(prvkey)
|
||||
//configuration phase completed here
|
||||
log.Debug("Starting Swarm with the following parameters:")
|
||||
//after having created the config, print it to screen
|
||||
log.Debug(printConfig(config))
|
||||
}
|
||||
|
||||
//override the current config with whatever is in the config file, if a config file has been provided
|
||||
func configFileOverride(config *bzzapi.Config, ctx *cli.Context) (*bzzapi.Config, error) {
|
||||
var err error
|
||||
|
||||
//only do something if the -config flag has been set
|
||||
if ctx.GlobalIsSet(SwarmTomlConfigPathFlag.Name) {
|
||||
var filepath string
|
||||
if filepath = ctx.GlobalString(SwarmTomlConfigPathFlag.Name); filepath == "" {
|
||||
utils.Fatalf("Config file flag provided with invalid file path")
|
||||
}
|
||||
f, err := os.Open(filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
//decode the TOML file into a Config struct
|
||||
//note that we are decoding into the existing defaultConfig;
|
||||
//if an entry is not present in the file, the default entry is kept
|
||||
err = tomlSettings.NewDecoder(f).Decode(&config)
|
||||
// Add file name to errors that have a line number.
|
||||
if _, ok := err.(*toml.LineError); ok {
|
||||
err = errors.New(filepath + ", " + err.Error())
|
||||
}
|
||||
}
|
||||
return config, err
|
||||
}
|
||||
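A hypothetical illustration of the comment above about defaults being kept (again assuming the same package; not part of the commit): a file that only sets Port overrides Port and nothing else.

// Hypothetical sketch: only keys present in the file are applied, every other
// field keeps the value already in the config being decoded into.
func examplePartialFileOverride() (*bzzapi.Config, error) {
    config := bzzapi.NewDefaultConfig()
    err := tomlSettings.NewDecoder(strings.NewReader("Port = \"8600\"\n")).Decode(&config)
    return config, err // config.Port == "8600", all other fields keep their defaults
}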
|
||||
//override the current config with whatever is provided through the command line
|
||||
//unless otherwise noted, an empty (zero) value is ignored and does not override the current setting
|
||||
func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config {
|
||||
|
||||
if keyid := ctx.GlobalString(SwarmAccountFlag.Name); keyid != "" {
|
||||
currentConfig.BzzAccount = keyid
|
||||
}
|
||||
|
||||
if chbookaddr := ctx.GlobalString(ChequebookAddrFlag.Name); chbookaddr != "" {
|
||||
currentConfig.Contract = common.HexToAddress(chbookaddr)
|
||||
}
|
||||
|
||||
if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
|
||||
if id, _ := strconv.Atoi(networkid); id != 0 {
|
||||
currentConfig.NetworkId = uint64(id)
|
||||
}
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(utils.DataDirFlag.Name) {
|
||||
if datadir := ctx.GlobalString(utils.DataDirFlag.Name); datadir != "" {
|
||||
currentConfig.Path = datadir
|
||||
}
|
||||
}
|
||||
|
||||
bzzport := ctx.GlobalString(SwarmPortFlag.Name)
|
||||
if len(bzzport) > 0 {
|
||||
currentConfig.Port = bzzport
|
||||
}
|
||||
|
||||
if bzzaddr := ctx.GlobalString(SwarmListenAddrFlag.Name); bzzaddr != "" {
|
||||
currentConfig.ListenAddr = bzzaddr
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(SwarmSwapEnabledFlag.Name) {
|
||||
currentConfig.SwapEnabled = true
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) {
|
||||
currentConfig.SyncEnabled = true
|
||||
}
|
||||
|
||||
currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name)
|
||||
if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
|
||||
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
|
||||
}
|
||||
|
||||
//EnsAPIs can be set to "", so can't check for empty string, as it is allowed!
|
||||
if ctx.GlobalIsSet(EnsAPIFlag.Name) {
|
||||
ensAPIs := ctx.GlobalStringSlice(EnsAPIFlag.Name)
|
||||
// Disable ENS resolver if --ens-api="" is specified
|
||||
if len(ensAPIs) == 1 && ensAPIs[0] == "" {
|
||||
currentConfig.EnsDisabled = true
|
||||
currentConfig.EnsAPIs = nil
|
||||
} else {
|
||||
currentConfig.EnsDisabled = false
|
||||
currentConfig.EnsAPIs = ensAPIs
|
||||
}
|
||||
}
|
||||
|
||||
if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
|
||||
currentConfig.EnsRoot = common.HexToAddress(ensaddr)
|
||||
}
|
||||
|
||||
if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
|
||||
currentConfig.Cors = cors
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(utils.BootnodesFlag.Name) {
|
||||
currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
|
||||
}
|
||||
|
||||
return currentConfig
|
||||
|
||||
}
|
||||
|
||||
//override the current config with whatever is provided in environment variables
|
||||
//unless otherwise noted, an empty (zero) value is ignored and does not override the current setting
|
||||
func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
|
||||
|
||||
if keyid := os.Getenv(SWARM_ENV_ACCOUNT); keyid != "" {
|
||||
currentConfig.BzzAccount = keyid
|
||||
}
|
||||
|
||||
if chbookaddr := os.Getenv(SWARM_ENV_CHEQUEBOOK_ADDR); chbookaddr != "" {
|
||||
currentConfig.Contract = common.HexToAddress(chbookaddr)
|
||||
}
|
||||
|
||||
if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
|
||||
if id, _ := strconv.Atoi(networkid); id != 0 {
|
||||
currentConfig.NetworkId = uint64(id)
|
||||
}
|
||||
}
|
||||
|
||||
if datadir := os.Getenv(GETH_ENV_DATADIR); datadir != "" {
|
||||
currentConfig.Path = datadir
|
||||
}
|
||||
|
||||
bzzport := os.Getenv(SWARM_ENV_PORT)
|
||||
if len(bzzport) > 0 {
|
||||
currentConfig.Port = bzzport
|
||||
}
|
||||
|
||||
if bzzaddr := os.Getenv(SWARM_ENV_LISTEN_ADDR); bzzaddr != "" {
|
||||
currentConfig.ListenAddr = bzzaddr
|
||||
}
|
||||
|
||||
if swapenable := os.Getenv(SWARM_ENV_SWAP_ENABLE); swapenable != "" {
|
||||
if swap, err := strconv.ParseBool(swapenable); err == nil {
|
||||
currentConfig.SwapEnabled = swap
|
||||
}
|
||||
}
|
||||
|
||||
if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" {
|
||||
if sync, err := strconv.ParseBool(syncenable); err == nil {
|
||||
currentConfig.SyncEnabled = sync
|
||||
}
|
||||
}
|
||||
|
||||
if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
|
||||
currentConfig.SwapApi = swapapi
|
||||
}
|
||||
|
||||
if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
|
||||
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
|
||||
}
|
||||
|
||||
//EnsAPIs can be set to "", so can't check for empty string, as it is allowed
|
||||
if ensapi, exists := os.LookupEnv(SWARM_ENV_ENS_API); exists {
|
||||
ensAPIs := strings.Split(ensapi, ",")
|
||||
// Disable ENS resolver if SWARM_ENS_API="" is specified
|
||||
if len(ensAPIs) == 1 && ensAPIs[0] == "" {
|
||||
currentConfig.EnsDisabled = true
|
||||
currentConfig.EnsAPIs = nil
|
||||
} else {
|
||||
currentConfig.EnsDisabled = false
|
||||
currentConfig.EnsAPIs = ensAPIs
|
||||
}
|
||||
}
|
||||
|
||||
if ensaddr := os.Getenv(SWARM_ENV_ENS_ADDR); ensaddr != "" {
|
||||
currentConfig.EnsRoot = common.HexToAddress(ensaddr)
|
||||
}
|
||||
|
||||
if cors := os.Getenv(SWARM_ENV_CORS); cors != "" {
|
||||
currentConfig.Cors = cors
|
||||
}
|
||||
|
||||
if bootnodes := os.Getenv(SWARM_ENV_BOOTNODES); bootnodes != "" {
|
||||
currentConfig.BootNodes = bootnodes
|
||||
}
|
||||
|
||||
return currentConfig
|
||||
}
|
||||
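A small hypothetical usage sketch of the environment path (same-package assumption as above; not part of the commit). The TestEnvVars test added later in this diff exercises the same behaviour end to end through a real child process.

// Hypothetical sketch: a SWARM_* variable set in the process environment
// overrides the corresponding default before the node is built.
func exampleEnvOverride() {
    os.Setenv(SWARM_ENV_PORT, "8600")
    defer os.Unsetenv(SWARM_ENV_PORT)

    config := envVarsOverride(bzzapi.NewDefaultConfig())
    fmt.Println(config.Port) // expected to print 8600
}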
|
||||
// dumpConfig is the dumpconfig command.
|
||||
// It writes the fully resolved configuration to STDOUT.
|
||||
func dumpConfig(ctx *cli.Context) error {
|
||||
cfg, err := buildConfig(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf(fmt.Sprintf("Uh oh - dumpconfig triggered an error %v", err))
|
||||
}
|
||||
comment := ""
|
||||
out, err := tomlSettings.Marshal(&cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
io.WriteString(os.Stdout, comment)
|
||||
os.Stdout.Write(out)
|
||||
return nil
|
||||
}
|
||||
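In practice the command pairs with the --config flag defined above: something along the lines of swarm dumpconfig > swarm.toml to capture the resolved settings, then swarm --config swarm.toml to boot from the edited file (illustrative invocation only).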
|
||||
//deprecated flags checked here
|
||||
func checkDeprecated(ctx *cli.Context) {
|
||||
// exit if the deprecated --ethapi flag is set
|
||||
if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
|
||||
utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
|
||||
}
|
||||
// warn if --ens-api flag is set
|
||||
if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
|
||||
log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
|
||||
}
|
||||
}
|
||||
|
||||
//print a Config as string
|
||||
func printConfig(config *bzzapi.Config) string {
|
||||
out, err := tomlSettings.Marshal(&config)
|
||||
if err != nil {
|
||||
return (fmt.Sprintf("Something is not right with the configuration: %v", err))
|
||||
}
|
||||
return string(out)
|
||||
}
|
459
cmd/swarm/config_test.go
Normal file
@ -0,0 +1,459 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
"github.com/ethereum/go-ethereum/swarm/api"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
)
|
||||
|
||||
func TestDumpConfig(t *testing.T) {
|
||||
swarm := runSwarm(t, "dumpconfig")
|
||||
defaultConf := api.NewDefaultConfig()
|
||||
out, err := tomlSettings.Marshal(&defaultConf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
swarm.Expect(string(out))
|
||||
swarm.ExpectExit()
|
||||
}
|
||||
|
||||
func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
|
||||
flags := []string{
|
||||
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
|
||||
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
|
||||
fmt.Sprintf("--%s", SwarmSwapEnabledFlag.Name),
|
||||
}
|
||||
|
||||
swarm := runSwarm(t, flags...)
|
||||
swarm.Expect("Fatal: " + SWARM_ERR_SWAP_SET_NO_API + "\n")
|
||||
swarm.ExpectExit()
|
||||
}
|
||||
|
||||
func TestFailsNoBzzAccount(t *testing.T) {
|
||||
flags := []string{
|
||||
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
|
||||
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
|
||||
}
|
||||
|
||||
swarm := runSwarm(t, flags...)
|
||||
swarm.Expect("Fatal: " + SWARM_ERR_NO_BZZACCOUNT + "\n")
|
||||
swarm.ExpectExit()
|
||||
}
|
||||
|
||||
func TestCmdLineOverrides(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "bzztest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
conf, account := getTestAccount(t, dir)
|
||||
node := &testNode{Dir: dir}
|
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
flags := []string{
|
||||
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
|
||||
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
|
||||
fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
|
||||
fmt.Sprintf("--%s", CorsStringFlag.Name), "*",
|
||||
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
|
||||
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
|
||||
"--datadir", dir,
|
||||
"--ipcpath", conf.IPCPath,
|
||||
}
|
||||
node.Cmd = runSwarm(t, flags...)
|
||||
node.Cmd.InputLine(testPassphrase)
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
node.Shutdown()
|
||||
}
|
||||
}()
|
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
|
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if node.Client == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// load info
|
||||
var info swarm.Info
|
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if info.Port != httpPort {
|
||||
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
|
||||
}
|
||||
|
||||
if info.NetworkId != 42 {
|
||||
t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId)
|
||||
}
|
||||
|
||||
if info.SyncEnabled != true {
|
||||
t.Fatal("Expected Sync to be enabled, but is false")
|
||||
}
|
||||
|
||||
if info.Cors != "*" {
|
||||
t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
|
||||
}
|
||||
|
||||
node.Shutdown()
|
||||
}
|
||||
|
||||
func TestFileOverrides(t *testing.T) {
|
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
//create a config file
|
||||
//first, create a default conf
|
||||
defaultConf := api.NewDefaultConfig()
|
||||
//change some values in order to test if they have been loaded
|
||||
defaultConf.SyncEnabled = true
|
||||
defaultConf.NetworkId = 54
|
||||
defaultConf.Port = httpPort
|
||||
defaultConf.StoreParams.DbCapacity = 9000000
|
||||
defaultConf.ChunkerParams.Branches = 64
|
||||
defaultConf.HiveParams.CallInterval = 6000000000
|
||||
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
|
||||
defaultConf.SyncParams.KeyBufferSize = 512
|
||||
//create a TOML string
|
||||
out, err := tomlSettings.Marshal(&defaultConf)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
|
||||
}
|
||||
//create file
|
||||
f, err := ioutil.TempFile("", "testconfig.toml")
|
||||
if err != nil {
|
||||
t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
|
||||
}
|
||||
//write file
|
||||
_, err = f.WriteString(string(out))
|
||||
if err != nil {
|
||||
t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
|
||||
}
|
||||
f.Sync()
|
||||
|
||||
dir, err := ioutil.TempDir("", "bzztest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
conf, account := getTestAccount(t, dir)
|
||||
node := &testNode{Dir: dir}
|
||||
|
||||
flags := []string{
|
||||
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
|
||||
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
|
||||
"--ens-api", "",
|
||||
"--ipcpath", conf.IPCPath,
|
||||
"--datadir", dir,
|
||||
}
|
||||
node.Cmd = runSwarm(t, flags...)
|
||||
node.Cmd.InputLine(testPassphrase)
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
node.Shutdown()
|
||||
}
|
||||
}()
|
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
|
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if node.Client == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// load info
|
||||
var info swarm.Info
|
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if info.Port != httpPort {
|
||||
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
|
||||
}
|
||||
|
||||
if info.NetworkId != 54 {
|
||||
t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
|
||||
}
|
||||
|
||||
if info.SyncEnabled != true {
|
||||
t.Fatal("Expected Sync to be enabled, but is false")
|
||||
}
|
||||
|
||||
if info.StoreParams.DbCapacity != 9000000 {
|
||||
t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
|
||||
}
|
||||
|
||||
if info.ChunkerParams.Branches != 64 {
|
||||
t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
|
||||
}
|
||||
|
||||
if info.HiveParams.CallInterval != 6000000000 {
|
||||
t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
|
||||
}
|
||||
|
||||
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
|
||||
t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
|
||||
}
|
||||
|
||||
if info.SyncParams.KeyBufferSize != 512 {
|
||||
t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
|
||||
}
|
||||
|
||||
node.Shutdown()
|
||||
}
|
||||
|
||||
func TestEnvVars(t *testing.T) {
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
envVars := os.Environ()
|
||||
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort))
|
||||
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999"))
|
||||
envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*"))
|
||||
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true"))
|
||||
|
||||
dir, err := ioutil.TempDir("", "bzztest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
conf, account := getTestAccount(t, dir)
|
||||
node := &testNode{Dir: dir}
|
||||
flags := []string{
|
||||
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
|
||||
"--ens-api", "",
|
||||
"--datadir", dir,
|
||||
"--ipcpath", conf.IPCPath,
|
||||
}
|
||||
|
||||
//node.Cmd = runSwarm(t,flags...)
|
||||
//node.Cmd.cmd.Env = envVars
|
||||
//the above assignment does not work, so we need a custom Cmd here in order to pass envVars:
|
||||
cmd := &exec.Cmd{
|
||||
Path: reexec.Self(),
|
||||
Args: append([]string{"swarm-test"}, flags...),
|
||||
Stderr: os.Stderr,
|
||||
Stdout: os.Stdout,
|
||||
}
|
||||
cmd.Env = envVars
|
||||
//stdout, err := cmd.StdoutPipe()
|
||||
//if err != nil {
|
||||
// t.Fatal(err)
|
||||
//}
|
||||
//stdout = bufio.NewReader(stdout)
|
||||
var stdin io.WriteCloser
|
||||
if stdin, err = cmd.StdinPipe(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cmd.Start(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
//cmd.InputLine(testPassphrase)
|
||||
io.WriteString(stdin, testPassphrase+"\n")
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
node.Shutdown()
|
||||
cmd.Process.Kill()
|
||||
}
|
||||
}()
|
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
|
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if node.Client == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// load info
|
||||
var info swarm.Info
|
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if info.Port != httpPort {
|
||||
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
|
||||
}
|
||||
|
||||
if info.NetworkId != 999 {
|
||||
t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId)
|
||||
}
|
||||
|
||||
if info.Cors != "*" {
|
||||
t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
|
||||
}
|
||||
|
||||
if info.SyncEnabled != true {
|
||||
t.Fatal("Expected Sync to be enabled, but is false")
|
||||
}
|
||||
|
||||
node.Shutdown()
|
||||
cmd.Process.Kill()
|
||||
}
|
||||
|
||||
func TestCmdLineOverridesFile(t *testing.T) {
|
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
//create a config file
|
||||
//first, create a default conf
|
||||
defaultConf := api.NewDefaultConfig()
|
||||
//change some values in order to test if they have been loaded
|
||||
defaultConf.SyncEnabled = false
|
||||
defaultConf.NetworkId = 54
|
||||
defaultConf.Port = "8588"
|
||||
defaultConf.StoreParams.DbCapacity = 9000000
|
||||
defaultConf.ChunkerParams.Branches = 64
|
||||
defaultConf.HiveParams.CallInterval = 6000000000
|
||||
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
|
||||
defaultConf.SyncParams.KeyBufferSize = 512
|
||||
//create a TOML file
|
||||
out, err := tomlSettings.Marshal(&defaultConf)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
|
||||
}
|
||||
//write file
|
||||
f, err := ioutil.TempFile("", "testconfig.toml")
|
||||
if err != nil {
|
||||
t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
|
||||
}
|
||||
//write file
|
||||
_, err = f.WriteString(string(out))
|
||||
if err != nil {
|
||||
t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
|
||||
}
|
||||
f.Sync()
|
||||
|
||||
dir, err := ioutil.TempDir("", "bzztest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
conf, account := getTestAccount(t, dir)
|
||||
node := &testNode{Dir: dir}
|
||||
|
||||
expectNetworkId := uint64(77)
|
||||
|
||||
flags := []string{
|
||||
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77",
|
||||
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
|
||||
fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
|
||||
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
|
||||
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
|
||||
"--ens-api", "",
|
||||
"--datadir", dir,
|
||||
"--ipcpath", conf.IPCPath,
|
||||
}
|
||||
node.Cmd = runSwarm(t, flags...)
|
||||
node.Cmd.InputLine(testPassphrase)
|
||||
defer func() {
|
||||
if t.Failed() {
|
||||
node.Shutdown()
|
||||
}
|
||||
}()
|
||||
// wait for the node to start
|
||||
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
|
||||
node.Client, err = rpc.Dial(conf.IPCEndpoint())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if node.Client == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// load info
|
||||
var info swarm.Info
|
||||
if err := node.Client.Call(&info, "bzz_info"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if info.Port != httpPort {
|
||||
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
|
||||
}
|
||||
|
||||
if info.NetworkId != expectNetworkId {
|
||||
t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId)
|
||||
}
|
||||
|
||||
if info.SyncEnabled != true {
|
||||
t.Fatal("Expected Sync to be enabled, but is false")
|
||||
}
|
||||
|
||||
if info.StoreParams.DbCapacity != 9000000 {
|
||||
t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
|
||||
}
|
||||
|
||||
if info.ChunkerParams.Branches != 64 {
|
||||
t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
|
||||
}
|
||||
|
||||
if info.HiveParams.CallInterval != 6000000000 {
|
||||
t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
|
||||
}
|
||||
|
||||
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
|
||||
t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
|
||||
}
|
||||
|
||||
if info.SyncParams.KeyBufferSize != 512 {
|
||||
t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
|
||||
}
|
||||
|
||||
node.Shutdown()
|
||||
}
|
@ -27,7 +27,6 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unicode"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
@ -44,6 +43,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
|
||||
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
@ -62,44 +62,53 @@ var (
|
||||
|
||||
var (
|
||||
ChequebookAddrFlag = cli.StringFlag{
|
||||
Name: "chequebook",
|
||||
Usage: "chequebook contract address",
|
||||
Name: "chequebook",
|
||||
Usage: "chequebook contract address",
|
||||
EnvVar: SWARM_ENV_CHEQUEBOOK_ADDR,
|
||||
}
|
||||
SwarmAccountFlag = cli.StringFlag{
|
||||
Name: "bzzaccount",
|
||||
Usage: "Swarm account key file",
|
||||
Name: "bzzaccount",
|
||||
Usage: "Swarm account key file",
|
||||
EnvVar: SWARM_ENV_ACCOUNT,
|
||||
}
|
||||
SwarmListenAddrFlag = cli.StringFlag{
|
||||
Name: "httpaddr",
|
||||
Usage: "Swarm HTTP API listening interface",
|
||||
Name: "httpaddr",
|
||||
Usage: "Swarm HTTP API listening interface",
|
||||
EnvVar: SWARM_ENV_LISTEN_ADDR,
|
||||
}
|
||||
SwarmPortFlag = cli.StringFlag{
|
||||
Name: "bzzport",
|
||||
Usage: "Swarm local http api port",
|
||||
Name: "bzzport",
|
||||
Usage: "Swarm local http api port",
|
||||
EnvVar: SWARM_ENV_PORT,
|
||||
}
|
||||
SwarmNetworkIdFlag = cli.IntFlag{
|
||||
Name: "bzznetworkid",
|
||||
Usage: "Network identifier (integer, default 3=swarm testnet)",
|
||||
Name: "bzznetworkid",
|
||||
Usage: "Network identifier (integer, default 3=swarm testnet)",
|
||||
EnvVar: SWARM_ENV_NETWORK_ID,
|
||||
}
|
||||
SwarmConfigPathFlag = cli.StringFlag{
|
||||
Name: "bzzconfig",
|
||||
Usage: "Swarm config file path (datadir/bzz)",
|
||||
Usage: "DEPRECATED: please use --config path/to/TOML-file",
|
||||
}
|
||||
SwarmSwapEnabledFlag = cli.BoolFlag{
|
||||
Name: "swap",
|
||||
Usage: "Swarm SWAP enabled (default false)",
|
||||
Name: "swap",
|
||||
Usage: "Swarm SWAP enabled (default false)",
|
||||
EnvVar: SWARM_ENV_SWAP_ENABLE,
|
||||
}
|
||||
SwarmSwapAPIFlag = cli.StringFlag{
|
||||
Name: "swap-api",
|
||||
Usage: "URL of the Ethereum API provider to use to settle SWAP payments",
|
||||
Name: "swap-api",
|
||||
Usage: "URL of the Ethereum API provider to use to settle SWAP payments",
|
||||
EnvVar: SWARM_ENV_SWAP_API,
|
||||
}
|
||||
SwarmSyncEnabledFlag = cli.BoolTFlag{
|
||||
Name: "sync",
|
||||
Usage: "Swarm Syncing enabled (default true)",
|
||||
Name: "sync",
|
||||
Usage: "Swarm Syncing enabled (default true)",
|
||||
EnvVar: SWARM_ENV_SYNC_ENABLE,
|
||||
}
|
||||
EnsAPIFlag = cli.StringSliceFlag{
|
||||
Name: "ens-api",
|
||||
Usage: "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url",
|
||||
Name: "ens-api",
|
||||
Usage: "ENS API endpoint for a TLD and with contract address, can be repeated, format [tld:][contract-addr@]url",
|
||||
EnvVar: SWARM_ENV_ENS_API,
|
||||
}
|
||||
SwarmApiFlag = cli.StringFlag{
|
||||
Name: "bzzapi",
|
||||
@ -127,8 +136,9 @@ var (
|
||||
Usage: "force mime type",
|
||||
}
|
||||
CorsStringFlag = cli.StringFlag{
|
||||
Name: "corsdomain",
|
||||
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
|
||||
Name: "corsdomain",
|
||||
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
|
||||
EnvVar: SWARM_ENV_CORS,
|
||||
}
|
||||
|
||||
// the following flags are deprecated and should be removed in the future
|
||||
@ -142,6 +152,12 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
//a few constant error messages, useful for error comparisons in the tests
|
||||
var (
|
||||
SWARM_ERR_NO_BZZACCOUNT = "bzzaccount option is required but not set; check your config file, command line or environment variables"
|
||||
SWARM_ERR_SWAP_SET_NO_API = "SWAP is enabled but --swap-api is not set"
|
||||
)
|
||||
|
||||
var defaultNodeConfig = node.DefaultConfig
|
||||
|
||||
// This init function sets defaults so cmd/swarm can run alongside geth.
|
||||
@ -297,6 +313,8 @@ Remove corrupt entries from a local chunk database.
|
||||
DEPRECATED: use 'swarm db clean'.
|
||||
`,
|
||||
},
|
||||
// See config.go
|
||||
DumpConfigCommand,
|
||||
}
|
||||
sort.Sort(cli.CommandsByName(app.Commands))
|
||||
|
||||
@ -319,6 +337,7 @@ DEPRECATED: use 'swarm db clean'.
|
||||
// bzzd-specific flags
|
||||
CorsStringFlag,
|
||||
EnsAPIFlag,
|
||||
SwarmTomlConfigPathFlag,
|
||||
SwarmConfigPathFlag,
|
||||
SwarmSwapEnabledFlag,
|
||||
SwarmSwapAPIFlag,
|
||||
@ -372,19 +391,32 @@ func version(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
func bzzd(ctx *cli.Context) error {
|
||||
// exit if the deprecated --ethapi flag is set
|
||||
if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
|
||||
utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
|
||||
//build a valid bzzapi.Config from all available sources:
|
||||
//default config, file config, command line and env vars
|
||||
bzzconfig, err := buildConfig(ctx)
|
||||
if err != nil {
|
||||
utils.Fatalf("unable to configure swarm: %v", err)
|
||||
}
|
||||
|
||||
cfg := defaultNodeConfig
|
||||
//geth only supports --datadir via command line
|
||||
//in order to be consistent within swarm, if we pass --datadir via environment variable
|
||||
//or via config file, we get the same directory for geth and swarm
|
||||
if _, err := os.Stat(bzzconfig.Path); err == nil {
|
||||
cfg.DataDir = bzzconfig.Path
|
||||
}
|
||||
//setup the ethereum node
|
||||
utils.SetNodeConfig(ctx, &cfg)
|
||||
stack, err := node.New(&cfg)
|
||||
if err != nil {
|
||||
utils.Fatalf("can't create node: %v", err)
|
||||
}
|
||||
|
||||
registerBzzService(ctx, stack)
|
||||
//a few steps need to be done after the config phase is completed,
|
||||
//due to overriding behavior
|
||||
initSwarmNode(bzzconfig, stack, ctx)
|
||||
//register BZZ as node.Service in the ethereum node
|
||||
registerBzzService(bzzconfig, ctx, stack)
|
||||
//start the node
|
||||
utils.StartNode(stack)
|
||||
|
||||
go func() {
|
||||
@ -396,13 +428,12 @@ func bzzd(ctx *cli.Context) error {
|
||||
stack.Stop()
|
||||
}()
|
||||
|
||||
networkId := ctx.GlobalUint64(SwarmNetworkIdFlag.Name)
|
||||
// Add bootnodes as initial peers.
|
||||
if ctx.GlobalIsSet(utils.BootnodesFlag.Name) {
|
||||
bootnodes := strings.Split(ctx.GlobalString(utils.BootnodesFlag.Name), ",")
|
||||
if bzzconfig.BootNodes != "" {
|
||||
bootnodes := strings.Split(bzzconfig.BootNodes, ",")
|
||||
injectBootnodes(stack.Server(), bootnodes)
|
||||
} else {
|
||||
if networkId == 3 {
|
||||
if bzzconfig.NetworkId == 3 {
|
||||
injectBootnodes(stack.Server(), testbetBootNodes)
|
||||
}
|
||||
}
|
||||
@ -411,139 +442,35 @@ func bzzd(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func registerBzzService(ctx *cli.Context, stack *node.Node) {
|
||||
prvkey := getAccount(ctx, stack)
|
||||
|
||||
chbookaddr := common.HexToAddress(ctx.GlobalString(ChequebookAddrFlag.Name))
|
||||
bzzdir := ctx.GlobalString(SwarmConfigPathFlag.Name)
|
||||
if bzzdir == "" {
|
||||
bzzdir = stack.InstanceDir()
|
||||
}
|
||||
|
||||
bzzconfig, err := bzzapi.NewConfig(bzzdir, chbookaddr, prvkey, ctx.GlobalUint64(SwarmNetworkIdFlag.Name))
|
||||
if err != nil {
|
||||
utils.Fatalf("unable to configure swarm: %v", err)
|
||||
}
|
||||
bzzport := ctx.GlobalString(SwarmPortFlag.Name)
|
||||
if len(bzzport) > 0 {
|
||||
bzzconfig.Port = bzzport
|
||||
}
|
||||
if bzzaddr := ctx.GlobalString(SwarmListenAddrFlag.Name); bzzaddr != "" {
|
||||
bzzconfig.ListenAddr = bzzaddr
|
||||
}
|
||||
swapEnabled := ctx.GlobalBool(SwarmSwapEnabledFlag.Name)
|
||||
syncEnabled := ctx.GlobalBoolT(SwarmSyncEnabledFlag.Name)
|
||||
|
||||
swapapi := ctx.GlobalString(SwarmSwapAPIFlag.Name)
|
||||
if swapEnabled && swapapi == "" {
|
||||
utils.Fatalf("SWAP is enabled but --swap-api is not set")
|
||||
}
|
||||
|
||||
ensAPIs := ctx.GlobalStringSlice(EnsAPIFlag.Name)
|
||||
ensAddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name)
|
||||
|
||||
if ensAddr != "" {
|
||||
log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
|
||||
}
|
||||
|
||||
cors := ctx.GlobalString(CorsStringFlag.Name)
|
||||
func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
|
||||
|
||||
//define the swarm service boot function
|
||||
boot := func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
var swapClient *ethclient.Client
|
||||
if swapapi != "" {
|
||||
log.Info("connecting to SWAP API", "url", swapapi)
|
||||
swapClient, err = ethclient.Dial(swapapi)
|
||||
var err error
|
||||
if bzzconfig.SwapApi != "" {
|
||||
log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi)
|
||||
swapClient, err = ethclient.Dial(bzzconfig.SwapApi)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error connecting to SWAP API %s: %s", swapapi, err)
|
||||
return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err)
|
||||
}
|
||||
}
|
||||
|
||||
ensClientConfigs := []swarm.ENSClientConfig{}
|
||||
switch len(ensAPIs) {
|
||||
case 0:
|
||||
ensClientConfigs = append(ensClientConfigs, swarm.ENSClientConfig{
|
||||
Endpoint: node.DefaultIPCEndpoint("geth"),
|
||||
ContractAddress: ensAddr,
|
||||
})
|
||||
case 1:
|
||||
// Check if "--ens-api ''" is specified in order to disable ENS.
|
||||
if ensAPIs[0] == "" {
|
||||
break
|
||||
}
|
||||
// Check if only one --ens-api is specified in order to use --ens-addr value
|
||||
// to preserve the backward compatibility with single --ens-api flag.
|
||||
c := parseFlagEnsAPI(ensAPIs[0])
|
||||
if ensAddr != "" {
|
||||
// If contract address is specified in both cases, check for conflict.
|
||||
if c.ContractAddress != "" && ensAddr != c.ContractAddress {
|
||||
utils.Fatalf("--ens-addr flag in conflict with --ens-api flag contract address")
|
||||
}
|
||||
c.ContractAddress = ensAddr
|
||||
}
|
||||
ensClientConfigs = append(ensClientConfigs, c)
|
||||
default:
|
||||
// Backward compatibility with single --ens-api flag and --ens-addr is preserved.
|
||||
// Check for case where multiple --ens-api flags are set with --ens-addr where
|
||||
// the specified contract address is not clear to which api belongs.
|
||||
if ensAddr != "" {
|
||||
utils.Fatalf("--ens-addr flag can not be used with multiple --ens-api flags")
|
||||
}
|
||||
for _, s := range ensAPIs {
|
||||
if s != "" {
|
||||
ensClientConfigs = append(ensClientConfigs, parseFlagEnsAPI(s))
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(ensClientConfigs) == 0 {
|
||||
log.Warn("No ENS, please specify non-empty --ens-api to use domain name resolution")
|
||||
}
|
||||
|
||||
return swarm.NewSwarm(ctx, swapClient, ensClientConfigs, bzzconfig, swapEnabled, syncEnabled, cors)
|
||||
return swarm.NewSwarm(ctx, swapClient, bzzconfig)
|
||||
}
|
||||
//register within the ethereum node
|
||||
if err := stack.Register(boot); err != nil {
|
||||
utils.Fatalf("Failed to register the Swarm service: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// parseFlagEnsAPI parses EnsAPIFlag according to format
|
||||
// [tld:][contract-addr@]url and returns ENSClientConfig structure
|
||||
// with endpoint, contract address and TLD.
|
||||
func parseFlagEnsAPI(s string) swarm.ENSClientConfig {
|
||||
isAllLetterString := func(s string) bool {
|
||||
for _, r := range s {
|
||||
if !unicode.IsLetter(r) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
endpoint := s
|
||||
var addr, tld string
|
||||
if i := strings.Index(endpoint, ":"); i > 0 {
|
||||
if isAllLetterString(endpoint[:i]) && len(endpoint) > i+2 && endpoint[i+1:i+3] != "//" {
|
||||
tld = endpoint[:i]
|
||||
endpoint = endpoint[i+1:]
|
||||
}
|
||||
}
|
||||
if i := strings.Index(endpoint, "@"); i > 0 {
|
||||
addr = endpoint[:i]
|
||||
endpoint = endpoint[i+1:]
|
||||
}
|
||||
return swarm.ENSClientConfig{
|
||||
Endpoint: endpoint,
|
||||
ContractAddress: addr,
|
||||
TLD: tld,
|
||||
}
|
||||
}
|
||||
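For illustration, a hypothetical snippet (same package as this file assumed; not part of the commit) showing how the [tld:][contract-addr@]url format comes apart; the sample value mirrors one of the cases in the TestParseFlagEnsAPI table removed further down in this diff.

// Hypothetical sketch: all three optional parts supplied at once.
func exampleParseEnsAPI() {
    c := parseFlagEnsAPI("eth:314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234")
    fmt.Println(c.TLD)             // eth
    fmt.Println(c.ContractAddress) // 314159265dD8dbb310642f98f50C066173C1259b
    fmt.Println(c.Endpoint)        // ws://127.0.0.1:1234
}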
|
||||
func getAccount(ctx *cli.Context, stack *node.Node) *ecdsa.PrivateKey {
|
||||
keyid := ctx.GlobalString(SwarmAccountFlag.Name)
|
||||
|
||||
if keyid == "" {
|
||||
utils.Fatalf("Option %q is required", SwarmAccountFlag.Name)
|
||||
func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.PrivateKey {
|
||||
//an account is mandatory
|
||||
if bzzaccount == "" {
|
||||
utils.Fatalf(SWARM_ERR_NO_BZZACCOUNT)
|
||||
}
|
||||
// Try to load the arg as a hex key file.
|
||||
if key, err := crypto.LoadECDSA(keyid); err == nil {
|
||||
if key, err := crypto.LoadECDSA(bzzaccount); err == nil {
|
||||
log.Info("Swarm account key loaded", "address", crypto.PubkeyToAddress(key.PublicKey))
|
||||
return key
|
||||
}
|
||||
@ -551,7 +478,7 @@ func getAccount(ctx *cli.Context, stack *node.Node) *ecdsa.PrivateKey {
|
||||
am := stack.AccountManager()
|
||||
ks := am.Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
|
||||
return decryptStoreAccount(ks, keyid, utils.MakePasswordList(ctx))
|
||||
return decryptStoreAccount(ks, bzzaccount, utils.MakePasswordList(ctx))
|
||||
}
|
||||
|
||||
func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []string) *ecdsa.PrivateKey {
|
||||
@ -569,7 +496,7 @@ func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []stri
|
||||
utils.Fatalf("Can't find swarm account key %s", account)
|
||||
}
|
||||
if err != nil {
|
||||
utils.Fatalf("Can't find swarm account key: %v", err)
|
||||
utils.Fatalf("Can't find swarm account key: %v - Is the provided bzzaccount(%s) from the right datadir/Path?", err, account)
|
||||
}
|
||||
keyjson, err := ioutil.ReadFile(a.URL.Path)
|
||||
if err != nil {
|
||||
|
@ -1,141 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/swarm"
|
||||
)
|
||||
|
||||
func TestParseFlagEnsAPI(t *testing.T) {
|
||||
for _, x := range []struct {
|
||||
description string
|
||||
value string
|
||||
config swarm.ENSClientConfig
|
||||
}{
|
||||
{
|
||||
description: "IPC endpoint",
|
||||
value: "/data/testnet/geth.ipc",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "/data/testnet/geth.ipc",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "HTTP endpoint",
|
||||
value: "http://127.0.0.1:1234",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "http://127.0.0.1:1234",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "WS endpoint",
|
||||
value: "ws://127.0.0.1:1234",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "ws://127.0.0.1:1234",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "IPC Endpoint and TLD",
|
||||
value: "test:/data/testnet/geth.ipc",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "/data/testnet/geth.ipc",
|
||||
TLD: "test",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "HTTP endpoint and TLD",
|
||||
value: "test:http://127.0.0.1:1234",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "http://127.0.0.1:1234",
|
||||
TLD: "test",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "WS endpoint and TLD",
|
||||
value: "test:ws://127.0.0.1:1234",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "ws://127.0.0.1:1234",
|
||||
TLD: "test",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "IPC Endpoint and contract address",
|
||||
value: "314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "/data/testnet/geth.ipc",
|
||||
ContractAddress: "314159265dD8dbb310642f98f50C066173C1259b",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "HTTP endpoint and contract address",
|
||||
value: "314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "http://127.0.0.1:1234",
|
||||
ContractAddress: "314159265dD8dbb310642f98f50C066173C1259b",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "WS endpoint and contract address",
|
||||
value: "314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "ws://127.0.0.1:1234",
|
||||
ContractAddress: "314159265dD8dbb310642f98f50C066173C1259b",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "IPC Endpoint, TLD and contract address",
|
||||
value: "test:314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "/data/testnet/geth.ipc",
|
||||
ContractAddress: "314159265dD8dbb310642f98f50C066173C1259b",
|
||||
TLD: "test",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "HTTP endpoint, TLD and contract address",
|
||||
value: "eth:314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "http://127.0.0.1:1234",
|
||||
ContractAddress: "314159265dD8dbb310642f98f50C066173C1259b",
|
||||
TLD: "eth",
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "WS endpoint, TLD and contract address",
|
||||
value: "eth:314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234",
|
||||
config: swarm.ENSClientConfig{
|
||||
Endpoint: "ws://127.0.0.1:1234",
|
||||
ContractAddress: "314159265dD8dbb310642f98f50C066173C1259b",
|
||||
TLD: "eth",
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(x.description, func(t *testing.T) {
|
||||
config := parseFlagEnsAPI(x.value)
|
||||
if config.Endpoint != x.config.Endpoint {
|
||||
t.Errorf("expected Endpoint %q, got %q", x.config.Endpoint, config.Endpoint)
|
||||
}
|
||||
if config.ContractAddress != x.config.ContractAddress {
|
||||
t.Errorf("expected ContractAddress %q, got %q", x.config.ContractAddress, config.ContractAddress)
|
||||
}
|
||||
if config.TLD != x.config.TLD {
|
||||
t.Errorf("expected TLD %q, got %q", x.config.TLD, config.TLD)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -30,6 +30,8 @@ import (
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
const bzzManifestJSON = "application/bzz-manifest+json"
|
||||
|
||||
func add(ctx *cli.Context) {
|
||||
args := ctx.Args()
|
||||
if len(args) < 3 {
|
||||
@ -145,7 +147,7 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
|
||||
if path == entry.Path {
|
||||
utils.Fatalf("Path %s already present, not adding anything", path)
|
||||
} else {
|
||||
if entry.ContentType == "application/bzz-manifest+json" {
|
||||
if entry.ContentType == bzzManifestJSON {
|
||||
prfxlen := strings.HasPrefix(path, entry.Path)
|
||||
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
||||
longestPathEntry = entry
|
||||
@ -207,7 +209,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
|
||||
if path == entry.Path {
|
||||
newEntry = entry
|
||||
} else {
|
||||
if entry.ContentType == "application/bzz-manifest+json" {
|
||||
if entry.ContentType == bzzManifestJSON {
|
||||
prfxlen := strings.HasPrefix(path, entry.Path)
|
||||
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
||||
longestPathEntry = entry
|
||||
@ -281,7 +283,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
|
||||
if path == entry.Path {
|
||||
entryToRemove = entry
|
||||
} else {
|
||||
if entry.ContentType == "application/bzz-manifest+json" {
|
||||
if entry.ContentType == bzzManifestJSON {
|
||||
prfxlen := strings.HasPrefix(path, entry.Path)
|
||||
if prfxlen && len(path) > len(longestPathEntry.Path) {
|
||||
longestPathEntry = entry
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/ethereum/go-ethereum/accounts"
|
||||
"github.com/ethereum/go-ethereum/accounts/keystore"
|
||||
"github.com/ethereum/go-ethereum/internal/cmdtest"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
@ -156,9 +157,9 @@ type testNode struct {
|
||||
|
||||
const testPassphrase = "swarm-test-passphrase"
|
||||
|
||||
func newTestNode(t *testing.T, dir string) *testNode {
|
||||
func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
|
||||
// create key
|
||||
conf := &node.Config{
|
||||
conf = &node.Config{
|
||||
DataDir: dir,
|
||||
IPCPath: "bzzd.ipc",
|
||||
NoUSB: true,
|
||||
@ -167,18 +168,24 @@ func newTestNode(t *testing.T, dir string) *testNode {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
account, err := n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
|
||||
account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
node := &testNode{Dir: dir}
|
||||
|
||||
// use a unique IPCPath when running tests on Windows
|
||||
if runtime.GOOS == "windows" {
|
||||
conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
|
||||
}
|
||||
|
||||
return conf, account
|
||||
}
|
||||
|
||||
func newTestNode(t *testing.T, dir string) *testNode {
|
||||
|
||||
conf, account := getTestAccount(t, dir)
|
||||
node := &testNode{Dir: dir}
|
||||
|
||||
// assign ports
|
||||
httpPort, err := assignTCPPort()
|
||||
if err != nil {
|
||||
|
@ -217,27 +217,27 @@ var (
|
||||
EthashCachesInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesinmem",
|
||||
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesInMem,
|
||||
Value: eth.DefaultConfig.Ethash.CachesInMem,
|
||||
}
|
||||
EthashCachesOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesondisk",
|
||||
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesOnDisk,
|
||||
Value: eth.DefaultConfig.Ethash.CachesOnDisk,
|
||||
}
|
||||
EthashDatasetDirFlag = DirectoryFlag{
|
||||
Name: "ethash.dagdir",
|
||||
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
|
||||
Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
|
||||
Value: DirectoryString{eth.DefaultConfig.Ethash.DatasetDir},
|
||||
}
|
||||
EthashDatasetsInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsinmem",
|
||||
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsInMem,
|
||||
Value: eth.DefaultConfig.Ethash.DatasetsInMem,
|
||||
}
|
||||
EthashDatasetsOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsondisk",
|
||||
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
Value: eth.DefaultConfig.Ethash.DatasetsOnDisk,
|
||||
}
|
||||
// Transaction pool settings
|
||||
TxPoolNoLocalsFlag = cli.BoolFlag{
|
||||
@ -584,6 +584,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls = params.TestnetBootnodes
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
urls = params.RinkebyBootnodes
|
||||
case cfg.BootstrapNodes != nil:
|
||||
return // already set, don't apply defaults.
|
||||
}
|
||||
|
||||
cfg.BootstrapNodes = make([]*discover.Node, 0, len(urls))
|
||||
@ -744,6 +746,12 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error
|
||||
if err != nil || index < 0 {
|
||||
return accounts.Account{}, fmt.Errorf("invalid account address or index %q", account)
|
||||
}
|
||||
log.Warn("-------------------------------------------------------------------")
|
||||
log.Warn("Referring to accounts by order in the keystore folder is dangerous!")
|
||||
log.Warn("This functionality is deprecated and will be removed in the future!")
|
||||
log.Warn("Please use explicit addresses! (can search via `geth account list`)")
|
||||
log.Warn("-------------------------------------------------------------------")
|
||||
|
||||
accs := ks.Accounts()
|
||||
if len(accs) <= index {
|
||||
return accounts.Account{}, fmt.Errorf("index %d higher than number of accounts %d", index, len(accs))
|
||||
@ -760,15 +768,6 @@ func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) {
|
||||
Fatalf("Option %q: %v", EtherbaseFlag.Name, err)
|
||||
}
|
||||
cfg.Etherbase = account.Address
|
||||
return
|
||||
}
|
||||
accounts := ks.Accounts()
|
||||
if (cfg.Etherbase == common.Address{}) {
|
||||
if len(accounts) > 0 {
|
||||
cfg.Etherbase = accounts[0].Address
|
||||
} else {
|
||||
log.Warn("No etherbase set and no accounts found as default")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -910,34 +909,60 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
|
||||
|
||||
func setEthash(ctx *cli.Context, cfg *eth.Config) {
|
||||
if ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
|
||||
cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
|
||||
cfg.Ethash.CacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(EthashDatasetDirFlag.Name) {
|
||||
cfg.EthashDatasetDir = ctx.GlobalString(EthashDatasetDirFlag.Name)
|
||||
cfg.Ethash.DatasetDir = ctx.GlobalString(EthashDatasetDirFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(EthashCachesInMemoryFlag.Name) {
|
||||
cfg.EthashCachesInMem = ctx.GlobalInt(EthashCachesInMemoryFlag.Name)
|
||||
cfg.Ethash.CachesInMem = ctx.GlobalInt(EthashCachesInMemoryFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(EthashCachesOnDiskFlag.Name) {
|
||||
cfg.EthashCachesOnDisk = ctx.GlobalInt(EthashCachesOnDiskFlag.Name)
|
||||
cfg.Ethash.CachesOnDisk = ctx.GlobalInt(EthashCachesOnDiskFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(EthashDatasetsInMemoryFlag.Name) {
|
||||
cfg.EthashDatasetsInMem = ctx.GlobalInt(EthashDatasetsInMemoryFlag.Name)
|
||||
cfg.Ethash.DatasetsInMem = ctx.GlobalInt(EthashDatasetsInMemoryFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(EthashDatasetsOnDiskFlag.Name) {
|
||||
cfg.EthashDatasetsOnDisk = ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name)
|
||||
cfg.Ethash.DatasetsOnDisk = ctx.GlobalInt(EthashDatasetsOnDiskFlag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func checkExclusive(ctx *cli.Context, flags ...cli.Flag) {
|
||||
// checkExclusive verifies that only a single instance of the provided flags was
|
||||
// set by the user. Each flag might optionally be followed by a string type to
|
||||
// specialize it further.
|
||||
func checkExclusive(ctx *cli.Context, args ...interface{}) {
|
||||
set := make([]string, 0, 1)
|
||||
for _, flag := range flags {
|
||||
for i := 0; i < len(args); i++ {
|
||||
// Make sure the next argument is a flag and skip if not set
|
||||
flag, ok := args[i].(cli.Flag)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("invalid argument, not cli.Flag type: %T", args[i]))
|
||||
}
|
||||
// Check if next arg extends current and expand its name if so
|
||||
name := flag.GetName()
|
||||
|
||||
if i+1 < len(args) {
|
||||
switch option := args[i+1].(type) {
|
||||
case string:
|
||||
// Extended flag, expand the name and shift the arguments
|
||||
if ctx.GlobalString(flag.GetName()) == option {
|
||||
name += "=" + option
|
||||
}
|
||||
i++
|
||||
|
||||
case cli.Flag:
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid argument, not cli.Flag or string extension: %T", args[i+1]))
|
||||
}
|
||||
}
|
||||
// Mark the flag if it's set
|
||||
if ctx.GlobalIsSet(flag.GetName()) {
|
||||
set = append(set, "--"+flag.GetName())
|
||||
set = append(set, "--"+name)
|
||||
}
|
||||
}
|
||||
if len(set) > 1 {
|
||||
Fatalf("flags %v can't be used at the same time", strings.Join(set, ", "))
|
||||
Fatalf("Flags %v can't be used at the same time", strings.Join(set, ", "))
|
||||
}
|
||||
}
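The new variadic signature lets a flag be followed by a string that narrows the conflict to one particular value of that flag, as in the checkExclusive(ctx, LightServFlag, SyncModeFlag, "light") call added in the SetEthConfig hunk below. A toy, dependency-free model of the pairing logic (plain strings stand in for cli.Flag and the ctx lookups), just to show how the optional specializer is consumed:

package main

import (
	"fmt"
	"strings"
)

// flagName stands in for cli.Flag; a plain string following a flag in the
// argument list acts as the optional value specializer.
type flagName string

// checkExclusive is a simplified re-implementation of the logic above: walk
// the argument list, treat a string after a flag as a "flag=value"
// specialization of its name, and report an error when more than one of the
// listed flags is set. set maps a flag to the value the user passed.
func checkExclusive(set map[flagName]string, args ...interface{}) error {
	conflicting := make([]string, 0, 1)
	for i := 0; i < len(args); i++ {
		flag, ok := args[i].(flagName)
		if !ok {
			panic(fmt.Sprintf("invalid argument, not a flag: %T", args[i]))
		}
		name := string(flag)
		// A trailing string only expands the reported name, mirroring the diff.
		if i+1 < len(args) {
			if option, ok := args[i+1].(string); ok {
				if set[flag] == option {
					name += "=" + option
				}
				i++
			}
		}
		if _, isSet := set[flag]; isSet {
			conflicting = append(conflicting, "--"+name)
		}
	}
	if len(conflicting) > 1 {
		return fmt.Errorf("flags %s can't be used at the same time", strings.Join(conflicting, ", "))
	}
	return nil
}

func main() {
	// --light together with --syncmode=light reports a conflict...
	fmt.Println(checkExclusive(map[flagName]string{"light": "", "syncmode": "light"},
		flagName("light"), flagName("syncmode"), "light"))
	// ...while --syncmode=light on its own does not.
	fmt.Println(checkExclusive(map[flagName]string{"syncmode": "light"},
		flagName("light"), flagName("syncmode"), "light"))
}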
|
||||
|
||||
@ -956,6 +981,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
// Avoid conflicting network flags
|
||||
checkExclusive(ctx, DeveloperFlag, TestnetFlag, RinkebyFlag)
|
||||
checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag)
|
||||
checkExclusive(ctx, LightServFlag, LightModeFlag)
|
||||
checkExclusive(ctx, LightServFlag, SyncModeFlag, "light")
|
||||
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
setEtherbase(ctx, ks, cfg)
|
||||
@ -1159,10 +1186,14 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
|
||||
} else {
|
||||
engine = ethash.NewFaker()
|
||||
if !ctx.GlobalBool(FakePoWFlag.Name) {
|
||||
engine = ethash.New(
|
||||
stack.ResolvePath(eth.DefaultConfig.EthashCacheDir), eth.DefaultConfig.EthashCachesInMem, eth.DefaultConfig.EthashCachesOnDisk,
|
||||
stack.ResolvePath(eth.DefaultConfig.EthashDatasetDir), eth.DefaultConfig.EthashDatasetsInMem, eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
)
|
||||
engine = ethash.New(ethash.Config{
|
||||
CacheDir: stack.ResolvePath(eth.DefaultConfig.Ethash.CacheDir),
|
||||
CachesInMem: eth.DefaultConfig.Ethash.CachesInMem,
|
||||
CachesOnDisk: eth.DefaultConfig.Ethash.CachesOnDisk,
|
||||
DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir),
|
||||
DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem,
|
||||
DatasetsOnDisk: eth.DefaultConfig.Ethash.DatasetsOnDisk,
|
||||
})
|
||||
}
|
||||
}
|
||||
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
|
||||
|
@ -17,9 +17,7 @@
|
||||
// Package common contains various helper functions.
|
||||
package common
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
)
|
||||
import "encoding/hex"
|
||||
|
||||
func ToHex(b []byte) string {
|
||||
hex := Bytes2Hex(b)
|
||||
@ -35,12 +33,11 @@ func FromHex(s string) []byte {
|
||||
if s[0:2] == "0x" || s[0:2] == "0X" {
|
||||
s = s[2:]
|
||||
}
|
||||
if len(s)%2 == 1 {
|
||||
s = "0" + s
|
||||
}
|
||||
return Hex2Bytes(s)
|
||||
}
|
||||
return nil
|
||||
if len(s)%2 == 1 {
|
||||
s = "0" + s
|
||||
}
|
||||
return Hex2Bytes(s)
|
||||
}
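With this rearrangement the odd-length padding runs for every input, not only for 0x-prefixed ones, so "0x1" and "1" both decode to 0x01 (the new TestNoPrefixShortHexOddLength further down covers the latter). A self-contained sketch of the resulting behaviour, using encoding/hex directly in place of Hex2Bytes:

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// fromHex mirrors the reworked common.FromHex: strip an optional 0x/0X
// prefix, left-pad odd-length input with a zero, then hex-decode (the real
// Hex2Bytes likewise discards the decode error).
func fromHex(s string) []byte {
	if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") {
		s = s[2:]
	}
	if len(s)%2 == 1 {
		s = "0" + s
	}
	b, _ := hex.DecodeString(s)
	return b
}

func main() {
	fmt.Printf("%x\n", fromHex("0x1")) // 01
	fmt.Printf("%x\n", fromHex("1"))   // 01
}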
|
||||
|
||||
// Copy bytes
|
||||
@ -56,14 +53,24 @@ func CopyBytes(b []byte) (copiedBytes []byte) {
|
||||
return
|
||||
}
|
||||
|
||||
func HasHexPrefix(str string) bool {
|
||||
l := len(str)
|
||||
return l >= 2 && str[0:2] == "0x"
|
||||
func hasHexPrefix(str string) bool {
|
||||
return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
|
||||
}
|
||||
|
||||
func IsHex(str string) bool {
|
||||
l := len(str)
|
||||
return l >= 4 && l%2 == 0 && str[0:2] == "0x"
|
||||
func isHexCharacter(c byte) bool {
|
||||
return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
|
||||
}
|
||||
|
||||
func isHex(str string) bool {
|
||||
if len(str)%2 != 0 {
|
||||
return false
|
||||
}
|
||||
for _, c := range []byte(str) {
|
||||
if !isHexCharacter(c) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func Bytes2Hex(d []byte) string {
|
||||
|
@ -34,19 +34,6 @@ func (s *BytesSuite) TestCopyBytes(c *checker.C) {
|
||||
c.Assert(res1, checker.DeepEquals, exp1)
|
||||
}
|
||||
|
||||
func (s *BytesSuite) TestIsHex(c *checker.C) {
|
||||
data1 := "a9e67e"
|
||||
exp1 := false
|
||||
res1 := IsHex(data1)
|
||||
c.Assert(res1, checker.DeepEquals, exp1)
|
||||
|
||||
data2 := "0xa9e67e00"
|
||||
exp2 := true
|
||||
res2 := IsHex(data2)
|
||||
c.Assert(res2, checker.DeepEquals, exp2)
|
||||
|
||||
}
|
||||
|
||||
func (s *BytesSuite) TestLeftPadBytes(c *checker.C) {
|
||||
val1 := []byte{1, 2, 3, 4}
|
||||
exp1 := []byte{0, 0, 0, 0, 1, 2, 3, 4}
|
||||
@ -74,7 +61,28 @@ func TestFromHex(t *testing.T) {
|
||||
expected := []byte{1}
|
||||
result := FromHex(input)
|
||||
if !bytes.Equal(expected, result) {
|
||||
t.Errorf("Expected % x got % x", expected, result)
|
||||
t.Errorf("Expected %x got %x", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsHex(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
ok bool
|
||||
}{
|
||||
{"", true},
|
||||
{"0", false},
|
||||
{"00", true},
|
||||
{"a9e67e", true},
|
||||
{"A9E67E", true},
|
||||
{"0xa9e67e", false},
|
||||
{"a9e67e001", false},
|
||||
{"0xHELLO_MY_NAME_IS_STEVEN_@#$^&*", false},
|
||||
}
|
||||
for _, test := range tests {
|
||||
if ok := isHex(test.input); ok != test.ok {
|
||||
t.Errorf("isHex(%q) = %v, want %v", test.input, ok, test.ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -83,6 +91,15 @@ func TestFromHexOddLength(t *testing.T) {
|
||||
expected := []byte{1}
|
||||
result := FromHex(input)
|
||||
if !bytes.Equal(expected, result) {
|
||||
t.Errorf("Expected % x got % x", expected, result)
|
||||
t.Errorf("Expected %x got %x", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoPrefixShortHexOddLength(t *testing.T) {
|
||||
input := "1"
|
||||
expected := []byte{1}
|
||||
result := FromHex(input)
|
||||
if !bytes.Equal(expected, result) {
|
||||
t.Errorf("Expected %x got %x", expected, result)
|
||||
}
|
||||
}
|
||||
|
@ -150,13 +150,10 @@ func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) }
|
||||
// IsHexAddress verifies whether a string can represent a valid hex-encoded
|
||||
// Ethereum address or not.
|
||||
func IsHexAddress(s string) bool {
|
||||
if len(s) == 2+2*AddressLength && IsHex(s) {
|
||||
return true
|
||||
if hasHexPrefix(s) {
|
||||
s = s[2:]
|
||||
}
|
||||
if len(s) == 2*AddressLength && IsHex("0x"+s) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return len(s) == 2*AddressLength && isHex(s)
|
||||
}
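The rewritten check strips an optional 0x/0X prefix and then requires exactly 2*AddressLength (40) hex characters, which is what makes the un-prefixed and 0X-prefixed cases in the test below pass. A stand-alone illustration with local copies of the helpers (addressLength mirrors common.AddressLength):

package main

import "fmt"

const addressLength = 20 // common.AddressLength

func isHexChar(c byte) bool {
	return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
}

// isHexAddress mirrors the new common.IsHexAddress: an optional 0x/0X prefix
// followed by exactly 40 hex characters.
func isHexAddress(s string) bool {
	if len(s) >= 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') {
		s = s[2:]
	}
	if len(s) != 2*addressLength {
		return false
	}
	for i := 0; i < len(s); i++ {
		if !isHexChar(s[i]) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isHexAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed"))  // true
	fmt.Println(isHexAddress("5aaeb6053f3e94c9b9a09f33669435e7ef1beaed"))    // true, prefix is optional
	fmt.Println(isHexAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed1")) // false, 41 hex chars
}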
|
||||
|
||||
// Get the string representation of the underlying address
|
||||
|
@ -35,6 +35,30 @@ func TestBytesConversion(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsHexAddress(t *testing.T) {
|
||||
tests := []struct {
|
||||
str string
|
||||
exp bool
|
||||
}{
|
||||
{"0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed", true},
|
||||
{"5aaeb6053f3e94c9b9a09f33669435e7ef1beaed", true},
|
||||
{"0X5aaeb6053f3e94c9b9a09f33669435e7ef1beaed", true},
|
||||
{"0XAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", true},
|
||||
{"0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", true},
|
||||
{"0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed1", false},
|
||||
{"0x5aaeb6053f3e94c9b9a09f33669435e7ef1beae", false},
|
||||
{"5aaeb6053f3e94c9b9a09f33669435e7ef1beaed11", false},
|
||||
{"0xxaaeb6053f3e94c9b9a09f33669435e7ef1beaed", false},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
if result := IsHexAddress(test.str); result != test.exp {
|
||||
t.Errorf("IsHexAddress(%s) == %v; expected %v",
|
||||
test.str, result, test.exp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHashJsonValidation(t *testing.T) {
|
||||
var tests = []struct {
|
||||
Prefix string
|
||||
|
@ -703,8 +703,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
|
||||
|
||||
go func(idx int) {
|
||||
defer pend.Done()
|
||||
|
||||
ethash := New(cachedir, 0, 1, "", 0, 0)
|
||||
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal})
|
||||
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
|
||||
t.Errorf("proc %d: block verification failed: %v", idx, err)
|
||||
}
|
||||
|
@ -36,8 +36,8 @@ import (
|
||||
|
||||
// Ethash proof-of-work protocol constants.
|
||||
var (
|
||||
frontierBlockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
|
||||
byzantiumBlockReward *big.Int = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
|
||||
FrontierBlockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
|
||||
ByzantiumBlockReward *big.Int = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
|
||||
maxUncles = 2 // Maximum number of uncles allowed in a single block
|
||||
)
|
||||
|
||||
@ -68,7 +68,7 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {
|
||||
// stock Ethereum ethash engine.
|
||||
func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
|
||||
// If we're running a full engine faking, accept any input as valid
|
||||
if ethash.fakeFull {
|
||||
if ethash.config.PowMode == ModeFullFake {
|
||||
return nil
|
||||
}
|
||||
// Short circuit if the header is known, or it's parent not
|
||||
@ -89,7 +89,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He
|
||||
// a results channel to retrieve the async verifications.
|
||||
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
|
||||
// If we're running a full engine faking, accept any input as valid
|
||||
if ethash.fakeFull || len(headers) == 0 {
|
||||
if ethash.config.PowMode == ModeFullFake || len(headers) == 0 {
|
||||
abort, results := make(chan struct{}), make(chan error, len(headers))
|
||||
for i := 0; i < len(headers); i++ {
|
||||
results <- nil
|
||||
@ -169,7 +169,7 @@ func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainReader, headers []
|
||||
// rules of the stock Ethereum ethash engine.
|
||||
func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
|
||||
// If we're running a full engine faking, accept any input as valid
|
||||
if ethash.fakeFull {
|
||||
if ethash.config.PowMode == ModeFullFake {
|
||||
return nil
|
||||
}
|
||||
// Verify that there are at most 2 uncles included in this block
|
||||
@ -455,7 +455,7 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
|
||||
// the PoW difficulty requirements.
|
||||
func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
|
||||
// If we're running a fake PoW, accept any seal as valid
|
||||
if ethash.fakeMode {
|
||||
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
|
||||
time.Sleep(ethash.fakeDelay)
|
||||
if ethash.fakeFail == header.Number.Uint64() {
|
||||
return errInvalidPoW
|
||||
@ -480,7 +480,7 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
|
||||
cache := ethash.cache(number)
|
||||
|
||||
size := datasetSize(number)
|
||||
if ethash.tester {
|
||||
if ethash.config.PowMode == ModeTest {
|
||||
size = 32 * 1024
|
||||
}
|
||||
digest, result := hashimotoLight(size, cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
|
||||
@ -529,9 +529,9 @@ var (
|
||||
// TODO (karalabe): Move the chain maker into this package and make this private!
|
||||
func AccumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
|
||||
// Select the correct block reward based on chain progression
|
||||
blockReward := frontierBlockReward
|
||||
blockReward := FrontierBlockReward
|
||||
if config.IsByzantium(header.Number) {
|
||||
blockReward = byzantiumBlockReward
|
||||
blockReward = ByzantiumBlockReward
|
||||
}
|
||||
// Accumulate the rewards for the miner and any included uncles
|
||||
reward := new(big.Int).Set(blockReward)
|
||||
|
@ -45,7 +45,7 @@ var (
|
||||
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||
|
||||
// sharedEthash is a full instance that can be shared between multiple users.
|
||||
sharedEthash = New("", 3, 0, "", 1, 0)
|
||||
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal})
|
||||
|
||||
// algorithmRevision is the data structure version used for file naming.
|
||||
algorithmRevision = 23
|
||||
@ -320,15 +320,32 @@ func MakeDataset(block uint64, dir string) {
|
||||
d.release()
|
||||
}
|
||||
|
||||
// Mode defines the type and amount of PoW verification an ethash engine makes.
|
||||
type Mode uint
|
||||
|
||||
const (
|
||||
ModeNormal Mode = iota
|
||||
ModeShared
|
||||
ModeTest
|
||||
ModeFake
|
||||
ModeFullFake
|
||||
)
|
||||
|
||||
// Config are the configuration parameters of the ethash.
|
||||
type Config struct {
|
||||
CacheDir string
|
||||
CachesInMem int
|
||||
CachesOnDisk int
|
||||
DatasetDir string
|
||||
DatasetsInMem int
|
||||
DatasetsOnDisk int
|
||||
PowMode Mode
|
||||
}
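All of the previously separate constructor parameters and test/fake switches now travel through this single Config value, with PowMode replacing the old tester/fakeMode/fakeFull booleans. For reference, this is how a caller constructs a full engine under the new API (mirroring the MakeChain hunk above); the directory and count values here are placeholders, not the node's resolved defaults:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	// Field names match the Config struct introduced above.
	engine := ethash.New(ethash.Config{
		CacheDir:       "/tmp/ethash-caches",
		CachesInMem:    2,
		CachesOnDisk:   3,
		DatasetDir:     "/tmp/ethash-dags",
		DatasetsInMem:  1,
		DatasetsOnDisk: 2,
		PowMode:        ethash.ModeNormal,
	})
	fmt.Printf("%T\n", engine) // *ethash.Ethash
}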
|
||||
|
||||
// Ethash is a consensus engine based on proof-of-work implementing the ethash
|
||||
// algorithm.
|
||||
type Ethash struct {
|
||||
cachedir string // Data directory to store the verification caches
|
||||
cachesinmem int // Number of caches to keep in memory
|
||||
cachesondisk int // Number of caches to keep on disk
|
||||
dagdir string // Data directory to store full mining datasets
|
||||
dagsinmem int // Number of mining datasets to keep in memory
|
||||
dagsondisk int // Number of mining datasets to keep on disk
|
||||
config Config
|
||||
|
||||
caches map[uint64]*cache // In memory caches to avoid regenerating too often
|
||||
fcache *cache // Pre-generated cache for the estimated future epoch
|
||||
@ -342,10 +359,7 @@ type Ethash struct {
|
||||
hashrate metrics.Meter // Meter tracking the average hashrate
|
||||
|
||||
// The fields below are hooks for testing
|
||||
tester bool // Flag whether to use a smaller test dataset
|
||||
shared *Ethash // Shared PoW verifier to avoid cache regeneration
|
||||
fakeMode bool // Flag whether to disable PoW checking
|
||||
fakeFull bool // Flag whether to disable all consensus rules
|
||||
fakeFail uint64 // Block number which fails PoW check even in fake mode
|
||||
fakeDelay time.Duration // Time delay to sleep for before returning from verify
|
||||
|
||||
@ -353,28 +367,23 @@ type Ethash struct {
|
||||
}
|
||||
|
||||
// New creates a full sized ethash PoW scheme.
|
||||
func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
|
||||
if cachesinmem <= 0 {
|
||||
log.Warn("One ethash cache must always be in memory", "requested", cachesinmem)
|
||||
cachesinmem = 1
|
||||
func New(config Config) *Ethash {
|
||||
if config.CachesInMem <= 0 {
|
||||
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
|
||||
config.CachesInMem = 1
|
||||
}
|
||||
if cachedir != "" && cachesondisk > 0 {
|
||||
log.Info("Disk storage enabled for ethash caches", "dir", cachedir, "count", cachesondisk)
|
||||
if config.CacheDir != "" && config.CachesOnDisk > 0 {
|
||||
log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
|
||||
}
|
||||
if dagdir != "" && dagsondisk > 0 {
|
||||
log.Info("Disk storage enabled for ethash DAGs", "dir", dagdir, "count", dagsondisk)
|
||||
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
|
||||
log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
|
||||
}
|
||||
return &Ethash{
|
||||
cachedir: cachedir,
|
||||
cachesinmem: cachesinmem,
|
||||
cachesondisk: cachesondisk,
|
||||
dagdir: dagdir,
|
||||
dagsinmem: dagsinmem,
|
||||
dagsondisk: dagsondisk,
|
||||
caches: make(map[uint64]*cache),
|
||||
datasets: make(map[uint64]*dataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeter(),
|
||||
config: config,
|
||||
caches: make(map[uint64]*cache),
|
||||
datasets: make(map[uint64]*dataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeter(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -382,12 +391,14 @@ func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinme
|
||||
// purposes.
|
||||
func NewTester() *Ethash {
|
||||
return &Ethash{
|
||||
cachesinmem: 1,
|
||||
caches: make(map[uint64]*cache),
|
||||
datasets: make(map[uint64]*dataset),
|
||||
tester: true,
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeter(),
|
||||
config: Config{
|
||||
CachesInMem: 1,
|
||||
PowMode: ModeTest,
|
||||
},
|
||||
caches: make(map[uint64]*cache),
|
||||
datasets: make(map[uint64]*dataset),
|
||||
update: make(chan struct{}),
|
||||
hashrate: metrics.NewMeter(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -395,27 +406,45 @@ func NewTester() *Ethash {
|
||||
// all blocks' seal as valid, though they still have to conform to the Ethereum
|
||||
// consensus rules.
|
||||
func NewFaker() *Ethash {
|
||||
return &Ethash{fakeMode: true}
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFake,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewFakeFailer creates a ethash consensus engine with a fake PoW scheme that
|
||||
// accepts all blocks as valid apart from the single one specified, though they
|
||||
// still have to conform to the Ethereum consensus rules.
|
||||
func NewFakeFailer(fail uint64) *Ethash {
|
||||
return &Ethash{fakeMode: true, fakeFail: fail}
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFake,
|
||||
},
|
||||
fakeFail: fail,
|
||||
}
|
||||
}
|
||||
|
||||
// NewFakeDelayer creates a ethash consensus engine with a fake PoW scheme that
|
||||
// accepts all blocks as valid, but delays verifications by some time, though
|
||||
// they still have to conform to the Ethereum consensus rules.
|
||||
func NewFakeDelayer(delay time.Duration) *Ethash {
|
||||
return &Ethash{fakeMode: true, fakeDelay: delay}
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFake,
|
||||
},
|
||||
fakeDelay: delay,
|
||||
}
|
||||
}
|
||||
|
||||
// NewFullFaker creates an ethash consensus engine with a full fake scheme that
|
||||
// accepts all blocks as valid, without checking any consensus rules whatsoever.
|
||||
func NewFullFaker() *Ethash {
|
||||
return &Ethash{fakeMode: true, fakeFull: true}
|
||||
return &Ethash{
|
||||
config: Config{
|
||||
PowMode: ModeFullFake,
|
||||
},
|
||||
}
|
||||
}
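With the booleans gone, each helper constructor simply encodes its behaviour in Config.PowMode, and the verification paths changed earlier in this diff branch on that mode instead: ModeFullFake short-circuits VerifyHeader/VerifyHeaders/VerifyUncles entirely, ModeFake (as well as ModeFullFake) short-circuits VerifySeal and Seal, and ModeTest shrinks the dataset size. A small sketch of that dispatch, with hypothetical helper names:

package main

import "fmt"

type Mode uint

const (
	ModeNormal Mode = iota
	ModeShared
	ModeTest
	ModeFake
	ModeFullFake
)

// skipSealCheck mirrors the condition now used in VerifySeal and Seal.
func skipSealCheck(m Mode) bool { return m == ModeFake || m == ModeFullFake }

// skipConsensusChecks mirrors the condition now used in VerifyHeader(s)/VerifyUncles.
func skipConsensusChecks(m Mode) bool { return m == ModeFullFake }

func main() {
	for _, m := range []Mode{ModeNormal, ModeTest, ModeFake, ModeFullFake} {
		fmt.Println(m, skipSealCheck(m), skipConsensusChecks(m))
	}
}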
|
||||
|
||||
// NewShared creates a full sized ethash PoW shared between all requesters running
|
||||
@ -436,7 +465,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
|
||||
current, future := ethash.caches[epoch], (*cache)(nil)
|
||||
if current == nil {
|
||||
// No in-memory cache, evict the oldest if the cache limit was reached
|
||||
for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.cachesinmem {
|
||||
for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.config.CachesInMem {
|
||||
var evict *cache
|
||||
for _, cache := range ethash.caches {
|
||||
if evict == nil || evict.used.After(cache.used) {
|
||||
@ -473,7 +502,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
|
||||
ethash.lock.Unlock()
|
||||
|
||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
||||
current.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
|
||||
current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
|
||||
|
||||
current.lock.Lock()
|
||||
current.used = time.Now()
|
||||
@ -481,7 +510,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
|
||||
|
||||
// If we exhausted the future cache, now's a good time to regenerate it
|
||||
if future != nil {
|
||||
go future.generate(ethash.cachedir, ethash.cachesondisk, ethash.tester)
|
||||
go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.PowMode == ModeTest)
|
||||
}
|
||||
return current.cache
|
||||
}
|
||||
@ -498,7 +527,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
|
||||
current, future := ethash.datasets[epoch], (*dataset)(nil)
|
||||
if current == nil {
|
||||
// No in-memory dataset, evict the oldest if the dataset limit was reached
|
||||
for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.dagsinmem {
|
||||
for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.config.DatasetsInMem {
|
||||
var evict *dataset
|
||||
for _, dataset := range ethash.datasets {
|
||||
if evict == nil || evict.used.After(dataset.used) {
|
||||
@ -536,7 +565,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
|
||||
ethash.lock.Unlock()
|
||||
|
||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
||||
current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
|
||||
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
|
||||
current.lock.Lock()
|
||||
current.used = time.Now()
|
||||
@ -544,7 +573,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
|
||||
|
||||
// If we exhausted the future dataset, now's a good time to regenerate it
|
||||
if future != nil {
|
||||
go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
|
||||
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
|
||||
}
|
||||
return current.dataset
|
||||
}
|
||||
|
@ -34,7 +34,7 @@ import (
|
||||
// the block's difficulty requirements.
|
||||
func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
|
||||
// If we're running a fake PoW, simply return a 0 nonce immediately
|
||||
if ethash.fakeMode {
|
||||
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
|
||||
header := block.Header()
|
||||
header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
|
||||
return block.WithSeal(header), nil
|
||||
|
@ -47,7 +47,7 @@ const HistoryFile = "history"
|
||||
// DefaultPrompt is the default prompt line prefix to use for user input querying.
|
||||
const DefaultPrompt = "> "
|
||||
|
||||
// Config is te collection of configurations to fine tune the behavior of the
|
||||
// Config is the collection of configurations to fine tune the behavior of the
|
||||
// JavaScript console.
|
||||
type Config struct {
|
||||
DataDir string // Data directory to store the console history at
|
||||
@ -192,6 +192,7 @@ func (c *Console) init(preload []string) error {
|
||||
if obj := admin.Object(); obj != nil { // make sure the admin api is enabled over the interface
|
||||
obj.Set("sleepBlocks", bridge.SleepBlocks)
|
||||
obj.Set("sleep", bridge.Sleep)
|
||||
obj.Set("clearHistory", c.clearHistory)
|
||||
}
|
||||
// Preload any JavaScript files before starting the console
|
||||
for _, path := range preload {
|
||||
@ -216,6 +217,16 @@ func (c *Console) init(preload []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Console) clearHistory() {
|
||||
c.history = nil
|
||||
c.prompter.ClearHistory()
|
||||
if err := os.Remove(c.histPath); err != nil {
|
||||
fmt.Fprintln(c.printer, "can't delete history file:", err)
|
||||
} else {
|
||||
fmt.Fprintln(c.printer, "history file deleted")
|
||||
}
|
||||
}
|
||||
|
||||
// consoleOutput is an override for the console.log and console.error methods to
|
||||
// stream the output into the configured output stream instead of stdout.
|
||||
func (c *Console) consoleOutput(call otto.FunctionCall) otto.Value {
|
||||
@ -238,7 +249,7 @@ func (c *Console) AutoCompleteInput(line string, pos int) (string, []string, str
|
||||
// E.g. in case of nested lines eth.getBalance(eth.coinb<tab><tab>
|
||||
start := pos - 1
|
||||
for ; start > 0; start-- {
|
||||
// Skip all methods and namespaces (i.e. including te dot)
|
||||
// Skip all methods and namespaces (i.e. including the dot)
|
||||
if line[start] == '.' || (line[start] >= 'a' && line[start] <= 'z') || (line[start] >= 'A' && line[start] <= 'Z') {
|
||||
continue
|
||||
}
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/internal/jsre"
|
||||
@ -67,6 +68,7 @@ func (p *hookedPrompter) PromptConfirm(prompt string) (bool, error) {
|
||||
}
|
||||
func (p *hookedPrompter) SetHistory(history []string) {}
|
||||
func (p *hookedPrompter) AppendHistory(command string) {}
|
||||
func (p *hookedPrompter) ClearHistory() {}
|
||||
func (p *hookedPrompter) SetWordCompleter(completer WordCompleter) {}
|
||||
|
||||
// tester is a console test environment for the console tests to operate on.
|
||||
@ -96,7 +98,9 @@ func newTester(t *testing.T, confOverride func(*eth.Config)) *tester {
|
||||
ethConf := ð.Config{
|
||||
Genesis: core.DeveloperGenesisBlock(15, common.Address{}),
|
||||
Etherbase: common.HexToAddress(testAddress),
|
||||
PowTest: true,
|
||||
Ethash: ethash.Config{
|
||||
PowMode: ethash.ModeTest,
|
||||
},
|
||||
}
|
||||
if confOverride != nil {
|
||||
confOverride(ethConf)
|
||||
|
@ -51,6 +51,9 @@ type UserPrompter interface {
|
||||
// if and only if the prompt to append was a valid command.
|
||||
AppendHistory(command string)
|
||||
|
||||
// ClearHistory clears the entire history
|
||||
ClearHistory()
|
||||
|
||||
// SetWordCompleter sets the completion function that the prompter will call to
|
||||
// fetch completion candidates when the user presses tab.
|
||||
SetWordCompleter(completer WordCompleter)
|
||||
@ -158,6 +161,11 @@ func (p *terminalPrompter) AppendHistory(command string) {
|
||||
p.State.AppendHistory(command)
|
||||
}
|
||||
|
||||
// ClearHistory clears the entire history
|
||||
func (p *terminalPrompter) ClearHistory() {
|
||||
p.State.ClearHistory()
|
||||
}
|
||||
|
||||
// SetWordCompleter sets the completion function that the prompter will call to
|
||||
// fetch completion candidates when the user presses tab.
|
||||
func (p *terminalPrompter) SetWordCompleter(completer WordCompleter) {
|
||||
|
@ -77,7 +77,7 @@ contract ReleaseOracle {
|
||||
}
|
||||
}
|
||||
|
||||
// signers is an accessor method to retrieve all te signers (public accessor
|
||||
// signers is an accessor method to retrieve all the signers (public accessor
|
||||
// generates an indexed one, not a retrieve-all version).
|
||||
function signers() constant returns(address[]) {
|
||||
return voters;
|
||||
|
@ -230,7 +230,7 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
|
||||
if changed < c.storedSections {
|
||||
c.setValidSections(changed)
|
||||
}
|
||||
// Update the new head number to te finalized section end and notify children
|
||||
// Update the new head number to the finalized section end and notify children
|
||||
head = changed * c.sectionSize
|
||||
|
||||
if head < c.cascadedHead {
|
||||
|
@ -453,7 +453,7 @@ func (self *StateDB) Copy() *StateDB {
|
||||
// Copy all the basic fields, initialize the memory ones
|
||||
state := &StateDB{
|
||||
db: self.db,
|
||||
trie: self.trie,
|
||||
trie: self.db.CopyTrie(self.trie),
|
||||
stateObjects: make(map[common.Address]*stateObject, len(self.stateObjectsDirty)),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
|
||||
refund: new(big.Int).Set(self.refund),
|
||||
|
@ -117,6 +117,57 @@ func TestIntermediateLeaks(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopy tests that copying a statedb object indeed makes the original and
|
||||
// the copy independent of each other. This test is a regression test against
|
||||
// https://github.com/ethereum/go-ethereum/pull/15549.
|
||||
func TestCopy(t *testing.T) {
|
||||
// Create a random state test to copy and modify "independently"
|
||||
mem, _ := ethdb.NewMemDatabase()
|
||||
orig, _ := New(common.Hash{}, NewDatabase(mem))
|
||||
|
||||
for i := byte(0); i < 255; i++ {
|
||||
obj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
obj.AddBalance(big.NewInt(int64(i)))
|
||||
orig.updateStateObject(obj)
|
||||
}
|
||||
orig.Finalise(false)
|
||||
|
||||
// Copy the state, modify both in-memory
|
||||
copy := orig.Copy()
|
||||
|
||||
for i := byte(0); i < 255; i++ {
|
||||
origObj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
copyObj := copy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
|
||||
origObj.AddBalance(big.NewInt(2 * int64(i)))
|
||||
copyObj.AddBalance(big.NewInt(3 * int64(i)))
|
||||
|
||||
orig.updateStateObject(origObj)
|
||||
copy.updateStateObject(copyObj)
|
||||
}
|
||||
// Finalise the changes on both concurrently
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
orig.Finalise(true)
|
||||
close(done)
|
||||
}()
|
||||
copy.Finalise(true)
|
||||
<-done
|
||||
|
||||
// Verify that the two states have been updated independently
|
||||
for i := byte(0); i < 255; i++ {
|
||||
origObj := orig.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
copyObj := copy.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
|
||||
|
||||
if want := big.NewInt(3 * int64(i)); origObj.Balance().Cmp(want) != 0 {
|
||||
t.Errorf("orig obj %d: balance mismatch: have %v, want %v", i, origObj.Balance(), want)
|
||||
}
|
||||
if want := big.NewInt(4 * int64(i)); copyObj.Balance().Cmp(want) != 0 {
|
||||
t.Errorf("copy obj %d: balance mismatch: have %v, want %v", i, copyObj.Balance(), want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSnapshotRandom(t *testing.T) {
|
||||
config := &quick.Config{MaxCount: 1000}
|
||||
err := quick.Check((*snapshotTest).run, config)
|
||||
|
@ -1266,7 +1266,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) {
|
||||
|
||||
// Tests that when the pool reaches its global transaction limit, underpriced
|
||||
// transactions are gradually shifted out for more expensive ones and any gapped
|
||||
// pending transactions are moved into te queue.
|
||||
// pending transactions are moved into the queue.
|
||||
//
|
||||
// Note, local transactions are never allowed to be dropped.
|
||||
func TestTransactionPoolUnderpricing(t *testing.T) {
|
||||
|
@ -137,7 +137,7 @@ func isProtectedV(V *big.Int) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// DecodeRLP implements rlp.Encoder
|
||||
// EncodeRLP implements rlp.Encoder
|
||||
func (tx *Transaction) EncodeRLP(w io.Writer) error {
|
||||
return rlp.Encode(w, &tx.data)
|
||||
}
|
||||
|
@ -104,6 +104,10 @@ type EVM struct {
|
||||
// abort is used to abort the EVM calling operations
|
||||
// NOTE: must be set atomically
|
||||
abort int32
|
||||
// callGasTemp holds the gas available for the current call. This is needed because the
|
||||
// available gas is calculated in gasCall* according to the 63/64 rule and later
|
||||
// applied in opCall*.
|
||||
callGasTemp uint64
|
||||
}
|
||||
|
||||
// NewEVM returns a new EVM. The returned EVM is not thread safe and should
|
||||
|
@ -342,19 +342,11 @@ func gasCall(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem
|
||||
return 0, errGasUintOverflow
|
||||
}
|
||||
|
||||
cg, err := callGas(gt, contract.Gas, gas, stack.Back(0))
|
||||
evm.callGasTemp, err = callGas(gt, contract.Gas, gas, stack.Back(0))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Replace the stack item with the new gas calculation. This means that
|
||||
// either the original item is left on the stack or the item is replaced by:
|
||||
// (availableGas - gas) * 63 / 64
|
||||
// We replace the stack item so that it's available when the opCall instruction is
|
||||
// called. This information is otherwise lost due to the dependency on *current*
|
||||
// available gas.
|
||||
stack.data[stack.len()-1] = new(big.Int).SetUint64(cg)
|
||||
|
||||
if gas, overflow = math.SafeAdd(gas, cg); overflow {
|
||||
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
|
||||
return 0, errGasUintOverflow
|
||||
}
|
||||
return gas, nil
|
||||
@ -374,19 +366,11 @@ func gasCallCode(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack,
|
||||
return 0, errGasUintOverflow
|
||||
}
|
||||
|
||||
cg, err := callGas(gt, contract.Gas, gas, stack.Back(0))
|
||||
evm.callGasTemp, err = callGas(gt, contract.Gas, gas, stack.Back(0))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Replace the stack item with the new gas calculation. This means that
|
||||
// either the original item is left on the stack or the item is replaced by:
|
||||
// (availableGas - gas) * 63 / 64
|
||||
// We replace the stack item so that it's available when the opCall instruction is
|
||||
// called. This information is otherwise lost due to the dependency on *current*
|
||||
// available gas.
|
||||
stack.data[stack.len()-1] = new(big.Int).SetUint64(cg)
|
||||
|
||||
if gas, overflow = math.SafeAdd(gas, cg); overflow {
|
||||
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
|
||||
return 0, errGasUintOverflow
|
||||
}
|
||||
return gas, nil
|
||||
@ -436,18 +420,11 @@ func gasDelegateCall(gt params.GasTable, evm *EVM, contract *Contract, stack *St
|
||||
return 0, errGasUintOverflow
|
||||
}
|
||||
|
||||
cg, err := callGas(gt, contract.Gas, gas, stack.Back(0))
|
||||
evm.callGasTemp, err = callGas(gt, contract.Gas, gas, stack.Back(0))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Replace the stack item with the new gas calculation. This means that
|
||||
// either the original item is left on the stack or the item is replaced by:
|
||||
// (availableGas - gas) * 63 / 64
|
||||
// We replace the stack item so that it's available when the opCall instruction is
|
||||
// called.
|
||||
stack.data[stack.len()-1] = new(big.Int).SetUint64(cg)
|
||||
|
||||
if gas, overflow = math.SafeAdd(gas, cg); overflow {
|
||||
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
|
||||
return 0, errGasUintOverflow
|
||||
}
|
||||
return gas, nil
|
||||
@ -463,18 +440,11 @@ func gasStaticCall(gt params.GasTable, evm *EVM, contract *Contract, stack *Stac
|
||||
return 0, errGasUintOverflow
|
||||
}
|
||||
|
||||
cg, err := callGas(gt, contract.Gas, gas, stack.Back(0))
|
||||
evm.callGasTemp, err = callGas(gt, contract.Gas, gas, stack.Back(0))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Replace the stack item with the new gas calculation. This means that
|
||||
// either the original item is left on the stack or the item is replaced by:
|
||||
// (availableGas - gas) * 63 / 64
|
||||
// We replace the stack item so that it's available when the opCall instruction is
|
||||
// called.
|
||||
stack.data[stack.len()-1] = new(big.Int).SetUint64(cg)
|
||||
|
||||
if gas, overflow = math.SafeAdd(gas, cg); overflow {
|
||||
if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow {
|
||||
return 0, errGasUintOverflow
|
||||
}
|
||||
return gas, nil
|
||||
|
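The theme of the four gas functions above is the same: the 63/64ths call gas computed by callGas is no longer written back onto the stack for the opcode to re-read, it is parked in evm.callGasTemp, and the matching opCall/opCallCode/opDelegateCall/opStaticCall implementations below pop (and discard) the gas stack slot and read that field instead. A stripped-down model of the hand-off, with toy types standing in for the EVM, the stack and the gas table:

package main

import "fmt"

// Toy stand-ins for the real EVM and stack; only the gas hand-off is modelled.
type stack struct{ data []uint64 }

func (s *stack) pop() uint64 {
	v := s.data[len(s.data)-1]
	s.data = s.data[:len(s.data)-1]
	return v
}

type evm struct{ callGasTemp uint64 }

// gasCall plays the role of the gasCall* functions: apply the all-but-one-64th
// rule and stash the result in callGasTemp instead of rewriting the stack slot.
func gasCall(e *evm, available, requested uint64) uint64 {
	allowed := available - available/64
	if requested < allowed {
		allowed = requested
	}
	e.callGasTemp = allowed
	return allowed // plus memory/transfer costs in the real implementation
}

// opCall plays the role of the opCall* opcodes: pop the (now unused) gas slot
// and take the actual call gas from callGasTemp.
func opCall(e *evm, st *stack) uint64 {
	_ = st.pop() // gas argument from the contract, superseded by callGasTemp
	return e.callGasTemp
}

func main() {
	e := &evm{}
	st := &stack{data: []uint64{100000}} // CALL's gas argument
	gasCall(e, 64000, 100000)
	fmt.Println(opCall(e, st)) // 63000
}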
@ -603,24 +603,20 @@ func opCreate(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S
|
||||
}
|
||||
|
||||
func opCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||
gas := stack.pop().Uint64()
|
||||
// pop gas and value of the stack.
|
||||
addr, value := stack.pop(), stack.pop()
|
||||
// Pop gas. The actual gas is in evm.callGasTemp.
|
||||
evm.interpreter.intPool.put(stack.pop())
|
||||
gas := evm.callGasTemp
|
||||
// Pop other call parameters.
|
||||
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
toAddr := common.BigToAddress(addr)
|
||||
value = math.U256(value)
|
||||
// pop input size and offset
|
||||
inOffset, inSize := stack.pop(), stack.pop()
|
||||
// pop return size and offset
|
||||
retOffset, retSize := stack.pop(), stack.pop()
|
||||
|
||||
address := common.BigToAddress(addr)
|
||||
|
||||
// Get the arguments from the memory
|
||||
// Get the arguments from the memory.
|
||||
args := memory.Get(inOffset.Int64(), inSize.Int64())
|
||||
|
||||
if value.Sign() != 0 {
|
||||
gas += params.CallStipend
|
||||
}
|
||||
ret, returnGas, err := evm.Call(contract, address, args, gas, value)
|
||||
ret, returnGas, err := evm.Call(contract, toAddr, args, gas, value)
|
||||
if err != nil {
|
||||
stack.push(new(big.Int))
|
||||
} else {
|
||||
@ -636,25 +632,20 @@ func opCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta
|
||||
}
|
||||
|
||||
func opCallCode(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||
gas := stack.pop().Uint64()
|
||||
// pop gas and value of the stack.
|
||||
addr, value := stack.pop(), stack.pop()
|
||||
// Pop gas. The actual gas is in evm.callGasTemp.
|
||||
evm.interpreter.intPool.put(stack.pop())
|
||||
gas := evm.callGasTemp
|
||||
// Pop other call parameters.
|
||||
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
toAddr := common.BigToAddress(addr)
|
||||
value = math.U256(value)
|
||||
// pop input size and offset
|
||||
inOffset, inSize := stack.pop(), stack.pop()
|
||||
// pop return size and offset
|
||||
retOffset, retSize := stack.pop(), stack.pop()
|
||||
|
||||
address := common.BigToAddress(addr)
|
||||
|
||||
// Get the arguments from the memory
|
||||
// Get arguments from the memory.
|
||||
args := memory.Get(inOffset.Int64(), inSize.Int64())
|
||||
|
||||
if value.Sign() != 0 {
|
||||
gas += params.CallStipend
|
||||
}
|
||||
|
||||
ret, returnGas, err := evm.CallCode(contract, address, args, gas, value)
|
||||
ret, returnGas, err := evm.CallCode(contract, toAddr, args, gas, value)
|
||||
if err != nil {
|
||||
stack.push(new(big.Int))
|
||||
} else {
|
||||
@ -670,9 +661,13 @@ func opCallCode(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack
|
||||
}
|
||||
|
||||
func opDelegateCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||
gas, to, inOffset, inSize, outOffset, outSize := stack.pop().Uint64(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
|
||||
toAddr := common.BigToAddress(to)
|
||||
// Pop gas. The actual gas is in evm.callGasTemp.
|
||||
evm.interpreter.intPool.put(stack.pop())
|
||||
gas := evm.callGasTemp
|
||||
// Pop other call parameters.
|
||||
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
toAddr := common.BigToAddress(addr)
|
||||
// Get arguments from the memory.
|
||||
args := memory.Get(inOffset.Int64(), inSize.Int64())
|
||||
|
||||
ret, returnGas, err := evm.DelegateCall(contract, toAddr, args, gas)
|
||||
@ -682,30 +677,25 @@ func opDelegateCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, st
|
||||
stack.push(big.NewInt(1))
|
||||
}
|
||||
if err == nil || err == errExecutionReverted {
|
||||
memory.Set(outOffset.Uint64(), outSize.Uint64(), ret)
|
||||
memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
|
||||
}
|
||||
contract.Gas += returnGas
|
||||
|
||||
evm.interpreter.intPool.put(to, inOffset, inSize, outOffset, outSize)
|
||||
evm.interpreter.intPool.put(addr, inOffset, inSize, retOffset, retSize)
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func opStaticCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||
// pop gas
|
||||
gas := stack.pop().Uint64()
|
||||
// pop address
|
||||
addr := stack.pop()
|
||||
// pop input size and offset
|
||||
inOffset, inSize := stack.pop(), stack.pop()
|
||||
// pop return size and offset
|
||||
retOffset, retSize := stack.pop(), stack.pop()
|
||||
|
||||
address := common.BigToAddress(addr)
|
||||
|
||||
// Get the arguments from the memory
|
||||
// Pop gas. The actual gas is in evm.callGasTemp.
|
||||
evm.interpreter.intPool.put(stack.pop())
|
||||
gas := evm.callGasTemp
|
||||
// Pop other call parameters.
|
||||
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
toAddr := common.BigToAddress(addr)
|
||||
// Get arguments from the memory.
|
||||
args := memory.Get(inOffset.Int64(), inSize.Int64())
|
||||
|
||||
ret, returnGas, err := evm.StaticCall(contract, address, args, gas)
|
||||
ret, returnGas, err := evm.StaticCall(contract, toAddr, args, gas)
|
||||
if err != nil {
|
||||
stack.push(new(big.Int))
|
||||
} else {
|
||||
|
@ -138,16 +138,15 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
|
||||
pc = uint64(0) // program counter
|
||||
cost uint64
|
||||
// copies used by tracer
|
||||
stackCopy = newstack() // stackCopy needed for Tracer since stack is mutated by 63/64 gas rule
|
||||
pcCopy uint64 // needed for the deferred Tracer
|
||||
gasCopy uint64 // for Tracer to log gas remaining before execution
|
||||
logged bool // deferred Tracer should ignore already logged steps
|
||||
pcCopy uint64 // needed for the deferred Tracer
|
||||
gasCopy uint64 // for Tracer to log gas remaining before execution
|
||||
logged bool // deferred Tracer should ignore already logged steps
|
||||
)
|
||||
contract.Input = input
|
||||
|
||||
defer func() {
|
||||
if err != nil && !logged && in.cfg.Debug {
|
||||
in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, mem, stackCopy, contract, in.evm.depth, err)
|
||||
in.cfg.Tracer.CaptureState(in.evm, pcCopy, op, gasCopy, cost, mem, stack, contract, in.evm.depth, err)
|
||||
}
|
||||
}()
|
||||
|
||||
@ -156,21 +155,14 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
|
||||
// the execution of one of the operations or until the done flag is set by the
|
||||
// parent context.
|
||||
for atomic.LoadInt32(&in.evm.abort) == 0 {
|
||||
// Get the memory location of pc
|
||||
op = contract.GetOp(pc)
|
||||
|
||||
if in.cfg.Debug {
|
||||
logged = false
|
||||
pcCopy = pc
|
||||
gasCopy = contract.Gas
|
||||
stackCopy = newstack()
|
||||
for _, val := range stack.data {
|
||||
stackCopy.push(val)
|
||||
}
|
||||
// Capture pre-execution values for tracing.
|
||||
logged, pcCopy, gasCopy = false, pc, contract.Gas
|
||||
}
|
||||
|
||||
// Get the operation from the jump table matching the opcode and validate the
|
||||
// stack and make sure there enough stack items available to perform the operation
|
||||
// Get the operation from the jump table and validate the stack to ensure there are
|
||||
// enough stack items available to perform the operation.
|
||||
op = contract.GetOp(pc)
|
||||
operation := in.cfg.JumpTable[op]
|
||||
if !operation.valid {
|
||||
return nil, fmt.Errorf("invalid opcode 0x%x", int(op))
|
||||
@ -211,7 +203,7 @@ func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret
|
||||
}
|
||||
|
||||
if in.cfg.Debug {
|
||||
in.cfg.Tracer.CaptureState(in.evm, pc, op, gasCopy, cost, mem, stackCopy, contract, in.evm.depth, err)
|
||||
in.cfg.Tracer.CaptureState(in.evm, pc, op, gasCopy, cost, mem, stack, contract, in.evm.depth, err)
|
||||
logged = true
|
||||
}
|
||||
|
||||
|
@ -45,7 +45,6 @@ type LogConfig struct {
|
||||
DisableMemory bool // disable memory capture
|
||||
DisableStack bool // disable stack capture
|
||||
DisableStorage bool // disable storage capture
|
||||
FullStorage bool // show full storage (slow)
|
||||
Limit int // maximum length of output, but zero means unlimited
|
||||
}
|
||||
|
||||
@ -136,14 +135,13 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
|
||||
)
|
||||
l.changedValues[contract.Address()][address] = value
|
||||
}
|
||||
// copy a snapstot of the current memory state to a new buffer
|
||||
// Copy a snapstot of the current memory state to a new buffer
|
||||
var mem []byte
|
||||
if !l.cfg.DisableMemory {
|
||||
mem = make([]byte, len(memory.Data()))
|
||||
copy(mem, memory.Data())
|
||||
}
|
||||
|
||||
// copy a snapshot of the current stack state to a new buffer
|
||||
// Copy a snapshot of the current stack state to a new buffer
|
||||
var stck []*big.Int
|
||||
if !l.cfg.DisableStack {
|
||||
stck = make([]*big.Int, len(stack.Data()))
|
||||
@ -151,26 +149,10 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
|
||||
stck[i] = new(big.Int).Set(item)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the storage based on the settings specified in the log config. If full storage
|
||||
// is disabled (default) we can use the simple Storage.Copy method, otherwise we use
|
||||
// the state object to query for all values (slow process).
|
||||
// Copy a snapshot of the current storage to a new container
|
||||
var storage Storage
|
||||
if !l.cfg.DisableStorage {
|
||||
if l.cfg.FullStorage {
|
||||
storage = make(Storage)
|
||||
// Get the contract account and loop over each storage entry. This may involve looping over
|
||||
// the trie and is a very expensive process.
|
||||
|
||||
env.StateDB.ForEachStorage(contract.Address(), func(key, value common.Hash) bool {
|
||||
storage[key] = value
|
||||
// Return true, indicating we'd like to continue.
|
||||
return true
|
||||
})
|
||||
} else {
|
||||
// copy a snapshot of the current storage to a new container.
|
||||
storage = l.changedValues[contract.Address()].Copy()
|
||||
}
|
||||
storage = l.changedValues[contract.Address()].Copy()
|
||||
}
|
||||
// create a new snaptshot of the EVM.
|
||||
log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, storage, depth, err}
|
||||
|
@ -63,32 +63,8 @@ func TestStoreCapture(t *testing.T) {
|
||||
if len(logger.changedValues[contract.Address()]) == 0 {
|
||||
t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), len(logger.changedValues[contract.Address()]))
|
||||
}
|
||||
|
||||
exp := common.BigToHash(big.NewInt(1))
|
||||
if logger.changedValues[contract.Address()][index] != exp {
|
||||
t.Errorf("expected %x, got %x", exp, logger.changedValues[contract.Address()][index])
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorageCapture(t *testing.T) {
|
||||
t.Skip("implementing this function is difficult. it requires all sort of interfaces to be implemented which isn't trivial. The value (the actual test) isn't worth it")
|
||||
var (
|
||||
ref = &dummyContractRef{}
|
||||
contract = NewContract(ref, ref, new(big.Int), 0)
|
||||
env = NewEVM(Context{}, dummyStateDB{ref: ref}, params.TestChainConfig, Config{EnableJit: false, ForceJit: false})
|
||||
logger = NewStructLogger(nil)
|
||||
mem = NewMemory()
|
||||
stack = newstack()
|
||||
)
|
||||
|
||||
logger.CaptureState(env, 0, STOP, 0, 0, mem, stack, contract, 0, nil)
|
||||
if ref.calledForEach {
|
||||
t.Error("didn't expect for each to be called")
|
||||
}
|
||||
|
||||
logger = NewStructLogger(&LogConfig{FullStorage: true})
|
||||
logger.CaptureState(env, 0, STOP, 0, 0, mem, stack, contract, 0, nil)
|
||||
if !ref.calledForEach {
|
||||
t.Error("expected for each to be called")
|
||||
}
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ var curveB = new(big.Int).SetInt64(3)
|
||||
// curveGen is the generator of G₁.
|
||||
var curveGen = &curvePoint{
|
||||
new(big.Int).SetInt64(1),
|
||||
new(big.Int).SetInt64(-2),
|
||||
new(big.Int).SetInt64(2),
|
||||
new(big.Int).SetInt64(1),
|
||||
new(big.Int).SetInt64(1),
|
||||
}
|
||||
|
@ -98,6 +98,9 @@ func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) {
|
||||
}
|
||||
priv.D = new(big.Int).SetBytes(d)
|
||||
priv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d)
|
||||
if priv.PublicKey.X == nil {
|
||||
return nil, errors.New("invalid private key")
|
||||
}
|
||||
return priv, nil
|
||||
}
|
||||
|
||||
|
@ -20,12 +20,10 @@ import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
@ -42,15 +40,20 @@ func TestKeccak256Hash(t *testing.T) {
|
||||
checkhash(t, "Sha3-256-array", func(in []byte) []byte { h := Keccak256Hash(in); return h[:] }, msg, exp)
|
||||
}
|
||||
|
||||
func TestToECDSAErrors(t *testing.T) {
|
||||
if _, err := HexToECDSA("0000000000000000000000000000000000000000000000000000000000000000"); err == nil {
|
||||
t.Fatal("HexToECDSA should've returned error")
|
||||
}
|
||||
if _, err := HexToECDSA("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); err == nil {
|
||||
t.Fatal("HexToECDSA should've returned error")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkSha3(b *testing.B) {
|
||||
a := []byte("hello world")
|
||||
amount := 1000000
|
||||
start := time.Now()
|
||||
for i := 0; i < amount; i++ {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Keccak256(a)
|
||||
}
|
||||
|
||||
fmt.Println(amount, ":", time.Since(start))
|
||||
}
|
||||
|
||||
func TestSign(t *testing.T) {
|
||||
|
@ -46,6 +46,55 @@ static int secp256k1_ecdsa_recover_pubkey(
|
||||
return secp256k1_ec_pubkey_serialize(ctx, pubkey_out, &outputlen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
|
||||
}
|
||||
|
||||
// secp256k1_ecdsa_verify_enc verifies an encoded compact signature.
|
||||
//
|
||||
// Returns: 1: signature is valid
|
||||
// 0: signature is invalid
|
||||
// Args: ctx: pointer to a context object (cannot be NULL)
|
||||
// In: sigdata: pointer to a 64-byte signature (cannot be NULL)
|
||||
// msgdata: pointer to a 32-byte message (cannot be NULL)
|
||||
// pubkeydata: pointer to public key data (cannot be NULL)
|
||||
// pubkeylen: length of pubkeydata
|
||||
static int secp256k1_ecdsa_verify_enc(
|
||||
const secp256k1_context* ctx,
|
||||
const unsigned char *sigdata,
|
||||
const unsigned char *msgdata,
|
||||
const unsigned char *pubkeydata,
|
||||
size_t pubkeylen
|
||||
) {
|
||||
secp256k1_ecdsa_signature sig;
|
||||
secp256k1_pubkey pubkey;
|
||||
|
||||
if (!secp256k1_ecdsa_signature_parse_compact(ctx, &sig, sigdata)) {
|
||||
return 0;
|
||||
}
|
||||
if (!secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeydata, pubkeylen)) {
|
||||
return 0;
|
||||
}
|
||||
return secp256k1_ecdsa_verify(ctx, &sig, msgdata, &pubkey);
|
||||
}
|
||||
|
||||
// secp256k1_decompress_pubkey decompresses a public key.
|
||||
//
|
||||
// Returns: 1: public key is valid
|
||||
// 0: public key is invalid
|
||||
// Args: ctx: pointer to a context object (cannot be NULL)
|
||||
// Out: pubkey_out: the serialized 65-byte public key (cannot be NULL)
|
||||
// In: pubkeydata: pointer to 33 bytes of compressed public key data (cannot be NULL)
|
||||
static int secp256k1_decompress_pubkey(
|
||||
const secp256k1_context* ctx,
|
||||
unsigned char *pubkey_out,
|
||||
const unsigned char *pubkeydata
|
||||
) {
|
||||
secp256k1_pubkey pubkey;
|
||||
|
||||
if (!secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeydata, 33)) {
|
||||
return 0;
|
||||
}
|
||||
size_t outputlen = 65;
|
||||
return secp256k1_ec_pubkey_serialize(ctx, pubkey_out, &outputlen, &pubkey, SECP256K1_EC_UNCOMPRESSED);
|
||||
}
|
||||
|
||||
// secp256k1_pubkey_scalar_mul multiplies a point by a scalar in constant time.
|
||||
//
|
||||
// Returns: 1: multiplication was successful
|
||||
|
@ -38,6 +38,7 @@ import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
@ -55,6 +56,7 @@ var (
|
||||
ErrInvalidSignatureLen = errors.New("invalid signature length")
|
||||
ErrInvalidRecoveryID = errors.New("invalid signature recovery id")
|
||||
ErrInvalidKey = errors.New("invalid private key")
|
||||
ErrInvalidPubkey = errors.New("invalid public key")
|
||||
ErrSignFailed = errors.New("signing failed")
|
||||
ErrRecoverFailed = errors.New("recovery failed")
|
||||
)
|
||||
@ -119,6 +121,33 @@ func RecoverPubkey(msg []byte, sig []byte) ([]byte, error) {
|
||||
return pubkey, nil
|
||||
}
|
||||
|
||||
// VerifySignature checks that the given pubkey created signature over message.
|
||||
// The signature should be in [R || S] format.
|
||||
func VerifySignature(pubkey, msg, signature []byte) bool {
|
||||
if len(msg) != 32 || len(signature) != 64 || len(pubkey) == 0 {
|
||||
return false
|
||||
}
|
||||
sigdata := (*C.uchar)(unsafe.Pointer(&signature[0]))
|
||||
msgdata := (*C.uchar)(unsafe.Pointer(&msg[0]))
|
||||
keydata := (*C.uchar)(unsafe.Pointer(&pubkey[0]))
|
||||
return C.secp256k1_ecdsa_verify_enc(context, sigdata, msgdata, keydata, C.size_t(len(pubkey))) != 0
|
||||
}
|
||||
|
||||
// DecompressPubkey parses a public key in the 33-byte compressed format.
|
||||
// It returns non-nil coordinates if the public key is valid.
|
||||
func DecompressPubkey(pubkey []byte) (X, Y *big.Int) {
|
||||
if len(pubkey) != 33 {
|
||||
return nil, nil
|
||||
}
|
||||
buf := make([]byte, 65)
|
||||
bufdata := (*C.uchar)(unsafe.Pointer(&buf[0]))
|
||||
pubkeydata := (*C.uchar)(unsafe.Pointer(&pubkey[0]))
|
||||
if C.secp256k1_decompress_pubkey(context, bufdata, pubkeydata) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return new(big.Int).SetBytes(buf[1:33]), new(big.Int).SetBytes(buf[33:])
|
||||
}
|
||||
|
||||
func checkSignature(sig []byte) error {
|
||||
if len(sig) != 65 {
|
||||
return ErrInvalidSignatureLen
|
||||
|
@ -27,10 +27,12 @@ import (
    "github.com/ethereum/go-ethereum/crypto/secp256k1"
)

// Ecrecover returns the uncompressed public key that created the given signature.
func Ecrecover(hash, sig []byte) ([]byte, error) {
    return secp256k1.RecoverPubkey(hash, sig)
}

// SigToPub returns the public key that created the given signature.
func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
    s, err := Ecrecover(hash, sig)
    if err != nil {

@ -58,6 +60,22 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) (sig []byte, err error) {
    return secp256k1.Sign(hash, seckey)
}

// VerifySignature checks that the given public key created signature over hash.
// The public key should be in compressed (33 bytes) or uncompressed (65 bytes) format.
// The signature should have the 64 byte [R || S] format.
func VerifySignature(pubkey, hash, signature []byte) bool {
    return secp256k1.VerifySignature(pubkey, hash, signature)
}

// DecompressPubkey parses a public key in the 33-byte compressed format.
func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
    x, y := secp256k1.DecompressPubkey(pubkey)
    if x == nil {
        return nil, fmt.Errorf("invalid public key")
    }
    return &ecdsa.PublicKey{X: x, Y: y, Curve: S256()}, nil
}

// S256 returns an instance of the secp256k1 curve.
func S256() elliptic.Curve {
    return secp256k1.S256()
@ -21,11 +21,14 @@ package crypto

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "errors"
    "fmt"
    "math/big"

    "github.com/btcsuite/btcd/btcec"
)

// Ecrecover returns the uncompressed public key that created the given signature.
func Ecrecover(hash, sig []byte) ([]byte, error) {
    pub, err := SigToPub(hash, sig)
    if err != nil {

@ -35,6 +38,7 @@ func Ecrecover(hash, sig []byte) ([]byte, error) {
    return bytes, err
}

// SigToPub returns the public key that created the given signature.
func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
    // Convert to btcec input format with 'recovery id' v at the beginning.
    btcsig := make([]byte, 65)

@ -71,6 +75,33 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) {
    return sig, nil
}

// VerifySignature checks that the given public key created signature over hash.
// The public key should be in compressed (33 bytes) or uncompressed (65 bytes) format.
// The signature should have the 64 byte [R || S] format.
func VerifySignature(pubkey, hash, signature []byte) bool {
    if len(signature) != 64 {
        return false
    }
    sig := &btcec.Signature{R: new(big.Int).SetBytes(signature[:32]), S: new(big.Int).SetBytes(signature[32:])}
    key, err := btcec.ParsePubKey(pubkey, btcec.S256())
    if err != nil {
        return false
    }
    return sig.Verify(hash, key)
}

// DecompressPubkey parses a public key in the 33-byte compressed format.
func DecompressPubkey(pubkey []byte) (*ecdsa.PublicKey, error) {
    if len(pubkey) != 33 {
        return nil, errors.New("invalid compressed public key length")
    }
    key, err := btcec.ParsePubKey(pubkey, btcec.S256())
    if err != nil {
        return nil, err
    }
    return key.ToECDSA(), nil
}

// S256 returns an instance of the secp256k1 curve.
func S256() elliptic.Curve {
    return btcec.S256()
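For orientation, a minimal usage sketch of the new helpers from a caller's point of view (not part of this change; the message text is a placeholder and errors are handled crudely):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    key, err := crypto.GenerateKey()
    if err != nil {
        panic(err)
    }
    // Sign and VerifySignature operate on a 32-byte digest, not the raw message.
    digest := crypto.Keccak256([]byte("hello world"))

    sig, err := crypto.Sign(digest, key) // 65 bytes: [R || S || V]
    if err != nil {
        panic(err)
    }
    // VerifySignature takes the 64-byte [R || S] form, so drop the recovery id.
    pub := crypto.FromECDSAPub(&key.PublicKey) // 65-byte uncompressed public key
    fmt.Println("valid:", crypto.VerifySignature(pub, digest, sig[:64]))
}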
@ -18,19 +18,95 @@ package crypto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
func TestRecoverSanity(t *testing.T) {
|
||||
msg, _ := hex.DecodeString("ce0677bb30baa8cf067c88db9811f4333d131bf8bcf12fe7065d211dce971008")
|
||||
sig, _ := hex.DecodeString("90f27b8b488db00b00606796d2987f6a5f59ae62ea05effe84fef5b8b0e549984a691139ad57a3f0b906637673aa2f63d1f55cb1a69199d4009eea23ceaddc9301")
|
||||
pubkey1, _ := hex.DecodeString("04e32df42865e97135acfb65f3bae71bdc86f4d49150ad6a440b6f15878109880a0a2b2667f7e725ceea70c673093bf67663e0312623c8e091b13cf2c0f11ef652")
|
||||
pubkey2, err := Ecrecover(msg, sig)
|
||||
var (
|
||||
testmsg = hexutil.MustDecode("0xce0677bb30baa8cf067c88db9811f4333d131bf8bcf12fe7065d211dce971008")
|
||||
testsig = hexutil.MustDecode("0x90f27b8b488db00b00606796d2987f6a5f59ae62ea05effe84fef5b8b0e549984a691139ad57a3f0b906637673aa2f63d1f55cb1a69199d4009eea23ceaddc9301")
|
||||
testpubkey = hexutil.MustDecode("0x04e32df42865e97135acfb65f3bae71bdc86f4d49150ad6a440b6f15878109880a0a2b2667f7e725ceea70c673093bf67663e0312623c8e091b13cf2c0f11ef652")
|
||||
testpubkeyc = hexutil.MustDecode("0x02e32df42865e97135acfb65f3bae71bdc86f4d49150ad6a440b6f15878109880a")
|
||||
)
|
||||
|
||||
func TestEcrecover(t *testing.T) {
|
||||
pubkey, err := Ecrecover(testmsg, testsig)
|
||||
if err != nil {
|
||||
t.Fatalf("recover error: %s", err)
|
||||
}
|
||||
if !bytes.Equal(pubkey1, pubkey2) {
|
||||
t.Errorf("pubkey mismatch: want: %x have: %x", pubkey1, pubkey2)
|
||||
if !bytes.Equal(pubkey, testpubkey) {
|
||||
t.Errorf("pubkey mismatch: want: %x have: %x", testpubkey, pubkey)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifySignature(t *testing.T) {
|
||||
sig := testsig[:len(testsig)-1] // remove recovery id
|
||||
if !VerifySignature(testpubkey, testmsg, sig) {
|
||||
t.Errorf("can't verify signature with uncompressed key")
|
||||
}
|
||||
if !VerifySignature(testpubkeyc, testmsg, sig) {
|
||||
t.Errorf("can't verify signature with compressed key")
|
||||
}
|
||||
|
||||
if VerifySignature(nil, testmsg, sig) {
|
||||
t.Errorf("signature valid with no key")
|
||||
}
|
||||
if VerifySignature(testpubkey, nil, sig) {
|
||||
t.Errorf("signature valid with no message")
|
||||
}
|
||||
if VerifySignature(testpubkey, testmsg, nil) {
|
||||
t.Errorf("nil signature valid")
|
||||
}
|
||||
if VerifySignature(testpubkey, testmsg, append(common.CopyBytes(sig), 1, 2, 3)) {
|
||||
t.Errorf("signature valid with extra bytes at the end")
|
||||
}
|
||||
if VerifySignature(testpubkey, testmsg, sig[:len(sig)-2]) {
|
||||
t.Errorf("signature valid even though it's incomplete")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecompressPubkey(t *testing.T) {
|
||||
key, err := DecompressPubkey(testpubkeyc)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if uncompressed := FromECDSAPub(key); !bytes.Equal(uncompressed, testpubkey) {
|
||||
t.Errorf("wrong public key result: got %x, want %x", uncompressed, testpubkey)
|
||||
}
|
||||
if _, err := DecompressPubkey(nil); err == nil {
|
||||
t.Errorf("no error for nil pubkey")
|
||||
}
|
||||
if _, err := DecompressPubkey(testpubkeyc[:5]); err == nil {
|
||||
t.Errorf("no error for incomplete pubkey")
|
||||
}
|
||||
if _, err := DecompressPubkey(append(common.CopyBytes(testpubkeyc), 1, 2, 3)); err == nil {
|
||||
t.Errorf("no error for pubkey with extra bytes at the end")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkEcrecoverSignature(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err := Ecrecover(testmsg, testsig); err != nil {
|
||||
b.Fatal("ecrecover error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkVerifySignature(b *testing.B) {
|
||||
sig := testsig[:len(testsig)-1] // remove recovery id
|
||||
for i := 0; i < b.N; i++ {
|
||||
if !VerifySignature(testpubkey, testmsg, sig) {
|
||||
b.Fatal("verify error")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkDecompressPubkey(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err := DecompressPubkey(testpubkeyc); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
eth/api.go
@ -452,7 +452,12 @@ func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConf
|
||||
}
|
||||
statedb, err := blockchain.StateAt(blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1).Root())
|
||||
if err != nil {
|
||||
return false, structLogger.StructLogs(), err
|
||||
switch err.(type) {
|
||||
case *trie.MissingNodeError:
|
||||
return false, structLogger.StructLogs(), fmt.Errorf("required historical state unavailable")
|
||||
default:
|
||||
return false, structLogger.StructLogs(), err
|
||||
}
|
||||
}
|
||||
|
||||
receipts, _, usedGas, err := processor.Process(block, statedb, config)
|
||||
@ -518,7 +523,12 @@ func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, txHash common.
|
||||
}
|
||||
msg, context, statedb, err := api.computeTxEnv(blockHash, int(txIndex))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
switch err.(type) {
|
||||
case *trie.MissingNodeError:
|
||||
return nil, fmt.Errorf("required historical state unavailable")
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Run the transaction with tracing enabled.
|
||||
@ -615,14 +625,18 @@ func (api *PrivateDebugAPI) StorageRangeAt(ctx context.Context, blockHash common
|
||||
if st == nil {
|
||||
return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress)
|
||||
}
|
||||
return storageRangeAt(st, keyStart, maxResult), nil
|
||||
return storageRangeAt(st, keyStart, maxResult)
|
||||
}
|
||||
|
||||
func storageRangeAt(st state.Trie, start []byte, maxResult int) StorageRangeResult {
|
||||
func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) {
|
||||
it := trie.NewIterator(st.NodeIterator(start))
|
||||
result := StorageRangeResult{Storage: storageMap{}}
|
||||
for i := 0; i < maxResult && it.Next(); i++ {
|
||||
e := storageEntry{Value: common.BytesToHash(it.Value)}
|
||||
_, content, _, err := rlp.Split(it.Value)
|
||||
if err != nil {
|
||||
return StorageRangeResult{}, err
|
||||
}
|
||||
e := storageEntry{Value: common.BytesToHash(content)}
|
||||
if preimage := st.GetKey(it.Key); preimage != nil {
|
||||
preimage := common.BytesToHash(preimage)
|
||||
e.Key = &preimage
|
||||
@ -634,5 +648,88 @@ func storageRangeAt(st state.Trie, start []byte, maxResult int) StorageRangeResu
        next := common.BytesToHash(it.Key)
        result.NextKey = &next
    }
    return result
    return result, nil
}

// GetModifiedAccountsByNumber returns all accounts that have changed between the
// two blocks specified. A change is defined as a difference in nonce, balance,
// code hash, or storage hash.
//
// With one parameter, returns the list of accounts modified in the specified block.
func (api *PrivateDebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) {
    var startBlock, endBlock *types.Block

    startBlock = api.eth.blockchain.GetBlockByNumber(startNum)
    if startBlock == nil {
        return nil, fmt.Errorf("start block %x not found", startNum)
    }

    if endNum == nil {
        endBlock = startBlock
        startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
        if startBlock == nil {
            return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
        }
    } else {
        endBlock = api.eth.blockchain.GetBlockByNumber(*endNum)
        if endBlock == nil {
            return nil, fmt.Errorf("end block %d not found", *endNum)
        }
    }
    return api.getModifiedAccounts(startBlock, endBlock)
}

// GetModifiedAccountsByHash returns all accounts that have changed between the
// two blocks specified. A change is defined as a difference in nonce, balance,
// code hash, or storage hash.
//
// With one parameter, returns the list of accounts modified in the specified block.
func (api *PrivateDebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) {
    var startBlock, endBlock *types.Block
    startBlock = api.eth.blockchain.GetBlockByHash(startHash)
    if startBlock == nil {
        return nil, fmt.Errorf("start block %x not found", startHash)
    }

    if endHash == nil {
        endBlock = startBlock
        startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())
        if startBlock == nil {
            return nil, fmt.Errorf("block %x has no parent", endBlock.Number())
        }
    } else {
        endBlock = api.eth.blockchain.GetBlockByHash(*endHash)
        if endBlock == nil {
            return nil, fmt.Errorf("end block %x not found", *endHash)
        }
    }
    return api.getModifiedAccounts(startBlock, endBlock)
}

func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) {
    if startBlock.Number().Uint64() >= endBlock.Number().Uint64() {
        return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64())
    }

    oldTrie, err := trie.NewSecure(startBlock.Root(), api.eth.chainDb, 0)
    if err != nil {
        return nil, err
    }
    newTrie, err := trie.NewSecure(endBlock.Root(), api.eth.chainDb, 0)
    if err != nil {
        return nil, err
    }

    diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
    iter := trie.NewIterator(diff)

    var dirty []common.Address
    for iter.Next() {
        key := newTrie.GetKey(iter.Key)
        if key == nil {
            return nil, fmt.Errorf("no preimage found for hash %x", iter.Key)
        }
        dirty = append(dirty, common.BytesToAddress(key))
    }
    return dirty, nil
}
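Because the two methods are registered on the debug namespace (see the web3 extension further below), they are reachable over RPC as debug_getModifiedAccountsByNumber and debug_getModifiedAccountsByHash. A minimal client-side sketch, assuming a locally running node; the IPC path and block heights are placeholders:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rpc"
)

func main() {
    client, err := rpc.Dial("/tmp/geth.ipc") // assumed IPC endpoint
    if err != nil {
        panic(err)
    }
    var modified []common.Address
    // Accounts whose nonce, balance, code hash or storage hash changed between blocks 100 and 110.
    if err := client.Call(&modified, "debug_getModifiedAccountsByNumber", 100, 110); err != nil {
        panic(err)
    }
    fmt.Println("modified accounts:", modified)
}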
@ -79,7 +79,10 @@ func TestStorageRangeAt(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
result := storageRangeAt(state.StorageTrie(addr), test.start, test.limit)
|
||||
result, err := storageRangeAt(state.StorageTrie(addr), test.start, test.limit)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !reflect.DeepEqual(result, test.want) {
|
||||
t.Fatalf("wrong result for range 0x%x.., limit %d:\ngot %s\nwant %s",
|
||||
test.start, test.limit, dumper.Sdump(result), dumper.Sdump(&test.want))
|
||||
|
@ -125,7 +125,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
|
||||
chainConfig: chainConfig,
|
||||
eventMux: ctx.EventMux,
|
||||
accountManager: ctx.AccountManager,
|
||||
engine: CreateConsensusEngine(ctx, config, chainConfig, chainDb),
|
||||
engine: CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
|
||||
shutdownChan: make(chan bool),
|
||||
stopDbUpgrade: stopDbUpgrade,
|
||||
networkId: config.NetworkId,
|
||||
@ -209,25 +209,31 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
}

// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
func CreateConsensusEngine(ctx *node.ServiceContext, config *Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
    // If proof-of-authority is requested, set it up
    if chainConfig.Clique != nil {
        return clique.New(chainConfig.Clique, db)
    }
    // Otherwise assume proof-of-work
    switch {
    case config.PowFake:
    case config.PowMode == ethash.ModeFake:
        log.Warn("Ethash used in fake mode")
        return ethash.NewFaker()
    case config.PowTest:
    case config.PowMode == ethash.ModeTest:
        log.Warn("Ethash used in test mode")
        return ethash.NewTester()
    case config.PowShared:
    case config.PowMode == ethash.ModeShared:
        log.Warn("Ethash used in shared mode")
        return ethash.NewShared()
    default:
        engine := ethash.New(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
            config.EthashDatasetDir, config.EthashDatasetsInMem, config.EthashDatasetsOnDisk)
        engine := ethash.New(ethash.Config{
            CacheDir:       ctx.ResolvePath(config.CacheDir),
            CachesInMem:    config.CachesInMem,
            CachesOnDisk:   config.CachesOnDisk,
            DatasetDir:     config.DatasetDir,
            DatasetsInMem:  config.DatasetsInMem,
            DatasetsOnDisk: config.DatasetsOnDisk,
        })
        engine.SetThreads(-1) // Disable CPU mining
        return engine
    }
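With the PowFake/PowTest/PowShared booleans gone, callers select the mode through the nested ethash.Config instead. A minimal sketch of the new call shape from outside the eth package (ctx, chainConfig and chainDb are assumed to already exist, e.g. inside a service constructor):

cfg := eth.DefaultConfig             // picks up the nested Ethash defaults from config.go
cfg.Ethash.PowMode = ethash.ModeFake // previously cfg.PowFake = true
engine := eth.CreateConsensusEngine(ctx, &cfg.Ethash, chainConfig, chainDb)
_ = engine // hand the engine to the blockchain / miner as before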
@ -304,10 +310,17 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) {
|
||||
}
|
||||
if wallets := s.AccountManager().Wallets(); len(wallets) > 0 {
|
||||
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
|
||||
return accounts[0].Address, nil
|
||||
etherbase := accounts[0].Address
|
||||
|
||||
s.lock.Lock()
|
||||
s.etherbase = etherbase
|
||||
s.lock.Unlock()
|
||||
|
||||
log.Info("Etherbase automatically configured", "address", etherbase)
|
||||
return etherbase, nil
|
||||
}
|
||||
}
|
||||
return common.Address{}, fmt.Errorf("etherbase address must be explicitly specified")
|
||||
return common.Address{}, fmt.Errorf("etherbase must be explicitly specified")
|
||||
}
|
||||
|
||||
// set in js console via admin interface or wrapper from cli flags
|
||||
|
@ -25,6 +25,7 @@ import (

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/consensus/ethash"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/eth/downloader"
    "github.com/ethereum/go-ethereum/eth/gasprice"

@ -33,16 +34,18 @@ import (

// DefaultConfig contains default settings for use on the Ethereum main net.
var DefaultConfig = Config{
    SyncMode:             downloader.FastSync,
    EthashCacheDir:       "ethash",
    EthashCachesInMem:    2,
    EthashCachesOnDisk:   3,
    EthashDatasetsInMem:  1,
    EthashDatasetsOnDisk: 2,
    NetworkId:            1,
    LightPeers:           20,
    DatabaseCache:        128,
    GasPrice:             big.NewInt(18 * params.Shannon),
    SyncMode: downloader.FastSync,
    Ethash: ethash.Config{
        CacheDir:       "ethash",
        CachesInMem:    2,
        CachesOnDisk:   3,
        DatasetsInMem:  1,
        DatasetsOnDisk: 2,
    },
    NetworkId:     1,
    LightPeers:    20,
    DatabaseCache: 128,
    GasPrice:      big.NewInt(18 * params.Shannon),

    TxPool: core.DefaultTxPoolConfig,
    GPO: gasprice.Config{

@ -59,9 +62,9 @@ func init() {
        }
    }
    if runtime.GOOS == "windows" {
        DefaultConfig.EthashDatasetDir = filepath.Join(home, "AppData", "Ethash")
        DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "AppData", "Ethash")
    } else {
        DefaultConfig.EthashDatasetDir = filepath.Join(home, ".ethash")
        DefaultConfig.Ethash.DatasetDir = filepath.Join(home, ".ethash")
    }
}
@ -92,12 +95,7 @@ type Config struct {
    GasPrice *big.Int

    // Ethash options
    EthashCacheDir       string
    EthashCachesInMem    int
    EthashCachesOnDisk   int
    EthashDatasetDir     string
    EthashDatasetsInMem  int
    EthashDatasetsOnDisk int
    Ethash ethash.Config

    // Transaction pool options
    TxPool core.TxPoolConfig

@ -109,10 +107,7 @@ type Config struct {
    EnablePreimageRecording bool

    // Miscellaneous options
    DocRoot   string `toml:"-"`
    PowFake   bool   `toml:"-"`
    PowTest   bool   `toml:"-"`
    PowShared bool   `toml:"-"`
    DocRoot string `toml:"-"`
}

type configMarshaling struct {
@ -704,6 +704,7 @@ func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
|
||||
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
|
||||
|
||||
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
@ -1166,6 +1167,8 @@ func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 6
|
||||
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
|
||||
|
||||
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
@ -1198,6 +1201,8 @@ func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(
|
||||
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
|
||||
|
||||
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
|
||||
t.Parallel()
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
@ -1310,6 +1315,8 @@ func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDr
|
||||
func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
|
||||
|
||||
func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
|
||||
t.Parallel()
|
||||
|
||||
// Define the disconnection requirement for individual hash fetch errors
|
||||
tests := []struct {
|
||||
result error
|
||||
@ -1665,12 +1672,26 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {

// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
func TestDeliverHeadersHang62(t *testing.T)      { testDeliverHeadersHang(t, 62, FullSync) }
func TestDeliverHeadersHang63Full(t *testing.T)  { testDeliverHeadersHang(t, 63, FullSync) }
func TestDeliverHeadersHang63Fast(t *testing.T)  { testDeliverHeadersHang(t, 63, FastSync) }
func TestDeliverHeadersHang64Full(t *testing.T)  { testDeliverHeadersHang(t, 64, FullSync) }
func TestDeliverHeadersHang64Fast(t *testing.T)  { testDeliverHeadersHang(t, 64, FastSync) }
func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64, LightSync) }
// We use data driven subtests to manage this so that it will be parallel on its own
// and not with the other tests, avoiding intermittent failures.
func TestDeliverHeadersHang(t *testing.T) {
    testCases := []struct {
        protocol int
        syncMode SyncMode
    }{
        {62, FullSync},
        {63, FullSync},
        {63, FastSync},
        {64, FullSync},
        {64, FastSync},
        {64, LightSync},
    }
    for _, tc := range testCases {
        t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
            testDeliverHeadersHang(t, tc.protocol, tc.syncMode)
        })
    }
}
type floodingTestPeer struct {
|
||||
peer Peer
|
||||
@ -1703,7 +1724,7 @@ func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int
|
||||
// Deliver the actual requested headers.
|
||||
go ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
|
||||
// None of the extra deliveries should block.
|
||||
timeout := time.After(15 * time.Second)
|
||||
timeout := time.After(60 * time.Second)
|
||||
for i := 0; i < cap(deliveriesDone); i++ {
|
||||
select {
|
||||
case <-deliveriesDone:
|
||||
@ -1732,7 +1753,6 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
|
||||
tester.downloader.peers.peers["peer"].peer,
|
||||
tester,
|
||||
}
|
||||
|
||||
if err := tester.sync("peer", nil, mode); err != nil {
|
||||
t.Errorf("sync failed: %v", err)
|
||||
}
|
||||
@ -1742,12 +1762,28 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
|
||||
|
||||
// Tests that if fast sync aborts in the critical section, it can restart a few
|
||||
// times before giving up.
|
||||
func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
|
||||
func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
|
||||
func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
|
||||
func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
|
||||
// We use data driven subtests to manage this so that it will be parallel on its own
|
||||
// and not with the other tests, avoiding intermittent failures.
|
||||
func TestFastCriticalRestarts(t *testing.T) {
|
||||
testCases := []struct {
|
||||
protocol int
|
||||
progress bool
|
||||
}{
|
||||
{63, false},
|
||||
{64, false},
|
||||
{63, true},
|
||||
{64, true},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("protocol %d progress %v", tc.protocol, tc.progress), func(t *testing.T) {
|
||||
testFastCriticalRestarts(t, tc.protocol, tc.progress)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
|
||||
t.Parallel()
|
||||
|
||||
tester := newTester()
|
||||
defer tester.terminate()
|
||||
|
||||
@ -1776,6 +1812,7 @@ func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
|
||||
|
||||
// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
|
||||
if i == 0 {
|
||||
time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
|
||||
if tester.downloader.fsPivotLock == nil {
|
||||
time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
|
||||
t.Fatalf("pivot block not locked in after critical section failure")
|
||||
|
@ -548,7 +548,7 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerC
|
||||
return idle, total
|
||||
}
|
||||
|
||||
// medianRTT returns the median RTT of te peerset, considering only the tuning
|
||||
// medianRTT returns the median RTT of the peerset, considering only the tuning
|
||||
// peers if there are more peers available.
|
||||
func (ps *peerSet) medianRTT() time.Duration {
|
||||
// Gather all the currnetly measured round trip times
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/gasprice"
|
||||
@ -36,10 +37,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||
TxPool core.TxPoolConfig
|
||||
GPO gasprice.Config
|
||||
EnablePreimageRecording bool
|
||||
DocRoot string `toml:"-"`
|
||||
PowFake bool `toml:"-"`
|
||||
PowTest bool `toml:"-"`
|
||||
PowShared bool `toml:"-"`
|
||||
DocRoot string `toml:"-"`
|
||||
PowMode ethash.Mode `toml:"-"`
|
||||
}
|
||||
var enc Config
|
||||
enc.Genesis = c.Genesis
|
||||
@ -54,19 +53,17 @@ func (c Config) MarshalTOML() (interface{}, error) {
|
||||
enc.MinerThreads = c.MinerThreads
|
||||
enc.ExtraData = c.ExtraData
|
||||
enc.GasPrice = c.GasPrice
|
||||
enc.EthashCacheDir = c.EthashCacheDir
|
||||
enc.EthashCachesInMem = c.EthashCachesInMem
|
||||
enc.EthashCachesOnDisk = c.EthashCachesOnDisk
|
||||
enc.EthashDatasetDir = c.EthashDatasetDir
|
||||
enc.EthashDatasetsInMem = c.EthashDatasetsInMem
|
||||
enc.EthashDatasetsOnDisk = c.EthashDatasetsOnDisk
|
||||
enc.EthashCacheDir = c.Ethash.CacheDir
|
||||
enc.EthashCachesInMem = c.Ethash.CachesInMem
|
||||
enc.EthashCachesOnDisk = c.Ethash.CachesOnDisk
|
||||
enc.EthashDatasetDir = c.Ethash.DatasetDir
|
||||
enc.EthashDatasetsInMem = c.Ethash.DatasetsInMem
|
||||
enc.EthashDatasetsOnDisk = c.Ethash.DatasetsOnDisk
|
||||
enc.TxPool = c.TxPool
|
||||
enc.GPO = c.GPO
|
||||
enc.EnablePreimageRecording = c.EnablePreimageRecording
|
||||
enc.DocRoot = c.DocRoot
|
||||
enc.PowFake = c.PowFake
|
||||
enc.PowTest = c.PowTest
|
||||
enc.PowShared = c.PowShared
|
||||
enc.PowMode = c.Ethash.PowMode
|
||||
return &enc, nil
|
||||
}
|
||||
|
||||
@ -94,10 +91,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
TxPool *core.TxPoolConfig
|
||||
GPO *gasprice.Config
|
||||
EnablePreimageRecording *bool
|
||||
DocRoot *string `toml:"-"`
|
||||
PowFake *bool `toml:"-"`
|
||||
PowTest *bool `toml:"-"`
|
||||
PowShared *bool `toml:"-"`
|
||||
DocRoot *string `toml:"-"`
|
||||
PowMode *ethash.Mode `toml:"-"`
|
||||
}
|
||||
var dec Config
|
||||
if err := unmarshal(&dec); err != nil {
|
||||
@ -140,22 +135,22 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
c.GasPrice = dec.GasPrice
|
||||
}
|
||||
if dec.EthashCacheDir != nil {
|
||||
c.EthashCacheDir = *dec.EthashCacheDir
|
||||
c.Ethash.CacheDir = *dec.EthashCacheDir
|
||||
}
|
||||
if dec.EthashCachesInMem != nil {
|
||||
c.EthashCachesInMem = *dec.EthashCachesInMem
|
||||
c.Ethash.CachesInMem = *dec.EthashCachesInMem
|
||||
}
|
||||
if dec.EthashCachesOnDisk != nil {
|
||||
c.EthashCachesOnDisk = *dec.EthashCachesOnDisk
|
||||
c.Ethash.CachesOnDisk = *dec.EthashCachesOnDisk
|
||||
}
|
||||
if dec.EthashDatasetDir != nil {
|
||||
c.EthashDatasetDir = *dec.EthashDatasetDir
|
||||
c.Ethash.DatasetDir = *dec.EthashDatasetDir
|
||||
}
|
||||
if dec.EthashDatasetsInMem != nil {
|
||||
c.EthashDatasetsInMem = *dec.EthashDatasetsInMem
|
||||
c.Ethash.DatasetsInMem = *dec.EthashDatasetsInMem
|
||||
}
|
||||
if dec.EthashDatasetsOnDisk != nil {
|
||||
c.EthashDatasetsOnDisk = *dec.EthashDatasetsOnDisk
|
||||
c.Ethash.DatasetsOnDisk = *dec.EthashDatasetsOnDisk
|
||||
}
|
||||
if dec.TxPool != nil {
|
||||
c.TxPool = *dec.TxPool
|
||||
@ -169,14 +164,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
|
||||
if dec.DocRoot != nil {
|
||||
c.DocRoot = *dec.DocRoot
|
||||
}
|
||||
if dec.PowFake != nil {
|
||||
c.PowFake = *dec.PowFake
|
||||
}
|
||||
if dec.PowTest != nil {
|
||||
c.PowTest = *dec.PowTest
|
||||
}
|
||||
if dec.PowShared != nil {
|
||||
c.PowShared = *dec.PowShared
|
||||
if dec.PowMode != nil {
|
||||
c.Ethash.PowMode = *dec.PowMode
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -151,9 +151,9 @@ func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string {
    // Define a formatter to flatten a transaction into a string
    var format = func(tx *types.Transaction) string {
        if to := tx.To(); to != nil {
            return fmt.Sprintf("%s: %v wei + %v × %v gas", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice())
            return fmt.Sprintf("%s: %v wei + %v gas × %v wei", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice())
        }
        return fmt.Sprintf("contract creation: %v wei + %v × %v gas", tx.Value(), tx.Gas(), tx.GasPrice())
        return fmt.Sprintf("contract creation: %v wei + %v gas × %v wei", tx.Value(), tx.Gas(), tx.GasPrice())
    }
    // Flatten the pending transactions
    for account, txs := range pending {

@ -710,45 +710,52 @@ type ExecutionResult struct {
// StructLogRes stores a structured log emitted by the EVM while replaying a
// transaction in debug mode
type StructLogRes struct {
    Pc      uint64            `json:"pc"`
    Op      string            `json:"op"`
    Gas     uint64            `json:"gas"`
    GasCost uint64            `json:"gasCost"`
    Depth   int               `json:"depth"`
    Error   error             `json:"error"`
    Stack   []string          `json:"stack"`
    Memory  []string          `json:"memory"`
    Storage map[string]string `json:"storage"`
    Pc      uint64             `json:"pc"`
    Op      string             `json:"op"`
    Gas     uint64             `json:"gas"`
    GasCost uint64             `json:"gasCost"`
    Depth   int                `json:"depth"`
    Error   error              `json:"error,omitempty"`
    Stack   *[]string          `json:"stack,omitempty"`
    Memory  *[]string          `json:"memory,omitempty"`
    Storage *map[string]string `json:"storage,omitempty"`
}

// formatLogs formats EVM returned structured logs for json output
func FormatLogs(structLogs []vm.StructLog) []StructLogRes {
    formattedStructLogs := make([]StructLogRes, len(structLogs))
    for index, trace := range structLogs {
        formattedStructLogs[index] = StructLogRes{
func FormatLogs(logs []vm.StructLog) []StructLogRes {
    formatted := make([]StructLogRes, len(logs))
    for index, trace := range logs {
        formatted[index] = StructLogRes{
            Pc:      trace.Pc,
            Op:      trace.Op.String(),
            Gas:     trace.Gas,
            GasCost: trace.GasCost,
            Depth:   trace.Depth,
            Error:   trace.Err,
            Stack:   make([]string, len(trace.Stack)),
            Storage: make(map[string]string),
        }

        for i, stackValue := range trace.Stack {
            formattedStructLogs[index].Stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32))
        if trace.Stack != nil {
            stack := make([]string, len(trace.Stack))
            for i, stackValue := range trace.Stack {
                stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32))
            }
            formatted[index].Stack = &stack
        }

        for i := 0; i+32 <= len(trace.Memory); i += 32 {
            formattedStructLogs[index].Memory = append(formattedStructLogs[index].Memory, fmt.Sprintf("%x", trace.Memory[i:i+32]))
        if trace.Memory != nil {
            memory := make([]string, 0, (len(trace.Memory)+31)/32)
            for i := 0; i+32 <= len(trace.Memory); i += 32 {
                memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32]))
            }
            formatted[index].Memory = &memory
        }

        for i, storageValue := range trace.Storage {
            formattedStructLogs[index].Storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue)
        if trace.Storage != nil {
            storage := make(map[string]string)
            for i, storageValue := range trace.Storage {
                storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue)
            }
            formatted[index].Storage = &storage
        }
    }
    return formattedStructLogs
    return formatted
}
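The move to pointer fields with omitempty means that trace steps which captured no stack, memory or storage no longer emit "stack":[], "memory":[] or "storage":{} in the JSON response; the keys are simply absent. A small illustrative sketch (FormatLogs lives in the internal/ethapi package, so this only compiles inside the go-ethereum tree):

logs := FormatLogs([]vm.StructLog{
    {Pc: 0, Op: vm.PUSH1, Gas: 3, GasCost: 3, Depth: 1}, // no stack/memory/storage captured
})
out, _ := json.Marshal(logs)
fmt.Println(string(out))
// [{"pc":0,"op":"PUSH1","gas":3,"gasCost":3,"depth":1}]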
// rpcOutputBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
|
||||
|
@ -130,28 +130,28 @@ type dbWrapper struct {
|
||||
}
|
||||
|
||||
// getBalance retrieves an account's balance
|
||||
func (dw *dbWrapper) getBalance(addr common.Address) *big.Int {
|
||||
return dw.db.GetBalance(addr)
|
||||
func (dw *dbWrapper) getBalance(addr []byte) *big.Int {
|
||||
return dw.db.GetBalance(common.BytesToAddress(addr))
|
||||
}
|
||||
|
||||
// getNonce retrieves an account's nonce
|
||||
func (dw *dbWrapper) getNonce(addr common.Address) uint64 {
|
||||
return dw.db.GetNonce(addr)
|
||||
func (dw *dbWrapper) getNonce(addr []byte) uint64 {
|
||||
return dw.db.GetNonce(common.BytesToAddress(addr))
|
||||
}
|
||||
|
||||
// getCode retrieves an account's code
|
||||
func (dw *dbWrapper) getCode(addr common.Address) []byte {
|
||||
return dw.db.GetCode(addr)
|
||||
func (dw *dbWrapper) getCode(addr []byte) []byte {
|
||||
return dw.db.GetCode(common.BytesToAddress(addr))
|
||||
}
|
||||
|
||||
// getState retrieves an account's state data for the given hash
|
||||
func (dw *dbWrapper) getState(addr common.Address, hash common.Hash) common.Hash {
|
||||
return dw.db.GetState(addr, hash)
|
||||
func (dw *dbWrapper) getState(addr []byte, hash common.Hash) common.Hash {
|
||||
return dw.db.GetState(common.BytesToAddress(addr), hash)
|
||||
}
|
||||
|
||||
// exists returns true iff the account exists
|
||||
func (dw *dbWrapper) exists(addr common.Address) bool {
|
||||
return dw.db.Exist(addr)
|
||||
func (dw *dbWrapper) exists(addr []byte) bool {
|
||||
return dw.db.Exist(common.BytesToAddress(addr))
|
||||
}
|
||||
|
||||
// toValue returns an otto.Value for the dbWrapper
|
||||
@ -200,19 +200,18 @@ func (c *contractWrapper) toValue(vm *otto.Otto) otto.Value {
|
||||
// JavascriptTracer provides an implementation of Tracer that evaluates a
|
||||
// Javascript function for each VM execution step.
|
||||
type JavascriptTracer struct {
|
||||
vm *otto.Otto // Javascript VM instance
|
||||
traceobj *otto.Object // User-supplied object to call
|
||||
log map[string]interface{} // (Reusable) map for the `log` arg to `step`
|
||||
logvalue otto.Value // JS view of `log`
|
||||
memory *memoryWrapper // Wrapper around the VM memory
|
||||
memvalue otto.Value // JS view of `memory`
|
||||
stack *stackWrapper // Wrapper around the VM stack
|
||||
stackvalue otto.Value // JS view of `stack`
|
||||
db *dbWrapper // Wrapper around the VM environment
|
||||
dbvalue otto.Value // JS view of `db`
|
||||
contract *contractWrapper // Wrapper around the contract object
|
||||
contractvalue otto.Value // JS view of `contract`
|
||||
err error // Error, if one has occurred
|
||||
vm *otto.Otto // Javascript VM instance
|
||||
traceobj *otto.Object // User-supplied object to call
|
||||
op *opCodeWrapper // Wrapper around the VM opcode
|
||||
log map[string]interface{} // (Reusable) map for the `log` arg to `step`
|
||||
logvalue otto.Value // JS view of `log`
|
||||
memory *memoryWrapper // Wrapper around the VM memory
|
||||
stack *stackWrapper // Wrapper around the VM stack
|
||||
db *dbWrapper // Wrapper around the VM environment
|
||||
dbvalue otto.Value // JS view of `db`
|
||||
contract *contractWrapper // Wrapper around the contract object
|
||||
err error // Error, if one has occurred
|
||||
result interface{} // Final result to return to the user
|
||||
}
|
||||
|
||||
// NewJavascriptTracer instantiates a new JavascriptTracer instance.
|
||||
@ -230,7 +229,6 @@ func NewJavascriptTracer(code string) (*JavascriptTracer, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check the required functions exist
|
||||
step, err := jstracer.Get("step")
|
||||
if err != nil {
|
||||
@ -247,31 +245,34 @@ func NewJavascriptTracer(code string) (*JavascriptTracer, error) {
|
||||
if !result.IsFunction() {
|
||||
return nil, fmt.Errorf("Trace object must expose a function result()")
|
||||
}
|
||||
|
||||
// Create the persistent log object
|
||||
log := make(map[string]interface{})
|
||||
var (
|
||||
op = new(opCodeWrapper)
|
||||
mem = new(memoryWrapper)
|
||||
stack = new(stackWrapper)
|
||||
db = new(dbWrapper)
|
||||
contract = new(contractWrapper)
|
||||
)
|
||||
log := map[string]interface{}{
|
||||
"op": op.toValue(vm),
|
||||
"memory": mem.toValue(vm),
|
||||
"stack": stack.toValue(vm),
|
||||
"contract": contract.toValue(vm),
|
||||
}
|
||||
logvalue, _ := vm.ToValue(log)
|
||||
|
||||
// Create persistent wrappers for memory and stack
|
||||
mem := &memoryWrapper{}
|
||||
stack := &stackWrapper{}
|
||||
db := &dbWrapper{}
|
||||
contract := &contractWrapper{}
|
||||
|
||||
return &JavascriptTracer{
|
||||
vm: vm,
|
||||
traceobj: jstracer,
|
||||
log: log,
|
||||
logvalue: logvalue,
|
||||
memory: mem,
|
||||
memvalue: mem.toValue(vm),
|
||||
stack: stack,
|
||||
stackvalue: stack.toValue(vm),
|
||||
db: db,
|
||||
dbvalue: db.toValue(vm),
|
||||
contract: contract,
|
||||
contractvalue: contract.toValue(vm),
|
||||
err: nil,
|
||||
vm: vm,
|
||||
traceobj: jstracer,
|
||||
op: op,
|
||||
log: log,
|
||||
logvalue: logvalue,
|
||||
memory: mem,
|
||||
stack: stack,
|
||||
db: db,
|
||||
dbvalue: db.toValue(vm),
|
||||
contract: contract,
|
||||
err: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -319,24 +320,22 @@ func wrapError(context string, err error) error {
|
||||
// CaptureState implements the Tracer interface to trace a single step of VM execution
|
||||
func (jst *JavascriptTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, memory *vm.Memory, stack *vm.Stack, contract *vm.Contract, depth int, err error) error {
|
||||
if jst.err == nil {
|
||||
jst.op.op = op
|
||||
jst.memory.memory = memory
|
||||
jst.stack.stack = stack
|
||||
jst.db.db = env.StateDB
|
||||
jst.contract.contract = contract
|
||||
|
||||
ocw := &opCodeWrapper{op}
|
||||
|
||||
jst.log["pc"] = pc
|
||||
jst.log["op"] = ocw.toValue(jst.vm)
|
||||
jst.log["gas"] = gas
|
||||
jst.log["gasPrice"] = cost
|
||||
jst.log["memory"] = jst.memvalue
|
||||
jst.log["stack"] = jst.stackvalue
|
||||
jst.log["contract"] = jst.contractvalue
|
||||
jst.log["cost"] = cost
|
||||
jst.log["depth"] = depth
|
||||
jst.log["account"] = contract.Address()
|
||||
jst.log["err"] = err
|
||||
|
||||
delete(jst.log, "error")
|
||||
if err != nil {
|
||||
jst.log["error"] = err
|
||||
}
|
||||
_, err := jst.callSafely("step", jst.logvalue, jst.dbvalue)
|
||||
if err != nil {
|
||||
jst.err = wrapError("step", err)
|
||||
|
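For reference, the object handed to NewJavascriptTracer is plain JavaScript exposing step and result functions; the log fields populated in CaptureState above (pc, op, gas, cost, depth, ...) are what step sees. A minimal sketch of a tracer that just counts executed opcodes (the package hosting NewJavascriptTracer and the vm.Config wiring are assumptions, not shown in this diff):

code := `{
    count: 0,
    step: function(log, db) { this.count++; },
    result: function() { return this.count; }
}`
tracer, err := NewJavascriptTracer(code)
if err != nil {
    panic(err)
}
// Typically handed to the EVM via something like vm.Config{Debug: true, Tracer: tracer}.
_ = tracer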
@ -354,6 +354,18 @@ web3._extend({
            call: 'debug_storageRangeAt',
            params: 5,
        }),
        new web3._extend.Method({
            name: 'getModifiedAccountsByNumber',
            call: 'debug_getModifiedAccountsByNumber',
            params: 2,
            inputFormatter: [null, null],
        }),
        new web3._extend.Method({
            name: 'getModifiedAccountsByHash',
            call: 'debug_getModifiedAccountsByHash',
            params: 2,
            inputFormatter: [null, null],
        }),
    ],
    properties: []
});
|
@ -98,7 +98,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
|
||||
peers: peers,
|
||||
reqDist: newRequestDistributor(peers, quitSync),
|
||||
accountManager: ctx.AccountManager,
|
||||
engine: eth.CreateConsensusEngine(ctx, config, chainConfig, chainDb),
|
||||
engine: eth.CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
|
||||
shutdownChan: make(chan bool),
|
||||
networkId: config.NetworkId,
|
||||
bloomRequests: make(chan chan *bloombits.Retrieval),
|
||||
|
@ -416,7 +416,9 @@ func TestTransactionStatusLes2(t *testing.T) {
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
pm := newTestProtocolManagerMust(t, false, 0, nil, nil, nil, db)
|
||||
chain := pm.blockchain.(*core.BlockChain)
|
||||
txpool := core.NewTxPool(core.DefaultTxPoolConfig, params.TestChainConfig, chain)
|
||||
config := core.DefaultTxPoolConfig
|
||||
config.Journal = ""
|
||||
txpool := core.NewTxPool(config, params.TestChainConfig, chain)
|
||||
pm.txpool = txpool
|
||||
peer, _ := newTestPeer(t, "peer", 2, pm, true)
|
||||
defer peer.close()
|
||||
|
@ -42,7 +42,7 @@ type unconfirmedBlock struct {
|
||||
// unconfirmedBlocks implements a data structure to maintain locally mined blocks
|
||||
// have have not yet reached enough maturity to guarantee chain inclusion. It is
|
||||
// used by the miner to provide logs to the user when a previously mined block
|
||||
// has a high enough guarantee to not be reorged out of te canonical chain.
|
||||
// has a high enough guarantee to not be reorged out of the canonical chain.
|
||||
type unconfirmedBlocks struct {
|
||||
chain headerRetriever // Blockchain to verify canonical status through
|
||||
depth uint // Depth after which to discard previous blocks
|
||||
|
@ -62,6 +62,16 @@ func (bi *BigInt) SetInt64(x int64) {
|
||||
bi.bigint.SetInt64(x)
|
||||
}
|
||||
|
||||
// Sign returns:
|
||||
//
|
||||
// -1 if x < 0
|
||||
// 0 if x == 0
|
||||
// +1 if x > 0
|
||||
//
|
||||
func (bi *BigInt) Sign() int {
|
||||
return bi.bigint.Sign()
|
||||
}
|
||||
|
||||
// SetString sets the big int to x.
|
||||
//
|
||||
// The string prefix determines the actual conversion base. A prefix of "0x" or
|
||||
|
@ -135,6 +135,9 @@ type Config struct {
|
||||
// *WARNING* Only set this if the node is running in a trusted network, exposing
|
||||
// private APIs to untrusted users is a major security risk.
|
||||
WSExposeAll bool `toml:",omitempty"`
|
||||
|
||||
// Logger is a custom logger to use with the p2p.Server.
|
||||
Logger log.Logger
|
||||
}
|
||||
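With the new Logger field, an embedding application can route all node output through its own contextual logger instead of the global root logger. A minimal sketch (the data directory and logger context are placeholders):

logger := log.New("service", "embedded-geth") // github.com/ethereum/go-ethereum/log
stack, err := node.New(&node.Config{
    DataDir: "/tmp/embedded-geth", // example path
    Logger:  logger,
})
if err != nil {
    panic(err)
}
_ = stack // register services, then stack.Start()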
|
||||
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
|
||||
@ -360,35 +363,43 @@ func (c *Config) parsePersistentNodes(path string) []*discover.Node {
|
||||
return nodes
|
||||
}
|
||||
|
||||
func makeAccountManager(conf *Config) (*accounts.Manager, string, error) {
|
||||
// AccountConfig determines the settings for scrypt and keydirectory
|
||||
func (c *Config) AccountConfig() (int, int, string, error) {
|
||||
scryptN := keystore.StandardScryptN
|
||||
scryptP := keystore.StandardScryptP
|
||||
if conf.UseLightweightKDF {
|
||||
if c.UseLightweightKDF {
|
||||
scryptN = keystore.LightScryptN
|
||||
scryptP = keystore.LightScryptP
|
||||
}
|
||||
|
||||
var (
|
||||
keydir string
|
||||
ephemeral string
|
||||
err error
|
||||
keydir string
|
||||
err error
|
||||
)
|
||||
switch {
|
||||
case filepath.IsAbs(conf.KeyStoreDir):
|
||||
keydir = conf.KeyStoreDir
|
||||
case conf.DataDir != "":
|
||||
if conf.KeyStoreDir == "" {
|
||||
keydir = filepath.Join(conf.DataDir, datadirDefaultKeyStore)
|
||||
case filepath.IsAbs(c.KeyStoreDir):
|
||||
keydir = c.KeyStoreDir
|
||||
case c.DataDir != "":
|
||||
if c.KeyStoreDir == "" {
|
||||
keydir = filepath.Join(c.DataDir, datadirDefaultKeyStore)
|
||||
} else {
|
||||
keydir, err = filepath.Abs(conf.KeyStoreDir)
|
||||
keydir, err = filepath.Abs(c.KeyStoreDir)
|
||||
}
|
||||
case conf.KeyStoreDir != "":
|
||||
keydir, err = filepath.Abs(conf.KeyStoreDir)
|
||||
default:
|
||||
case c.KeyStoreDir != "":
|
||||
keydir, err = filepath.Abs(c.KeyStoreDir)
|
||||
}
|
||||
return scryptN, scryptP, keydir, err
|
||||
}
|
||||
|
||||
func makeAccountManager(conf *Config) (*accounts.Manager, string, error) {
|
||||
scryptN, scryptP, keydir, err := conf.AccountConfig()
|
||||
var ephemeral string
|
||||
if keydir == "" {
|
||||
// There is no datadir.
|
||||
keydir, err = ioutil.TempDir("", "go-ethereum-keystore")
|
||||
ephemeral = keydir
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
node/node.go
@ -69,6 +69,8 @@ type Node struct {
|
||||
|
||||
stop chan struct{} // Channel to wait for termination notifications
|
||||
lock sync.RWMutex
|
||||
|
||||
log log.Logger
|
||||
}
|
||||
|
||||
// New creates a new P2P node, ready for protocol registration.
|
||||
@ -101,6 +103,9 @@ func New(conf *Config) (*Node, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if conf.Logger == nil {
|
||||
conf.Logger = log.New()
|
||||
}
|
||||
// Note: any interaction with Config that would create/touch files
|
||||
// in the data directory or instance directory is delayed until Start.
|
||||
return &Node{
|
||||
@ -112,6 +117,7 @@ func New(conf *Config) (*Node, error) {
|
||||
httpEndpoint: conf.HTTPEndpoint(),
|
||||
wsEndpoint: conf.WSEndpoint(),
|
||||
eventmux: new(event.TypeMux),
|
||||
log: conf.Logger,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -146,6 +152,7 @@ func (n *Node) Start() error {
|
||||
n.serverConfig = n.config.P2P
|
||||
n.serverConfig.PrivateKey = n.config.NodeKey()
|
||||
n.serverConfig.Name = n.config.NodeName()
|
||||
n.serverConfig.Logger = n.log
|
||||
if n.serverConfig.StaticNodes == nil {
|
||||
n.serverConfig.StaticNodes = n.config.StaticNodes()
|
||||
}
|
||||
@ -156,7 +163,7 @@ func (n *Node) Start() error {
|
||||
n.serverConfig.NodeDatabase = n.config.NodeDB()
|
||||
}
|
||||
running := &p2p.Server{Config: n.serverConfig}
|
||||
log.Info("Starting peer-to-peer node", "instance", n.serverConfig.Name)
|
||||
n.log.Info("Starting peer-to-peer node", "instance", n.serverConfig.Name)
|
||||
|
||||
// Otherwise copy and specialize the P2P configuration
|
||||
services := make(map[reflect.Type]Service)
|
||||
@ -280,7 +287,7 @@ func (n *Node) startInProc(apis []rpc.API) error {
|
||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug(fmt.Sprintf("InProc registered %T under '%s'", api.Service, api.Namespace))
|
||||
n.log.Debug(fmt.Sprintf("InProc registered %T under '%s'", api.Service, api.Namespace))
|
||||
}
|
||||
n.inprocHandler = handler
|
||||
return nil
|
||||
@ -306,7 +313,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug(fmt.Sprintf("IPC registered %T under '%s'", api.Service, api.Namespace))
|
||||
n.log.Debug(fmt.Sprintf("IPC registered %T under '%s'", api.Service, api.Namespace))
|
||||
}
|
||||
// All APIs registered, start the IPC listener
|
||||
var (
|
||||
@ -317,7 +324,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
||||
return err
|
||||
}
|
||||
go func() {
|
||||
log.Info(fmt.Sprintf("IPC endpoint opened: %s", n.ipcEndpoint))
|
||||
n.log.Info(fmt.Sprintf("IPC endpoint opened: %s", n.ipcEndpoint))
|
||||
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
@ -330,7 +337,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
|
||||
return
|
||||
}
|
||||
// Not closed, just some error; report and continue
|
||||
log.Error(fmt.Sprintf("IPC accept failed: %v", err))
|
||||
n.log.Error(fmt.Sprintf("IPC accept failed: %v", err))
|
||||
continue
|
||||
}
|
||||
go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
|
||||
@ -349,7 +356,7 @@ func (n *Node) stopIPC() {
|
||||
n.ipcListener.Close()
|
||||
n.ipcListener = nil
|
||||
|
||||
log.Info(fmt.Sprintf("IPC endpoint closed: %s", n.ipcEndpoint))
|
||||
n.log.Info(fmt.Sprintf("IPC endpoint closed: %s", n.ipcEndpoint))
|
||||
}
|
||||
if n.ipcHandler != nil {
|
||||
n.ipcHandler.Stop()
|
||||
@ -375,7 +382,7 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
|
||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug(fmt.Sprintf("HTTP registered %T under '%s'", api.Service, api.Namespace))
|
||||
n.log.Debug(fmt.Sprintf("HTTP registered %T under '%s'", api.Service, api.Namespace))
|
||||
}
|
||||
}
|
||||
// All APIs registered, start the HTTP listener
|
||||
@ -387,7 +394,7 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
|
||||
return err
|
||||
}
|
||||
go rpc.NewHTTPServer(cors, handler).Serve(listener)
|
||||
log.Info(fmt.Sprintf("HTTP endpoint opened: http://%s", endpoint))
|
||||
n.log.Info(fmt.Sprintf("HTTP endpoint opened: http://%s", endpoint))
|
||||
|
||||
// All listeners booted successfully
|
||||
n.httpEndpoint = endpoint
|
||||
@ -403,7 +410,7 @@ func (n *Node) stopHTTP() {
|
||||
n.httpListener.Close()
|
||||
n.httpListener = nil
|
||||
|
||||
log.Info(fmt.Sprintf("HTTP endpoint closed: http://%s", n.httpEndpoint))
|
||||
n.log.Info(fmt.Sprintf("HTTP endpoint closed: http://%s", n.httpEndpoint))
|
||||
}
|
||||
if n.httpHandler != nil {
|
||||
n.httpHandler.Stop()
|
||||
@ -429,7 +436,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
|
||||
if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug(fmt.Sprintf("WebSocket registered %T under '%s'", api.Service, api.Namespace))
|
||||
n.log.Debug(fmt.Sprintf("WebSocket registered %T under '%s'", api.Service, api.Namespace))
|
||||
}
|
||||
}
|
||||
// All APIs registered, start the HTTP listener
|
||||
@ -441,7 +448,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
|
||||
return err
|
||||
}
|
||||
go rpc.NewWSServer(wsOrigins, handler).Serve(listener)
|
||||
log.Info(fmt.Sprintf("WebSocket endpoint opened: ws://%s", listener.Addr()))
|
||||
n.log.Info(fmt.Sprintf("WebSocket endpoint opened: ws://%s", listener.Addr()))
|
||||
|
||||
// All listeners booted successfully
|
||||
n.wsEndpoint = endpoint
|
||||
@ -457,7 +464,7 @@ func (n *Node) stopWS() {
|
||||
n.wsListener.Close()
|
||||
n.wsListener = nil
|
||||
|
||||
log.Info(fmt.Sprintf("WebSocket endpoint closed: ws://%s", n.wsEndpoint))
|
||||
n.log.Info(fmt.Sprintf("WebSocket endpoint closed: ws://%s", n.wsEndpoint))
|
||||
}
|
||||
if n.wsHandler != nil {
|
||||
n.wsHandler.Stop()
|
||||
@ -496,7 +503,7 @@ func (n *Node) Stop() error {
|
||||
// Release instance directory lock.
|
||||
if n.instanceDirLock != nil {
|
||||
if err := n.instanceDirLock.Release(); err != nil {
|
||||
log.Error("Can't release datadir lock", "err", err)
|
||||
n.log.Error("Can't release datadir lock", "err", err)
|
||||
}
|
||||
n.instanceDirLock = nil
|
||||
}
|
||||
|
p2p/dial.go

@ -157,7 +157,7 @@ func (s *dialstate) removeStatic(n *discover.Node) {
}

func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now time.Time) []task {
    if s.start == (time.Time{}) {
    if s.start.IsZero() {
        s.start = now
    }

@ -291,11 +291,14 @@ func (t *dialTask) Do(srv *Server) {
            return
        }
    }
    success := t.dial(srv, t.dest)
    // Try resolving the ID of static nodes if dialing failed.
    if !success && t.flags&staticDialedConn != 0 {
        if t.resolve(srv) {
            t.dial(srv, t.dest)
    err := t.dial(srv, t.dest)
    if err != nil {
        log.Trace("Dial error", "task", t, "err", err)
        // Try resolving the ID of static nodes if dialing failed.
        if _, ok := err.(*dialError); ok && t.flags&staticDialedConn != 0 {
            if t.resolve(srv) {
                t.dial(srv, t.dest)
            }
        }
    }
}

@ -334,16 +337,18 @@ func (t *dialTask) resolve(srv *Server) bool {
    return true
}

type dialError struct {
    error
}

// dial performs the actual connection attempt.
func (t *dialTask) dial(srv *Server, dest *discover.Node) bool {
func (t *dialTask) dial(srv *Server, dest *discover.Node) error {
    fd, err := srv.Dialer.Dial(dest)
    if err != nil {
        log.Trace("Dial error", "task", t, "err", err)
        return false
        return &dialError{err}
    }
    mfd := newMeteredConn(fd, false)
    srv.SetupConn(mfd, t.flags, dest)
    return true
    return srv.SetupConn(mfd, t.flags, dest)
}

func (t *dialTask) String() string {
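The dedicated dialError type is what lets Do tell a failed TCP dial (worth re-resolving a static node) apart from the handshake or setup errors now returned by SetupConn. The same pattern in isolation, as a runnable sketch (dialSomething is a hypothetical stand-in for srv.Dialer.Dial):

package main

import (
    "errors"
    "fmt"
)

// dialError wraps transport-level failures; embedding error inherits Error().
type dialError struct {
    error
}

// dialSomething is a hypothetical stand-in for the real dialer.
func dialSomething() error { return errors.New("connection refused") }

func connect() error {
    if err := dialSomething(); err != nil {
        return &dialError{err} // only the dial itself gets the wrapper type
    }
    return nil // handshake or setup errors would be returned unwrapped
}

func main() {
    err := connect()
    if _, ok := err.(*dialError); ok {
        fmt.Println("dial failed, worth re-resolving the node:", err)
    }
}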
@ -160,6 +160,11 @@ func (p *Peer) String() string {
|
||||
return fmt.Sprintf("Peer %x %v", p.rw.id[:8], p.RemoteAddr())
|
||||
}
|
||||
|
||||
// Inbound returns true if the peer is an inbound connection
|
||||
func (p *Peer) Inbound() bool {
|
||||
return p.rw.flags&inboundConn != 0
|
||||
}
|
||||
|
||||
func newPeer(conn *conn, protocols []Protocol) *Peer {
|
||||
protomap := matchProtocols(protocols, conn.caps, conn)
|
||||
p := &Peer{
|
||||
|
@ -139,6 +139,9 @@ type Config struct {
|
||||
// If EnableMsgEvents is set then the server will emit PeerEvents
|
||||
// whenever a message is sent to or received from a peer
|
||||
EnableMsgEvents bool
|
||||
|
||||
// Logger is a custom logger to use with the p2p.Server.
|
||||
Logger log.Logger
|
||||
}
|
||||
|
||||
// Server manages all peer connections.
@ -172,6 +175,7 @@ type Server struct {
delpeer chan peerDrop
loopWG sync.WaitGroup // loop, listenLoop
peerFeed event.Feed
log log.Logger
}

type peerOpFunc func(map[discover.NodeID]*Peer)
@ -359,7 +363,11 @@ func (srv *Server) Start() (err error) {
return errors.New("server already running")
}
srv.running = true
log.Info("Starting P2P networking")
srv.log = srv.Config.Logger
if srv.log == nil {
srv.log = log.New()
}
srv.log.Info("Starting P2P networking")

// static fields
if srv.PrivateKey == nil {
@ -421,7 +429,7 @@ func (srv *Server) Start() (err error) {
}
}
if srv.NoDial && srv.ListenAddr == "" {
log.Warn("P2P server will be useless, neither dialing nor listening")
srv.log.Warn("P2P server will be useless, neither dialing nor listening")
}

srv.loopWG.Add(1)
@ -489,7 +497,7 @@ func (srv *Server) run(dialstate dialer) {
i := 0
for ; len(runningTasks) < maxActiveDialTasks && i < len(ts); i++ {
t := ts[i]
log.Trace("New dial task", "task", t)
srv.log.Trace("New dial task", "task", t)
go func() { t.Do(srv); taskdone <- t }()
runningTasks = append(runningTasks, t)
}
@ -517,13 +525,13 @@ running:
// This channel is used by AddPeer to add to the
// ephemeral static peer list. Add it to the dialer,
// it will keep the node connected.
log.Debug("Adding static node", "node", n)
srv.log.Debug("Adding static node", "node", n)
dialstate.addStatic(n)
case n := <-srv.removestatic:
// This channel is used by RemovePeer to send a
// disconnect request to a peer and begin the
// stop keeping the node connected
log.Debug("Removing static node", "node", n)
srv.log.Debug("Removing static node", "node", n)
dialstate.removeStatic(n)
if p, ok := peers[n.ID]; ok {
p.Disconnect(DiscRequested)
@ -536,7 +544,7 @@ running:
// A task got done. Tell dialstate about it so it
// can update its state and remove it from the active
// tasks list.
log.Trace("Dial task done", "task", t)
srv.log.Trace("Dial task done", "task", t)
dialstate.taskDone(t, time.Now())
delTask(t)
case c := <-srv.posthandshake:
@ -565,7 +573,7 @@ running:
p.events = &srv.peerFeed
}
name := truncateName(c.name)
log.Debug("Adding p2p peer", "id", c.id, "name", name, "addr", c.fd.RemoteAddr(), "peers", len(peers)+1)
srv.log.Debug("Adding p2p peer", "name", name, "addr", c.fd.RemoteAddr(), "peers", len(peers)+1)
peers[c.id] = p
go srv.runPeer(p)
}
@ -585,7 +593,7 @@ running:
}
}

log.Trace("P2P networking is spinning down")
srv.log.Trace("P2P networking is spinning down")

// Terminate discovery. If there is a running lookup it will terminate soon.
if srv.ntab != nil {
@ -639,7 +647,7 @@ type tempError interface {
// inbound connections.
func (srv *Server) listenLoop() {
defer srv.loopWG.Done()
log.Info("RLPx listener up", "self", srv.makeSelf(srv.listener, srv.ntab))
srv.log.Info("RLPx listener up", "self", srv.makeSelf(srv.listener, srv.ntab))

// This channel acts as a semaphore limiting
// active inbound connections that are lingering pre-handshake.
@ -664,10 +672,10 @@ func (srv *Server) listenLoop() {
for {
fd, err = srv.listener.Accept()
if tempErr, ok := err.(tempError); ok && tempErr.Temporary() {
log.Debug("Temporary read error", "err", err)
srv.log.Debug("Temporary read error", "err", err)
continue
} else if err != nil {
log.Debug("Read error", "err", err)
srv.log.Debug("Read error", "err", err)
return
}
break
@ -676,7 +684,7 @@ func (srv *Server) listenLoop() {
// Reject connections that do not match NetRestrict.
if srv.NetRestrict != nil {
if tcp, ok := fd.RemoteAddr().(*net.TCPAddr); ok && !srv.NetRestrict.Contains(tcp.IP) {
log.Debug("Rejected conn (not whitelisted in NetRestrict)", "addr", fd.RemoteAddr())
srv.log.Debug("Rejected conn (not whitelisted in NetRestrict)", "addr", fd.RemoteAddr())
fd.Close()
slots <- struct{}{}
continue
@ -684,7 +692,7 @@ func (srv *Server) listenLoop() {
}

fd = newMeteredConn(fd, true)
log.Trace("Accepted connection", "addr", fd.RemoteAddr())
srv.log.Trace("Accepted connection", "addr", fd.RemoteAddr())

// Spawn the handler. It will give the slot back when the connection
// has been established.
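The NetRestrict branch above only admits peers from whitelisted networks; as a side note, an illustrative constructor for turning that on (the CIDR, peer limit and helper name are assumptions, not part of this change):

// Illustrative only: narrows peers to one subnet so the
// "Rejected conn (not whitelisted in NetRestrict)" branch above fires for
// everything else. Assumed imports: crypto/ecdsa, log, p2p, p2p/netutil.
func newRestrictedServer(key *ecdsa.PrivateKey) (*p2p.Server, error) {
	restrict, err := netutil.ParseNetlist("10.2.0.0/16") // example CIDR
	if err != nil {
		return nil, err
	}
	return &p2p.Server{Config: p2p.Config{
		PrivateKey:  key,
		MaxPeers:    10,
		ListenAddr:  ":30303",
		NetRestrict: restrict,
		Logger:      log.New("node", "restricted"),
	}}, nil
}
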
@ -698,55 +706,65 @@ func (srv *Server) listenLoop() {
// SetupConn runs the handshakes and attempts to add the connection
// as a peer. It returns when the connection has been added as a peer
// or the handshakes have failed.
func (srv *Server) SetupConn(fd net.Conn, flags connFlag, dialDest *discover.Node) {
func (srv *Server) SetupConn(fd net.Conn, flags connFlag, dialDest *discover.Node) error {
self := srv.Self()
if self == nil {
return errors.New("shutdown")
}
c := &conn{fd: fd, transport: srv.newTransport(fd), flags: flags, cont: make(chan error)}
err := srv.setupConn(c, flags, dialDest)
if err != nil {
c.close(err)
srv.log.Trace("Setting up connection failed", "id", c.id, "err", err)
}
return err
}

func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *discover.Node) error {
// Prevent leftover pending conns from entering the handshake.
srv.lock.Lock()
running := srv.running
srv.lock.Unlock()
c := &conn{fd: fd, transport: srv.newTransport(fd), flags: flags, cont: make(chan error)}
if !running {
c.close(errServerStopped)
return
return errServerStopped
}
// Run the encryption handshake.
var err error
if c.id, err = c.doEncHandshake(srv.PrivateKey, dialDest); err != nil {
log.Trace("Failed RLPx handshake", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err)
c.close(err)
return
srv.log.Trace("Failed RLPx handshake", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err)
return err
}
clog := log.New("id", c.id, "addr", c.fd.RemoteAddr(), "conn", c.flags)
clog := srv.log.New("id", c.id, "addr", c.fd.RemoteAddr(), "conn", c.flags)
// For dialed connections, check that the remote public key matches.
if dialDest != nil && c.id != dialDest.ID {
c.close(DiscUnexpectedIdentity)
clog.Trace("Dialed identity mismatch", "want", c, dialDest.ID)
return
return DiscUnexpectedIdentity
}
if err := srv.checkpoint(c, srv.posthandshake); err != nil {
err = srv.checkpoint(c, srv.posthandshake)
if err != nil {
clog.Trace("Rejected peer before protocol handshake", "err", err)
c.close(err)
return
return err
}
// Run the protocol handshake
phs, err := c.doProtoHandshake(srv.ourHandshake)
if err != nil {
clog.Trace("Failed proto handshake", "err", err)
c.close(err)
return
return err
}
if phs.ID != c.id {
clog.Trace("Wrong devp2p handshake identity", "err", phs.ID)
c.close(DiscUnexpectedIdentity)
return
return DiscUnexpectedIdentity
}
c.caps, c.name = phs.Caps, phs.Name
if err := srv.checkpoint(c, srv.addpeer); err != nil {
err = srv.checkpoint(c, srv.addpeer)
if err != nil {
clog.Trace("Rejected peer", "err", err)
c.close(err)
return
return err
}
// If the checks completed successfully, runPeer has now been
// launched by run.
clog.Trace("connection set up", "inbound", dialDest == nil)
return nil
}

func truncateName(s string) string {
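With SetupConn now returning an error, callers inside package p2p can act on the failure instead of losing it; a sketch of such a call site (the helper name is hypothetical, dynDialedConn is the package's existing dial flag):

// Sketch, inside package p2p: dial a destination and surface the reason a
// connection setup failed. Assumes srv.Dialer was populated by Start().
func dialAndSetup(srv *Server, dest *discover.Node) error {
	fd, err := srv.Dialer.Dial(dest)
	if err != nil {
		return err
	}
	// The error from the handshakes and checkpoints now propagates to the caller.
	return srv.SetupConn(fd, dynDialedConn, dest)
}
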
@ -27,6 +27,7 @@ import (

"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover"
)

@ -206,6 +207,7 @@ func TestServerTaskScheduling(t *testing.T) {
quit: make(chan struct{}),
ntab: fakeTable{},
running: true,
log: log.New(),
}
srv.loopWG.Add(1)
go func() {
@ -246,7 +248,12 @@ func TestServerManyTasks(t *testing.T) {
}

var (
srv = &Server{quit: make(chan struct{}), ntab: fakeTable{}, running: true}
srv = &Server{
quit: make(chan struct{}),
ntab: fakeTable{},
running: true,
log: log.New(),
}
done = make(chan *testTask)
start, end = 0, 0
)
@ -428,6 +435,7 @@ func TestServerSetupConn(t *testing.T) {
Protocols: []Protocol{discard},
},
newTransport: func(fd net.Conn) transport { return test.tt },
log: log.New(),
}
if !test.dontstart {
if err := srv.Start(); err != nil {
@ -28,6 +28,7 @@ import (
"strings"

"github.com/docker/docker/pkg/reexec"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover"
)
@ -94,6 +95,7 @@ func (d *DockerAdapter) NewNode(config *NodeConfig) (Node, error) {
conf.Stack.P2P.NoDiscovery = true
conf.Stack.P2P.NAT = nil
conf.Stack.NoUSB = true
conf.Stack.Logger = log.New("node.id", config.ID.String())

node := &DockerNode{
ExecNode: ExecNode{
@ -359,6 +359,7 @@ func execP2PNode() {
log.Crit("error decoding _P2P_NODE_CONFIG", "err", err)
}
conf.Stack.P2P.PrivateKey = conf.Node.PrivateKey
conf.Stack.Logger = log.New("node.id", conf.Node.ID.String())

// use explicit IP address in ListenAddr so that Enode URL is usable
externalIP := func() string {
@ -24,6 +24,7 @@ import (
"sync"

"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
@ -82,7 +83,8 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) {
Dialer: s,
EnableMsgEvents: true,
},
NoUSB: true,
NoUSB: true,
Logger: log.New("node.id", id.String()),
})
if err != nil {
return nil, err
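Because the SimAdapter always sets EnableMsgEvents, each in-process node publishes message events on the server's peer feed; an illustrative consumer follows (it assumes the SubscribeEvents helper exposed by upstream go-ethereum's p2p.Server; the function name is hypothetical):

// Sketch: drain peer/message events from a running server.
// Assumed imports: log, p2p.
func watchPeerEvents(srv *p2p.Server) {
	events := make(chan *p2p.PeerEvent)
	sub := srv.SubscribeEvents(events) // assumed upstream helper wrapping the peer feed
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-events:
			log.Info("peer event", "type", ev.Type, "peer", ev.Peer)
		case err := <-sub.Err():
			log.Warn("event subscription closed", "err", err)
			return
		}
	}
}
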
@ -83,6 +83,9 @@ type NodeConfig struct {
// stack to encrypt communications
PrivateKey *ecdsa.PrivateKey

// Enable peer events for Msgs
EnableMsgEvents bool

// Name is a human friendly name for the node like "node01"
Name string

@ -91,6 +94,9 @@ type NodeConfig struct {
// contained in SimAdapter.services, for other nodes it should be
// services registered by calling the RegisterService function)
Services []string

// function to sanction or prevent suggesting a peer
Reachable func(id discover.NodeID) bool
}

// nodeConfigJSON is used to encode and decode NodeConfig as JSON by encoding
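Pulling the new NodeConfig fields together, an illustrative node description might look like this (the adapters package path, the ID field and the service name are taken from the surrounding package and are assumptions for the sketch, not shown in this hunk):

// Sketch: a simulation NodeConfig exercising the fields added above.
// Assumed imports: crypto, p2p/discover, p2p/simulations/adapters.
func exampleNodeConfig() *adapters.NodeConfig {
	key, _ := crypto.GenerateKey() // placeholder key
	id := discover.PubkeyID(&key.PublicKey)
	return &adapters.NodeConfig{
		ID:              id,
		PrivateKey:      key,
		Name:            "node01",
		Services:        []string{"ping-pong"}, // placeholder service name
		EnableMsgEvents: true,
		Reachable: func(other discover.NodeID) bool {
			return other != id // example policy: suggest every peer except ourselves
		},
	}
}
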
Some files were not shown because too many files have changed in this diff.