forked from cerc-io/plugeth
Merge branch 'release/0.9.30'
This commit is contained in:
commit
5daf8729be
5
Makefile
5
Makefile
@ -10,6 +10,11 @@ geth:
|
|||||||
@echo "Done building."
|
@echo "Done building."
|
||||||
@echo "Run \"$(GOBIN)/geth\" to launch geth."
|
@echo "Run \"$(GOBIN)/geth\" to launch geth."
|
||||||
|
|
||||||
|
console:
|
||||||
|
build/env.sh go install -v $(shell build/ldflags.sh) ./cmd/console
|
||||||
|
@echo "Done building."
|
||||||
|
@echo "Run \"$(GOBIN)/console\" to launch the console."
|
||||||
|
|
||||||
mist:
|
mist:
|
||||||
build/env.sh go install -v $(shell build/ldflags.sh) ./cmd/mist
|
build/env.sh go install -v $(shell build/ldflags.sh) ./cmd/mist
|
||||||
@echo "Done building."
|
@echo "Done building."
|
||||||
|
9
cmd/console/admin.go
Normal file
9
cmd/console/admin.go
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
/*
|
||||||
|
node admin bindings
|
||||||
|
*/
|
||||||
|
|
||||||
|
func (js *jsre) adminBindings() {
|
||||||
|
|
||||||
|
}
|
6
cmd/console/contracts.go
Normal file
6
cmd/console/contracts.go
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
var (
|
||||||
|
globalRegistrar = `var GlobalRegistrar = web3.eth.contract([{"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"name","outputs":[{"name":"o_name","type":"bytes32"}],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"owner","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"content","outputs":[{"name":"","type":"bytes32"}],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"addr","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"subRegistrar","outputs":[{"name":"o_subRegistrar","type":"address"}],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_newOwner","type":"address"}],"name":"transfer","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_registrar","type":"address"}],"name":"setSubRegistrar","outputs":[],"type":"function"},{"constant":false,"inputs":[],"name":"Registrar","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_a","type":"address"},{"name":"_primary","type":"bool"}],"name":"setAddress","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_content","type":"bytes32"}],"name":"setContent","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"disown","outputs":[],"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"register","outputs":[{"name":"","type":"address"}],"type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"}],"name":"Changed","type":"event"},{"anonymous":false,"inputs":[{
"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"addr","type":"address"}],"name":"PrimaryChanged","type":"event"}]);`
|
||||||
|
globalRegistrarAddr = "0xc6d9d2cd449a754c494264e1809c50e34d64562b"
|
||||||
|
)
|
431
cmd/console/js.go
Normal file
431
cmd/console/js.go
Normal file
@ -0,0 +1,431 @@
|
|||||||
|
// Copyright (c) 2013-2014, Jeffrey Wilcke. All rights reserved.
|
||||||
|
//
|
||||||
|
// This library is free software; you can redistribute it and/or
|
||||||
|
// modify it under the terms of the GNU General Public
|
||||||
|
// License as published by the Free Software Foundation; either
|
||||||
|
// version 2.1 of the License, or (at your option) any later version.
|
||||||
|
//
|
||||||
|
// This library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
// General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with this library; if not, write to the Free Software
|
||||||
|
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||||
|
// MA 02110-1301 USA
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
|
"github.com/ethereum/go-ethereum/common/docserver"
|
||||||
|
re "github.com/ethereum/go-ethereum/jsre"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/api"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/comms"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
"github.com/peterh/liner"
|
||||||
|
"github.com/robertkrimen/otto"
|
||||||
|
)
|
||||||
|
|
||||||
|
type prompter interface {
|
||||||
|
AppendHistory(string)
|
||||||
|
Prompt(p string) (string, error)
|
||||||
|
PasswordPrompt(p string) (string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type dumbterm struct{ r *bufio.Reader }
|
||||||
|
|
||||||
|
func (r dumbterm) Prompt(p string) (string, error) {
|
||||||
|
fmt.Print(p)
|
||||||
|
line, err := r.r.ReadString('\n')
|
||||||
|
return strings.TrimSuffix(line, "\n"), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r dumbterm) PasswordPrompt(p string) (string, error) {
|
||||||
|
fmt.Println("!! Unsupported terminal, password will echo.")
|
||||||
|
fmt.Print(p)
|
||||||
|
input, err := bufio.NewReader(os.Stdin).ReadString('\n')
|
||||||
|
fmt.Println()
|
||||||
|
return input, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r dumbterm) AppendHistory(string) {}
|
||||||
|
|
||||||
|
type jsre struct {
|
||||||
|
re *re.JSRE
|
||||||
|
wait chan *big.Int
|
||||||
|
ps1 string
|
||||||
|
atexit func()
|
||||||
|
datadir string
|
||||||
|
prompter
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
loadedModulesMethods map[string][]string
|
||||||
|
)
|
||||||
|
|
||||||
|
func loadAutoCompletion(js *jsre, ipcpath string) {
|
||||||
|
modules, err := js.suportedApis(ipcpath)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("Unable to determine supported modules - %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
loadedModulesMethods = make(map[string][]string)
|
||||||
|
for module, _ := range modules {
|
||||||
|
loadedModulesMethods[module] = api.AutoCompletion[module]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func keywordCompleter(line string) []string {
|
||||||
|
results := make([]string, 0)
|
||||||
|
|
||||||
|
if strings.Contains(line, ".") {
|
||||||
|
elements := strings.Split(line, ".")
|
||||||
|
if len(elements) == 2 {
|
||||||
|
module := elements[0]
|
||||||
|
partialMethod := elements[1]
|
||||||
|
if methods, found := loadedModulesMethods[module]; found {
|
||||||
|
for _, method := range methods {
|
||||||
|
if strings.HasPrefix(method, partialMethod) { // e.g. debug.se
|
||||||
|
results = append(results, module+"."+method)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for module, methods := range loadedModulesMethods {
|
||||||
|
if line == module { // user typed in full module name, show all methods
|
||||||
|
for _, method := range methods {
|
||||||
|
results = append(results, module+"."+method)
|
||||||
|
}
|
||||||
|
} else if strings.HasPrefix(module, line) { // partial method name, e.g. admi
|
||||||
|
results = append(results, module)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
func apiWordCompleter(line string, pos int) (head string, completions []string, tail string) {
|
||||||
|
if len(line) == 0 {
|
||||||
|
return "", nil, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for i = pos - 1; i > 0; i-- {
|
||||||
|
if line[i] == '.' || (line[i] >= 'a' && line[i] <= 'z') || (line[i] >= 'A' && line[i] <= 'Z') {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if i >= 3 && line[i] == '3' && line[i-3] == 'w' && line[i-2] == 'e' && line[i-1] == 'b' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
i += 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
begin := line[:i]
|
||||||
|
keyword := line[i:pos]
|
||||||
|
end := line[pos:]
|
||||||
|
|
||||||
|
completionWords := keywordCompleter(keyword)
|
||||||
|
return begin, completionWords, end
|
||||||
|
}
|
||||||
|
|
||||||
|
func newJSRE(libPath, ipcpath string) *jsre {
|
||||||
|
js := &jsre{ps1: "> "}
|
||||||
|
js.wait = make(chan *big.Int)
|
||||||
|
|
||||||
|
// update state in separare forever blocks
|
||||||
|
js.re = re.New(libPath)
|
||||||
|
js.apiBindings(ipcpath)
|
||||||
|
|
||||||
|
if !liner.TerminalSupported() {
|
||||||
|
js.prompter = dumbterm{bufio.NewReader(os.Stdin)}
|
||||||
|
} else {
|
||||||
|
lr := liner.NewLiner()
|
||||||
|
js.withHistory(func(hist *os.File) { lr.ReadHistory(hist) })
|
||||||
|
lr.SetCtrlCAborts(true)
|
||||||
|
loadAutoCompletion(js, ipcpath)
|
||||||
|
lr.SetWordCompleter(apiWordCompleter)
|
||||||
|
lr.SetTabCompletionStyle(liner.TabPrints)
|
||||||
|
js.prompter = lr
|
||||||
|
js.atexit = func() {
|
||||||
|
js.withHistory(func(hist *os.File) { hist.Truncate(0); lr.WriteHistory(hist) })
|
||||||
|
lr.Close()
|
||||||
|
close(js.wait)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return js
|
||||||
|
}
|
||||||
|
|
||||||
|
func (js *jsre) apiBindings(ipcpath string) {
|
||||||
|
ethApi := rpc.NewEthereumApi(nil)
|
||||||
|
jeth := rpc.NewJeth(ethApi, js.re, ipcpath)
|
||||||
|
|
||||||
|
js.re.Set("jeth", struct{}{})
|
||||||
|
t, _ := js.re.Get("jeth")
|
||||||
|
jethObj := t.Object()
|
||||||
|
jethObj.Set("send", jeth.SendIpc)
|
||||||
|
jethObj.Set("sendAsync", jeth.SendIpc)
|
||||||
|
|
||||||
|
err := js.re.Compile("bignumber.js", re.BigNumber_JS)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("Error loading bignumber.js: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = js.re.Compile("ethereum.js", re.Web3_JS)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("Error loading web3.js: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = js.re.Eval("var web3 = require('web3');")
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("Error requiring web3: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = js.re.Eval("web3.setProvider(jeth)")
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("Error setting web3 provider: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
apis, err := js.suportedApis(ipcpath)
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("Unable to determine supported api's: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// load only supported API's in javascript runtime
|
||||||
|
shortcuts := "var eth = web3.eth; "
|
||||||
|
for apiName, _ := range apis {
|
||||||
|
if apiName == api.Web3ApiName || apiName == api.EthApiName {
|
||||||
|
continue // manually mapped
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = js.re.Compile(fmt.Sprintf("%s.js", apiName), api.Javascript(apiName)); err == nil {
|
||||||
|
shortcuts += fmt.Sprintf("var %s = web3.%s; ", apiName, apiName)
|
||||||
|
} else {
|
||||||
|
utils.Fatalf("Error loading %s.js: %v", apiName, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = js.re.Eval(shortcuts)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
utils.Fatalf("Error setting namespaces: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
js.re.Eval(globalRegistrar + "registrar = GlobalRegistrar.at(\"" + globalRegistrarAddr + "\");")
|
||||||
|
}
|
||||||
|
|
||||||
|
var ds, _ = docserver.New("/")
|
||||||
|
|
||||||
|
/*
|
||||||
|
func (self *jsre) ConfirmTransaction(tx string) bool {
|
||||||
|
if self.ethereum.NatSpec {
|
||||||
|
notice := natspec.GetNotice(self.xeth, tx, ds)
|
||||||
|
fmt.Println(notice)
|
||||||
|
answer, _ := self.Prompt("Confirm Transaction [y/n]")
|
||||||
|
return strings.HasPrefix(strings.Trim(answer, " "), "y")
|
||||||
|
} else {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *jsre) UnlockAccount(addr []byte) bool {
|
||||||
|
fmt.Printf("Please unlock account %x.\n", addr)
|
||||||
|
pass, err := self.PasswordPrompt("Passphrase: ")
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// TODO: allow retry
|
||||||
|
if err := self.ethereum.AccountManager().Unlock(common.BytesToAddress(addr), pass); err != nil {
|
||||||
|
return false
|
||||||
|
} else {
|
||||||
|
fmt.Println("Account is now unlocked for this session.")
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
|
||||||
|
func (self *jsre) exec(filename string) error {
|
||||||
|
if err := self.re.Exec(filename); err != nil {
|
||||||
|
self.re.Stop(false)
|
||||||
|
return fmt.Errorf("Javascript Error: %v", err)
|
||||||
|
}
|
||||||
|
self.re.Stop(true)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *jsre) suportedApis(ipcpath string) (map[string]string, error) {
|
||||||
|
config := comms.IpcConfig{
|
||||||
|
Endpoint: ipcpath,
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := comms.NewIpcClient(config, codec.JSON)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req := shared.Request{
|
||||||
|
Id: 1,
|
||||||
|
Jsonrpc: "2.0",
|
||||||
|
Method: "modules",
|
||||||
|
}
|
||||||
|
|
||||||
|
err = client.Send(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := client.Recv()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if sucRes, ok := res.(shared.SuccessResponse); ok {
|
||||||
|
data, _ := json.Marshal(sucRes.Result)
|
||||||
|
apis := make(map[string]string)
|
||||||
|
err = json.Unmarshal(data, &apis)
|
||||||
|
if err == nil {
|
||||||
|
return apis, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("Unable to determine supported API's")
|
||||||
|
}
|
||||||
|
|
||||||
|
// show summary of current geth instance
|
||||||
|
func (self *jsre) welcome(ipcpath string) {
|
||||||
|
self.re.Eval(`console.log('instance: ' + web3.version.client);`)
|
||||||
|
self.re.Eval(`console.log(' datadir: ' + admin.datadir);`)
|
||||||
|
self.re.Eval(`console.log("coinbase: " + eth.coinbase);`)
|
||||||
|
self.re.Eval(`var lastBlockTimestamp = 1000 * eth.getBlock(eth.blockNumber).timestamp`)
|
||||||
|
self.re.Eval(`console.log("at block: " + eth.blockNumber + " (" + new Date(lastBlockTimestamp).toLocaleDateString()
|
||||||
|
+ " " + new Date(lastBlockTimestamp).toLocaleTimeString() + ")");`)
|
||||||
|
|
||||||
|
if modules, err := self.suportedApis(ipcpath); err == nil {
|
||||||
|
loadedModules := make([]string, 0)
|
||||||
|
for api, version := range modules {
|
||||||
|
loadedModules = append(loadedModules, fmt.Sprintf("%s:%s", api, version))
|
||||||
|
}
|
||||||
|
sort.Strings(loadedModules)
|
||||||
|
|
||||||
|
self.re.Eval(fmt.Sprintf("var modules = '%s';", strings.Join(loadedModules, " ")))
|
||||||
|
self.re.Eval(`console.log(" modules: " + modules);`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *jsre) interactive() {
|
||||||
|
// Read input lines.
|
||||||
|
prompt := make(chan string)
|
||||||
|
inputln := make(chan string)
|
||||||
|
go func() {
|
||||||
|
defer close(inputln)
|
||||||
|
for {
|
||||||
|
line, err := self.Prompt(<-prompt)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
inputln <- line
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// Wait for Ctrl-C, too.
|
||||||
|
sig := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(sig, os.Interrupt)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if self.atexit != nil {
|
||||||
|
self.atexit()
|
||||||
|
}
|
||||||
|
self.re.Stop(false)
|
||||||
|
}()
|
||||||
|
for {
|
||||||
|
prompt <- self.ps1
|
||||||
|
select {
|
||||||
|
case <-sig:
|
||||||
|
fmt.Println("caught interrupt, exiting")
|
||||||
|
return
|
||||||
|
case input, ok := <-inputln:
|
||||||
|
if !ok || indentCount <= 0 && input == "exit" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if input == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
str += input + "\n"
|
||||||
|
self.setIndent()
|
||||||
|
if indentCount <= 0 {
|
||||||
|
hist := str[:len(str)-1]
|
||||||
|
self.AppendHistory(hist)
|
||||||
|
self.parseInput(str)
|
||||||
|
str = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *jsre) withHistory(op func(*os.File)) {
|
||||||
|
hist, err := os.OpenFile(filepath.Join(self.datadir, "history"), os.O_RDWR|os.O_CREATE, os.ModePerm)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("unable to open history file: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
op(hist)
|
||||||
|
hist.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *jsre) parseInput(code string) {
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
fmt.Println("[native] error", r)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
value, err := self.re.Run(code)
|
||||||
|
if err != nil {
|
||||||
|
if ottoErr, ok := err.(*otto.Error); ok {
|
||||||
|
fmt.Println(ottoErr.String())
|
||||||
|
} else {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
self.printValue(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
var indentCount = 0
|
||||||
|
var str = ""
|
||||||
|
|
||||||
|
func (self *jsre) setIndent() {
|
||||||
|
open := strings.Count(str, "{")
|
||||||
|
open += strings.Count(str, "(")
|
||||||
|
closed := strings.Count(str, "}")
|
||||||
|
closed += strings.Count(str, ")")
|
||||||
|
indentCount = open - closed
|
||||||
|
if indentCount <= 0 {
|
||||||
|
self.ps1 = "> "
|
||||||
|
} else {
|
||||||
|
self.ps1 = strings.Join(make([]string, indentCount*2), "..")
|
||||||
|
self.ps1 += " "
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *jsre) printValue(v interface{}) {
|
||||||
|
val, err := self.re.PrettyPrint(v)
|
||||||
|
if err == nil {
|
||||||
|
fmt.Printf("%v", val)
|
||||||
|
}
|
||||||
|
}
|
100
cmd/console/main.go
Normal file
100
cmd/console/main.go
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
/*
|
||||||
|
This file is part of go-ethereum
|
||||||
|
|
||||||
|
go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation, either version 3 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
*/
|
||||||
|
/**
|
||||||
|
* @authors
|
||||||
|
* Jeffrey Wilcke <i@jev.io>
|
||||||
|
*/
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/codegangsta/cli"
|
||||||
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
|
"github.com/ethereum/go-ethereum/logger"
|
||||||
|
"github.com/mattn/go-colorable"
|
||||||
|
"github.com/mattn/go-isatty"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ClientIdentifier = "Geth console"
|
||||||
|
Version = "0.9.27"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
gitCommit string // set via linker flag
|
||||||
|
nodeNameVersion string
|
||||||
|
app = utils.NewApp(Version, "the ether console")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if gitCommit == "" {
|
||||||
|
nodeNameVersion = Version
|
||||||
|
} else {
|
||||||
|
nodeNameVersion = Version + "-" + gitCommit[:8]
|
||||||
|
}
|
||||||
|
|
||||||
|
app.Action = run
|
||||||
|
app.Flags = []cli.Flag{
|
||||||
|
utils.IPCPathFlag,
|
||||||
|
utils.VerbosityFlag,
|
||||||
|
utils.JSpathFlag,
|
||||||
|
}
|
||||||
|
|
||||||
|
app.Before = func(ctx *cli.Context) error {
|
||||||
|
utils.SetupLogger(ctx)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Wrap the standard output with a colorified stream (windows)
|
||||||
|
if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||||
|
if pr, pw, err := os.Pipe(); err == nil {
|
||||||
|
go io.Copy(colorable.NewColorableStdout(), pr)
|
||||||
|
os.Stdout = pw
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var interrupted = false
|
||||||
|
utils.RegisterInterrupt(func(os.Signal) {
|
||||||
|
interrupted = true
|
||||||
|
})
|
||||||
|
utils.HandleInterrupt()
|
||||||
|
|
||||||
|
if err := app.Run(os.Args); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error: ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// we need to run the interrupt callbacks in case gui is closed
|
||||||
|
// this skips if we got here by actual interrupt stopping the GUI
|
||||||
|
if !interrupted {
|
||||||
|
utils.RunInterruptCallbacks(os.Interrupt)
|
||||||
|
}
|
||||||
|
logger.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
func run(ctx *cli.Context) {
|
||||||
|
jspath := ctx.GlobalString(utils.JSpathFlag.Name)
|
||||||
|
ipcpath := utils.IpcSocketPath(ctx)
|
||||||
|
|
||||||
|
repl := newJSRE(jspath, ipcpath)
|
||||||
|
repl.welcome(ipcpath)
|
||||||
|
repl.interactive()
|
||||||
|
}
|
@ -59,6 +59,7 @@ func main() {
|
|||||||
|
|
||||||
logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, log.LstdFlags, logger.LogLevel(*loglevel)))
|
logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, log.LstdFlags, logger.LogLevel(*loglevel)))
|
||||||
|
|
||||||
|
vm.Debug = true
|
||||||
db, _ := ethdb.NewMemDatabase()
|
db, _ := ethdb.NewMemDatabase()
|
||||||
statedb := state.New(common.Hash{}, db)
|
statedb := state.New(common.Hash{}, db)
|
||||||
sender := statedb.CreateAccount(common.StringToAddress("sender"))
|
sender := statedb.CreateAccount(common.StringToAddress("sender"))
|
||||||
@ -80,6 +81,8 @@ func main() {
|
|||||||
fmt.Println(string(statedb.Dump()))
|
fmt.Println(string(statedb.Dump()))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
vm.StdErrFormat(vmenv.StructLogs())
|
||||||
|
|
||||||
var mem runtime.MemStats
|
var mem runtime.MemStats
|
||||||
runtime.ReadMemStats(&mem)
|
runtime.ReadMemStats(&mem)
|
||||||
fmt.Printf("vm took %v\n", time.Since(tstart))
|
fmt.Printf("vm took %v\n", time.Since(tstart))
|
||||||
@ -104,6 +107,7 @@ type VMEnv struct {
|
|||||||
depth int
|
depth int
|
||||||
Gas *big.Int
|
Gas *big.Int
|
||||||
time int64
|
time int64
|
||||||
|
logs []vm.StructLog
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEnv(state *state.StateDB, transactor common.Address, value *big.Int) *VMEnv {
|
func NewEnv(state *state.StateDB, transactor common.Address, value *big.Int) *VMEnv {
|
||||||
@ -133,6 +137,12 @@ func (self *VMEnv) GetHash(n uint64) common.Hash {
|
|||||||
}
|
}
|
||||||
return common.Hash{}
|
return common.Hash{}
|
||||||
}
|
}
|
||||||
|
func (self *VMEnv) AddStructLog(log vm.StructLog) {
|
||||||
|
self.logs = append(self.logs, log)
|
||||||
|
}
|
||||||
|
func (self *VMEnv) StructLogs() []vm.StructLog {
|
||||||
|
return self.logs
|
||||||
|
}
|
||||||
func (self *VMEnv) AddLog(log *state.Log) {
|
func (self *VMEnv) AddLog(log *state.Log) {
|
||||||
self.state.AddLog(log)
|
self.state.AddLog(log)
|
||||||
}
|
}
|
||||||
|
@ -51,7 +51,7 @@ func (js *jsre) adminBindings() {
|
|||||||
admin.Set("import", js.importChain)
|
admin.Set("import", js.importChain)
|
||||||
admin.Set("export", js.exportChain)
|
admin.Set("export", js.exportChain)
|
||||||
admin.Set("verbosity", js.verbosity)
|
admin.Set("verbosity", js.verbosity)
|
||||||
admin.Set("progress", js.downloadProgress)
|
admin.Set("progress", js.syncProgress)
|
||||||
admin.Set("setSolc", js.setSolc)
|
admin.Set("setSolc", js.setSolc)
|
||||||
|
|
||||||
admin.Set("contractInfo", struct{}{})
|
admin.Set("contractInfo", struct{}{})
|
||||||
@ -271,9 +271,12 @@ func (js *jsre) debugBlock(call otto.FunctionCall) otto.Value {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tstart := time.Now()
|
tstart := time.Now()
|
||||||
|
|
||||||
old := vm.Debug
|
old := vm.Debug
|
||||||
vm.Debug = true
|
|
||||||
|
if len(call.ArgumentList) > 1 {
|
||||||
|
vm.Debug, _ = call.Argument(1).ToBoolean()
|
||||||
|
}
|
||||||
|
|
||||||
_, err = js.ethereum.BlockProcessor().RetryProcess(block)
|
_, err = js.ethereum.BlockProcessor().RetryProcess(block)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
@ -324,9 +327,14 @@ func (js *jsre) setHead(call otto.FunctionCall) otto.Value {
|
|||||||
return otto.UndefinedValue()
|
return otto.UndefinedValue()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (js *jsre) downloadProgress(call otto.FunctionCall) otto.Value {
|
func (js *jsre) syncProgress(call otto.FunctionCall) otto.Value {
|
||||||
pending, cached := js.ethereum.Downloader().Stats()
|
pending, cached, importing, eta := js.ethereum.Downloader().Stats()
|
||||||
v, _ := call.Otto.ToValue(map[string]interface{}{"pending": pending, "cached": cached})
|
v, _ := call.Otto.ToValue(map[string]interface{}{
|
||||||
|
"pending": pending,
|
||||||
|
"cached": cached,
|
||||||
|
"importing": importing,
|
||||||
|
"estimate": (eta / time.Second * time.Second).String(),
|
||||||
|
})
|
||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -73,7 +73,7 @@ type jsre struct {
|
|||||||
prompter
|
prompter
|
||||||
}
|
}
|
||||||
|
|
||||||
func newJSRE(ethereum *eth.Ethereum, libPath, corsDomain string, interactive bool, f xeth.Frontend) *jsre {
|
func newJSRE(ethereum *eth.Ethereum, libPath, corsDomain, ipcpath string, interactive bool, f xeth.Frontend) *jsre {
|
||||||
js := &jsre{ethereum: ethereum, ps1: "> "}
|
js := &jsre{ethereum: ethereum, ps1: "> "}
|
||||||
// set default cors domain used by startRpc from CLI flag
|
// set default cors domain used by startRpc from CLI flag
|
||||||
js.corsDomain = corsDomain
|
js.corsDomain = corsDomain
|
||||||
@ -84,7 +84,7 @@ func newJSRE(ethereum *eth.Ethereum, libPath, corsDomain string, interactive boo
|
|||||||
js.wait = js.xeth.UpdateState()
|
js.wait = js.xeth.UpdateState()
|
||||||
// update state in separare forever blocks
|
// update state in separare forever blocks
|
||||||
js.re = re.New(libPath)
|
js.re = re.New(libPath)
|
||||||
js.apiBindings(f)
|
js.apiBindings(ipcpath, f)
|
||||||
js.adminBindings()
|
js.adminBindings()
|
||||||
|
|
||||||
if !liner.TerminalSupported() || !interactive {
|
if !liner.TerminalSupported() || !interactive {
|
||||||
@ -103,14 +103,15 @@ func newJSRE(ethereum *eth.Ethereum, libPath, corsDomain string, interactive boo
|
|||||||
return js
|
return js
|
||||||
}
|
}
|
||||||
|
|
||||||
func (js *jsre) apiBindings(f xeth.Frontend) {
|
func (js *jsre) apiBindings(ipcpath string, f xeth.Frontend) {
|
||||||
xe := xeth.New(js.ethereum, f)
|
xe := xeth.New(js.ethereum, f)
|
||||||
ethApi := rpc.NewEthereumApi(xe)
|
ethApi := rpc.NewEthereumApi(xe)
|
||||||
jeth := rpc.NewJeth(ethApi, js.re)
|
jeth := rpc.NewJeth(ethApi, js.re, ipcpath)
|
||||||
|
|
||||||
js.re.Set("jeth", struct{}{})
|
js.re.Set("jeth", struct{}{})
|
||||||
t, _ := js.re.Get("jeth")
|
t, _ := js.re.Get("jeth")
|
||||||
jethObj := t.Object()
|
jethObj := t.Object()
|
||||||
|
|
||||||
jethObj.Set("send", jeth.Send)
|
jethObj.Set("send", jeth.Send)
|
||||||
jethObj.Set("sendAsync", jeth.Send)
|
jethObj.Set("sendAsync", jeth.Send)
|
||||||
|
|
||||||
@ -119,7 +120,7 @@ func (js *jsre) apiBindings(f xeth.Frontend) {
|
|||||||
utils.Fatalf("Error loading bignumber.js: %v", err)
|
utils.Fatalf("Error loading bignumber.js: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = js.re.Compile("ethereum.js", re.Ethereum_JS)
|
err = js.re.Compile("ethereum.js", re.Web3_JS)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Error loading ethereum.js: %v", err)
|
utils.Fatalf("Error loading ethereum.js: %v", err)
|
||||||
}
|
}
|
||||||
|
@ -105,7 +105,7 @@ func testJEthRE(t *testing.T) (string, *testjethre, *eth.Ethereum) {
|
|||||||
t.Errorf("Error creating DocServer: %v", err)
|
t.Errorf("Error creating DocServer: %v", err)
|
||||||
}
|
}
|
||||||
tf := &testjethre{ds: ds, stateDb: ethereum.ChainManager().State().Copy()}
|
tf := &testjethre{ds: ds, stateDb: ethereum.ChainManager().State().Copy()}
|
||||||
repl := newJSRE(ethereum, assetPath, "", false, tf)
|
repl := newJSRE(ethereum, assetPath, "", "", false, tf)
|
||||||
tf.jsre = repl
|
tf.jsre = repl
|
||||||
return tmp, tf, ethereum
|
return tmp, tf, ethereum
|
||||||
}
|
}
|
||||||
|
@ -44,7 +44,7 @@ import (
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
ClientIdentifier = "Geth"
|
ClientIdentifier = "Geth"
|
||||||
Version = "0.9.28"
|
Version = "0.9.30"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -239,6 +239,9 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
|||||||
utils.RPCEnabledFlag,
|
utils.RPCEnabledFlag,
|
||||||
utils.RPCListenAddrFlag,
|
utils.RPCListenAddrFlag,
|
||||||
utils.RPCPortFlag,
|
utils.RPCPortFlag,
|
||||||
|
utils.IPCDisabledFlag,
|
||||||
|
utils.IPCApiFlag,
|
||||||
|
utils.IPCPathFlag,
|
||||||
utils.WhisperEnabledFlag,
|
utils.WhisperEnabledFlag,
|
||||||
utils.VMDebugFlag,
|
utils.VMDebugFlag,
|
||||||
utils.ProtocolVersionFlag,
|
utils.ProtocolVersionFlag,
|
||||||
@ -253,6 +256,12 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso
|
|||||||
utils.PProfEanbledFlag,
|
utils.PProfEanbledFlag,
|
||||||
utils.PProfPortFlag,
|
utils.PProfPortFlag,
|
||||||
utils.SolcPathFlag,
|
utils.SolcPathFlag,
|
||||||
|
utils.GpoMinGasPriceFlag,
|
||||||
|
utils.GpoMaxGasPriceFlag,
|
||||||
|
utils.GpoFullBlockRatioFlag,
|
||||||
|
utils.GpobaseStepDownFlag,
|
||||||
|
utils.GpobaseStepUpFlag,
|
||||||
|
utils.GpobaseCorrectionFactorFlag,
|
||||||
}
|
}
|
||||||
app.Before = func(ctx *cli.Context) error {
|
app.Before = func(ctx *cli.Context) error {
|
||||||
utils.SetupLogger(ctx)
|
utils.SetupLogger(ctx)
|
||||||
@ -305,6 +314,7 @@ func console(ctx *cli.Context) {
|
|||||||
ethereum,
|
ethereum,
|
||||||
ctx.String(utils.JSpathFlag.Name),
|
ctx.String(utils.JSpathFlag.Name),
|
||||||
ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
|
ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
|
||||||
|
utils.IpcSocketPath(ctx),
|
||||||
true,
|
true,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
@ -326,6 +336,7 @@ func execJSFiles(ctx *cli.Context) {
|
|||||||
ethereum,
|
ethereum,
|
||||||
ctx.String(utils.JSpathFlag.Name),
|
ctx.String(utils.JSpathFlag.Name),
|
||||||
ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
|
ctx.GlobalString(utils.RPCCORSDomainFlag.Name),
|
||||||
|
utils.IpcSocketPath(ctx),
|
||||||
false,
|
false,
|
||||||
nil,
|
nil,
|
||||||
)
|
)
|
||||||
@ -382,6 +393,11 @@ func startEth(ctx *cli.Context, eth *eth.Ethereum) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Start auxiliary services if enabled.
|
// Start auxiliary services if enabled.
|
||||||
|
if !ctx.GlobalBool(utils.IPCDisabledFlag.Name) {
|
||||||
|
if err := utils.StartIPC(eth, ctx); err != nil {
|
||||||
|
utils.Fatalf("Error string IPC: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
if ctx.GlobalBool(utils.RPCEnabledFlag.Name) {
|
if ctx.GlobalBool(utils.RPCEnabledFlag.Name) {
|
||||||
if err := utils.StartRPC(eth, ctx); err != nil {
|
if err := utils.StartRPC(eth, ctx); err != nil {
|
||||||
utils.Fatalf("Error starting RPC: %v", err)
|
utils.Fatalf("Error starting RPC: %v", err)
|
||||||
|
@ -23,6 +23,9 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/api"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/comms"
|
||||||
"github.com/ethereum/go-ethereum/xeth"
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -129,7 +132,7 @@ var (
|
|||||||
GasPriceFlag = cli.StringFlag{
|
GasPriceFlag = cli.StringFlag{
|
||||||
Name: "gasprice",
|
Name: "gasprice",
|
||||||
Usage: "Sets the minimal gasprice when mining transactions",
|
Usage: "Sets the minimal gasprice when mining transactions",
|
||||||
Value: new(big.Int).Mul(big.NewInt(10), common.Szabo).String(),
|
Value: new(big.Int).Mul(big.NewInt(1), common.Szabo).String(),
|
||||||
}
|
}
|
||||||
|
|
||||||
UnlockedAccountFlag = cli.StringFlag{
|
UnlockedAccountFlag = cli.StringFlag{
|
||||||
@ -206,6 +209,20 @@ var (
|
|||||||
Usage: "Domain on which to send Access-Control-Allow-Origin header",
|
Usage: "Domain on which to send Access-Control-Allow-Origin header",
|
||||||
Value: "",
|
Value: "",
|
||||||
}
|
}
|
||||||
|
IPCDisabledFlag = cli.BoolFlag{
|
||||||
|
Name: "ipcdisable",
|
||||||
|
Usage: "Disable the IPC-RPC server",
|
||||||
|
}
|
||||||
|
IPCApiFlag = cli.StringFlag{
|
||||||
|
Name: "ipcapi",
|
||||||
|
Usage: "Specify the API's which are offered over this interface",
|
||||||
|
Value: api.DefaultIpcApis,
|
||||||
|
}
|
||||||
|
IPCPathFlag = DirectoryFlag{
|
||||||
|
Name: "ipcpath",
|
||||||
|
Usage: "Filename for IPC socket/pipe",
|
||||||
|
Value: DirectoryString{common.DefaultIpcPath()},
|
||||||
|
}
|
||||||
// Network Settings
|
// Network Settings
|
||||||
MaxPeersFlag = cli.IntFlag{
|
MaxPeersFlag = cli.IntFlag{
|
||||||
Name: "maxpeers",
|
Name: "maxpeers",
|
||||||
@ -259,6 +276,36 @@ var (
|
|||||||
Usage: "solidity compiler to be used",
|
Usage: "solidity compiler to be used",
|
||||||
Value: "solc",
|
Value: "solc",
|
||||||
}
|
}
|
||||||
|
GpoMinGasPriceFlag = cli.StringFlag{
|
||||||
|
Name: "gpomin",
|
||||||
|
Usage: "Minimum suggested gas price",
|
||||||
|
Value: new(big.Int).Mul(big.NewInt(1), common.Szabo).String(),
|
||||||
|
}
|
||||||
|
GpoMaxGasPriceFlag = cli.StringFlag{
|
||||||
|
Name: "gpomax",
|
||||||
|
Usage: "Maximum suggested gas price",
|
||||||
|
Value: new(big.Int).Mul(big.NewInt(100), common.Szabo).String(),
|
||||||
|
}
|
||||||
|
GpoFullBlockRatioFlag = cli.IntFlag{
|
||||||
|
Name: "gpofull",
|
||||||
|
Usage: "Full block threshold for gas price calculation (%)",
|
||||||
|
Value: 80,
|
||||||
|
}
|
||||||
|
GpobaseStepDownFlag = cli.IntFlag{
|
||||||
|
Name: "gpobasedown",
|
||||||
|
Usage: "Suggested gas price base step down ratio (1/1000)",
|
||||||
|
Value: 10,
|
||||||
|
}
|
||||||
|
GpobaseStepUpFlag = cli.IntFlag{
|
||||||
|
Name: "gpobaseup",
|
||||||
|
Usage: "Suggested gas price base step up ratio (1/1000)",
|
||||||
|
Value: 100,
|
||||||
|
}
|
||||||
|
GpobaseCorrectionFactorFlag = cli.IntFlag{
|
||||||
|
Name: "gpobasecf",
|
||||||
|
Usage: "Suggested gas price base correction factor (%)",
|
||||||
|
Value: 110,
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// MakeNAT creates a port mapper from set command line flags.
|
// MakeNAT creates a port mapper from set command line flags.
|
||||||
@ -321,6 +368,12 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
|
|||||||
Dial: true,
|
Dial: true,
|
||||||
BootNodes: ctx.GlobalString(BootnodesFlag.Name),
|
BootNodes: ctx.GlobalString(BootnodesFlag.Name),
|
||||||
GasPrice: common.String2Big(ctx.GlobalString(GasPriceFlag.Name)),
|
GasPrice: common.String2Big(ctx.GlobalString(GasPriceFlag.Name)),
|
||||||
|
GpoMinGasPrice: common.String2Big(ctx.GlobalString(GpoMinGasPriceFlag.Name)),
|
||||||
|
GpoMaxGasPrice: common.String2Big(ctx.GlobalString(GpoMaxGasPriceFlag.Name)),
|
||||||
|
GpoFullBlockRatio: ctx.GlobalInt(GpoFullBlockRatioFlag.Name),
|
||||||
|
GpobaseStepDown: ctx.GlobalInt(GpobaseStepDownFlag.Name),
|
||||||
|
GpobaseStepUp: ctx.GlobalInt(GpobaseStepUpFlag.Name),
|
||||||
|
GpobaseCorrectionFactor: ctx.GlobalInt(GpobaseCorrectionFactorFlag.Name),
|
||||||
SolcPath: ctx.GlobalString(SolcPathFlag.Name),
|
SolcPath: ctx.GlobalString(SolcPathFlag.Name),
|
||||||
AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
|
AutoDAG: ctx.GlobalBool(AutoDAGFlag.Name) || ctx.GlobalBool(MiningEnabledFlag.Name),
|
||||||
}
|
}
|
||||||
@ -368,6 +421,41 @@ func MakeAccountManager(ctx *cli.Context) *accounts.Manager {
|
|||||||
return accounts.NewManager(ks)
|
return accounts.NewManager(ks)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func IpcSocketPath(ctx *cli.Context) (ipcpath string) {
|
||||||
|
if common.IsWindows() {
|
||||||
|
ipcpath = common.DefaultIpcPath()
|
||||||
|
if ipcpath != ctx.GlobalString(IPCPathFlag.Name) {
|
||||||
|
ipcpath = ctx.GlobalString(IPCPathFlag.Name)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ipcpath = common.DefaultIpcPath()
|
||||||
|
if ctx.GlobalString(IPCPathFlag.Name) != common.DefaultIpcPath() {
|
||||||
|
ipcpath = ctx.GlobalString(IPCPathFlag.Name)
|
||||||
|
} else if ctx.GlobalString(DataDirFlag.Name) != "" &&
|
||||||
|
ctx.GlobalString(DataDirFlag.Name) != common.DefaultDataDir() {
|
||||||
|
ipcpath = filepath.Join(ctx.GlobalString(DataDirFlag.Name), "geth.ipc")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func StartIPC(eth *eth.Ethereum, ctx *cli.Context) error {
|
||||||
|
config := comms.IpcConfig{
|
||||||
|
Endpoint: IpcSocketPath(ctx),
|
||||||
|
}
|
||||||
|
|
||||||
|
xeth := xeth.New(eth, nil)
|
||||||
|
codec := codec.JSON
|
||||||
|
|
||||||
|
apis, err := api.ParseApiString(ctx.GlobalString(IPCApiFlag.Name), codec, xeth, eth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return comms.StartIpc(config, codec, apis...)
|
||||||
|
}
|
||||||
|
|
||||||
func StartRPC(eth *eth.Ethereum, ctx *cli.Context) error {
|
func StartRPC(eth *eth.Ethereum, ctx *cli.Context) error {
|
||||||
config := rpc.RpcConfig{
|
config := rpc.RpcConfig{
|
||||||
ListenAddress: ctx.GlobalString(RPCListenAddrFlag.Name),
|
ListenAddress: ctx.GlobalString(RPCListenAddrFlag.Name),
|
||||||
|
@ -94,6 +94,13 @@ func DefaultDataDir() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func DefaultIpcPath() string {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
return `\\.\pipe\geth.ipc`
|
||||||
|
}
|
||||||
|
return filepath.Join(DefaultDataDir(), "geth.ipc")
|
||||||
|
}
|
||||||
|
|
||||||
func IsWindows() bool {
|
func IsWindows() bool {
|
||||||
return runtime.GOOS == "windows"
|
return runtime.GOOS == "windows"
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
package common
|
package common
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"reflect"
|
"reflect"
|
||||||
@ -95,3 +96,13 @@ func (a *Address) Set(other Address) {
|
|||||||
a[i] = v
|
a[i] = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PP Pretty Prints a byte slice in the following format:
|
||||||
|
// hex(value[:4])...(hex[len(value)-4:])
|
||||||
|
func PP(value []byte) string {
|
||||||
|
if len(value) <= 8 {
|
||||||
|
return Bytes2Hex(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("%x...%x", value[:4], value[len(value)-4])
|
||||||
|
}
|
||||||
|
@ -71,14 +71,10 @@ func (sm *BlockProcessor) TransitionState(statedb *state.StateDB, parent, block
|
|||||||
|
|
||||||
func (self *BlockProcessor) ApplyTransaction(coinbase *state.StateObject, statedb *state.StateDB, block *types.Block, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
|
func (self *BlockProcessor) ApplyTransaction(coinbase *state.StateObject, statedb *state.StateDB, block *types.Block, tx *types.Transaction, usedGas *big.Int, transientProcess bool) (*types.Receipt, *big.Int, error) {
|
||||||
// If we are mining this block and validating we want to set the logs back to 0
|
// If we are mining this block and validating we want to set the logs back to 0
|
||||||
//statedb.EmptyLogs()
|
|
||||||
|
|
||||||
cb := statedb.GetStateObject(coinbase.Address())
|
cb := statedb.GetStateObject(coinbase.Address())
|
||||||
_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, block), tx, cb)
|
_, gas, err := ApplyMessage(NewEnv(statedb, self.bc, tx, block), tx, cb)
|
||||||
if err != nil && (IsNonceErr(err) || state.IsGasLimitErr(err) || IsInvalidTxErr(err)) {
|
if err != nil && (IsNonceErr(err) || state.IsGasLimitErr(err) || IsInvalidTxErr(err)) {
|
||||||
// If the account is managed, remove the invalid nonce.
|
|
||||||
//from, _ := tx.From()
|
|
||||||
//self.bc.TxState().RemoveNonce(from, tx.Nonce())
|
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -151,11 +147,17 @@ func (sm *BlockProcessor) RetryProcess(block *types.Block) (logs state.Logs, err
|
|||||||
return nil, ParentError(header.ParentHash)
|
return nil, ParentError(header.ParentHash)
|
||||||
}
|
}
|
||||||
parent := sm.bc.GetBlock(header.ParentHash)
|
parent := sm.bc.GetBlock(header.ParentHash)
|
||||||
if !sm.Pow.Verify(block) {
|
|
||||||
|
// FIXME Change to full header validation. See #1225
|
||||||
|
errch := make(chan bool)
|
||||||
|
go func() { errch <- sm.Pow.Verify(block) }()
|
||||||
|
|
||||||
|
logs, err = sm.processWithParent(block, parent)
|
||||||
|
if !<-errch {
|
||||||
return nil, ValidationError("Block's nonce is invalid (= %x)", block.Nonce)
|
return nil, ValidationError("Block's nonce is invalid (= %x)", block.Nonce)
|
||||||
}
|
}
|
||||||
|
|
||||||
return sm.processWithParent(block, parent)
|
return logs, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process block will attempt to process the given block's transactions and applies them
|
// Process block will attempt to process the given block's transactions and applies them
|
||||||
@ -258,6 +260,12 @@ func (sm *BlockProcessor) processWithParent(block, parent *types.Block) (logs st
|
|||||||
putTx(sm.extraDb, tx, block, uint64(i))
|
putTx(sm.extraDb, tx, block, uint64(i))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
receiptsRlp := receipts.RlpEncode()
|
||||||
|
/*if len(receipts) > 0 {
|
||||||
|
glog.V(logger.Info).Infof("Saving %v receipts, rlp len is %v\n", len(receipts), len(receiptsRlp))
|
||||||
|
}*/
|
||||||
|
sm.extraDb.Put(append(receiptsPre, block.Hash().Bytes()...), receiptsRlp)
|
||||||
|
|
||||||
return state.Logs(), nil
|
return state.Logs(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -402,6 +410,8 @@ func getBlockReceipts(db common.Database, bhash common.Hash) (receipts types.Rec
|
|||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = rlp.DecodeBytes(rdata, &receipts)
|
err = rlp.DecodeBytes(rdata, &receipts)
|
||||||
|
} else {
|
||||||
|
glog.V(logger.Detail).Infof("getBlockReceipts error %v\n", err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -5,9 +5,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/big"
|
"math/big"
|
||||||
"os"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
@ -101,6 +101,8 @@ type ChainManager struct {
|
|||||||
futureBlocks *BlockCache
|
futureBlocks *BlockCache
|
||||||
|
|
||||||
quit chan struct{}
|
quit chan struct{}
|
||||||
|
// procInterrupt must be atomically called
|
||||||
|
procInterrupt int32 // interrupt signaler for block processing
|
||||||
wg sync.WaitGroup
|
wg sync.WaitGroup
|
||||||
|
|
||||||
pow pow.PoW
|
pow pow.PoW
|
||||||
@ -232,15 +234,8 @@ func (bc *ChainManager) setLastState() {
|
|||||||
if block != nil {
|
if block != nil {
|
||||||
bc.currentBlock = block
|
bc.currentBlock = block
|
||||||
bc.lastBlockHash = block.Hash()
|
bc.lastBlockHash = block.Hash()
|
||||||
} else { // TODO CLEAN THIS UP TMP CODE
|
} else {
|
||||||
block = bc.GetBlockByNumber(400000)
|
glog.Fatalf("Fatal. LastBlock not found. Please run removedb and resync")
|
||||||
if block == nil {
|
|
||||||
fmt.Println("Fatal. LastBlock not found. Report this issue")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
bc.currentBlock = block
|
|
||||||
bc.lastBlockHash = block.Hash()
|
|
||||||
bc.insert(block)
|
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
bc.Reset()
|
bc.Reset()
|
||||||
@ -516,6 +511,7 @@ func (self *ChainManager) CalcTotalDiff(block *types.Block) (*big.Int, error) {
|
|||||||
|
|
||||||
func (bc *ChainManager) Stop() {
|
func (bc *ChainManager) Stop() {
|
||||||
close(bc.quit)
|
close(bc.quit)
|
||||||
|
atomic.StoreInt32(&bc.procInterrupt, 1)
|
||||||
|
|
||||||
bc.wg.Wait()
|
bc.wg.Wait()
|
||||||
|
|
||||||
@ -567,7 +563,13 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
go verifyNonces(self.pow, chain, nonceQuit, nonceDone)
|
go verifyNonces(self.pow, chain, nonceQuit, nonceDone)
|
||||||
defer close(nonceQuit)
|
defer close(nonceQuit)
|
||||||
|
|
||||||
|
txcount := 0
|
||||||
for i, block := range chain {
|
for i, block := range chain {
|
||||||
|
if atomic.LoadInt32(&self.procInterrupt) == 1 {
|
||||||
|
glog.V(logger.Debug).Infoln("Premature abort during chain processing")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
bstart := time.Now()
|
bstart := time.Now()
|
||||||
// Wait for block i's nonce to be verified before processing
|
// Wait for block i's nonce to be verified before processing
|
||||||
// its state transition.
|
// its state transition.
|
||||||
@ -625,6 +627,8 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
return i, err
|
return i, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
txcount += len(block.Transactions())
|
||||||
|
|
||||||
cblock := self.currentBlock
|
cblock := self.currentBlock
|
||||||
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
|
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
|
||||||
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
|
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
|
||||||
@ -683,7 +687,7 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
|
|||||||
if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) {
|
if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) {
|
||||||
tend := time.Since(tstart)
|
tend := time.Since(tstart)
|
||||||
start, end := chain[0], chain[len(chain)-1]
|
start, end := chain[0], chain[len(chain)-1]
|
||||||
glog.Infof("imported %d block(s) (%d queued %d ignored) in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
|
glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4])
|
||||||
}
|
}
|
||||||
|
|
||||||
go self.eventMux.Post(queueEvent)
|
go self.eventMux.Post(queueEvent)
|
||||||
|
@ -2,7 +2,6 @@ package core
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"math/big"
|
"math/big"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
@ -49,8 +48,6 @@ func (self *Execution) Create(caller vm.ContextRef) (ret []byte, err error, acco
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (self *Execution) exec(contextAddr *common.Address, code []byte, caller vm.ContextRef) (ret []byte, err error) {
|
func (self *Execution) exec(contextAddr *common.Address, code []byte, caller vm.ContextRef) (ret []byte, err error) {
|
||||||
start := time.Now()
|
|
||||||
|
|
||||||
env := self.env
|
env := self.env
|
||||||
evm := self.evm
|
evm := self.evm
|
||||||
if env.Depth() > int(params.CallCreateDepth.Int64()) {
|
if env.Depth() > int(params.CallCreateDepth.Int64()) {
|
||||||
@ -96,7 +93,6 @@ func (self *Execution) exec(contextAddr *common.Address, code []byte, caller vm.
|
|||||||
context.SetCallCode(contextAddr, code)
|
context.SetCallCode(contextAddr, code)
|
||||||
|
|
||||||
ret, err = evm.Run(context, self.input)
|
ret, err = evm.Run(context, self.input)
|
||||||
evm.Printf("message call took %v", time.Since(start)).Endl()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
env.State().Set(snapshot)
|
env.State().Set(snapshot)
|
||||||
}
|
}
|
||||||
|
@ -6,8 +6,8 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
// "github.com/ethereum/go-ethereum/crypto"
|
// "github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -76,8 +76,5 @@ func NewTestManager() *TestManager {
|
|||||||
// testManager.blockChain = NewChainManager(testManager)
|
// testManager.blockChain = NewChainManager(testManager)
|
||||||
// testManager.stateManager = NewStateManager(testManager)
|
// testManager.stateManager = NewStateManager(testManager)
|
||||||
|
|
||||||
// Start the tx pool
|
|
||||||
testManager.txPool.Start()
|
|
||||||
|
|
||||||
return testManager
|
return testManager
|
||||||
}
|
}
|
||||||
|
@ -336,6 +336,22 @@ func (self *StateObject) Nonce() uint64 {
|
|||||||
return self.nonce
|
return self.nonce
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (self *StateObject) EachStorage(cb func(key, value []byte)) {
|
||||||
|
// When iterating over the storage check the cache first
|
||||||
|
for h, v := range self.storage {
|
||||||
|
cb([]byte(h), v.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
it := self.State.trie.Iterator()
|
||||||
|
for it.Next() {
|
||||||
|
// ignore cached values
|
||||||
|
key := self.State.trie.GetKey(it.Key)
|
||||||
|
if _, ok := self.storage[string(key)]; !ok {
|
||||||
|
cb(key, it.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//
|
//
|
||||||
// Encoding
|
// Encoding
|
||||||
//
|
//
|
||||||
|
@ -223,6 +223,10 @@ func (self *StateTransition) transitionState() (ret []byte, usedGas *big.Int, er
|
|||||||
return nil, nil, InvalidTxError(err)
|
return nil, nil, InvalidTxError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if vm.Debug {
|
||||||
|
vm.StdErrFormat(vmenv.StructLogs())
|
||||||
|
}
|
||||||
|
|
||||||
self.refundGas()
|
self.refundGas()
|
||||||
self.state.AddBalance(self.coinbase, new(big.Int).Mul(self.gasUsed(), self.gasPrice))
|
self.state.AddBalance(self.coinbase, new(big.Int).Mul(self.gasUsed(), self.gasPrice))
|
||||||
|
|
||||||
|
@ -19,6 +19,7 @@ var (
|
|||||||
// Transaction Pool Errors
|
// Transaction Pool Errors
|
||||||
ErrInvalidSender = errors.New("Invalid sender")
|
ErrInvalidSender = errors.New("Invalid sender")
|
||||||
ErrNonce = errors.New("Nonce too low")
|
ErrNonce = errors.New("Nonce too low")
|
||||||
|
ErrCheap = errors.New("Gas price too low for acceptance")
|
||||||
ErrBalance = errors.New("Insufficient balance")
|
ErrBalance = errors.New("Insufficient balance")
|
||||||
ErrNonExistentAccount = errors.New("Account does not exist or account balance too low")
|
ErrNonExistentAccount = errors.New("Account does not exist or account balance too low")
|
||||||
ErrInsufficientFunds = errors.New("Insufficient funds for gas * price + value")
|
ErrInsufficientFunds = errors.New("Insufficient funds for gas * price + value")
|
||||||
@ -27,6 +28,10 @@ var (
|
|||||||
ErrNegativeValue = errors.New("Negative value")
|
ErrNegativeValue = errors.New("Negative value")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
maxQueued = 200 // max limit of queued txs per address
|
||||||
|
)
|
||||||
|
|
||||||
type stateFn func() *state.StateDB
|
type stateFn func() *state.StateDB
|
||||||
|
|
||||||
// TxPool contains all currently known transactions. Transactions
|
// TxPool contains all currently known transactions. Transactions
|
||||||
@ -41,6 +46,7 @@ type TxPool struct {
|
|||||||
currentState stateFn // The state function which will allow us to do some pre checkes
|
currentState stateFn // The state function which will allow us to do some pre checkes
|
||||||
pendingState *state.ManagedState
|
pendingState *state.ManagedState
|
||||||
gasLimit func() *big.Int // The current gas limit function callback
|
gasLimit func() *big.Int // The current gas limit function callback
|
||||||
|
minGasPrice *big.Int
|
||||||
eventMux *event.TypeMux
|
eventMux *event.TypeMux
|
||||||
events event.Subscription
|
events event.Subscription
|
||||||
|
|
||||||
@ -50,26 +56,35 @@ type TxPool struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
|
func NewTxPool(eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
|
||||||
return &TxPool{
|
pool := &TxPool{
|
||||||
pending: make(map[common.Hash]*types.Transaction),
|
pending: make(map[common.Hash]*types.Transaction),
|
||||||
queue: make(map[common.Address]map[common.Hash]*types.Transaction),
|
queue: make(map[common.Address]map[common.Hash]*types.Transaction),
|
||||||
quit: make(chan bool),
|
quit: make(chan bool),
|
||||||
eventMux: eventMux,
|
eventMux: eventMux,
|
||||||
currentState: currentStateFn,
|
currentState: currentStateFn,
|
||||||
gasLimit: gasLimitFn,
|
gasLimit: gasLimitFn,
|
||||||
|
minGasPrice: new(big.Int),
|
||||||
pendingState: state.ManageState(currentStateFn()),
|
pendingState: state.ManageState(currentStateFn()),
|
||||||
|
events: eventMux.Subscribe(ChainEvent{}, GasPriceChanged{}),
|
||||||
}
|
}
|
||||||
|
go pool.eventLoop()
|
||||||
|
|
||||||
|
return pool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pool *TxPool) Start() {
|
func (pool *TxPool) eventLoop() {
|
||||||
// Track chain events. When a chain events occurs (new chain canon block)
|
// Track chain events. When a chain events occurs (new chain canon block)
|
||||||
// we need to know the new state. The new state will help us determine
|
// we need to know the new state. The new state will help us determine
|
||||||
// the nonces in the managed state
|
// the nonces in the managed state
|
||||||
pool.events = pool.eventMux.Subscribe(ChainEvent{})
|
for ev := range pool.events.Chan() {
|
||||||
for _ = range pool.events.Chan() {
|
|
||||||
pool.mu.Lock()
|
pool.mu.Lock()
|
||||||
|
|
||||||
|
switch ev := ev.(type) {
|
||||||
|
case ChainEvent:
|
||||||
pool.resetState()
|
pool.resetState()
|
||||||
|
case GasPriceChanged:
|
||||||
|
pool.minGasPrice = ev.Price
|
||||||
|
}
|
||||||
|
|
||||||
pool.mu.Unlock()
|
pool.mu.Unlock()
|
||||||
}
|
}
|
||||||
@ -100,7 +115,6 @@ func (pool *TxPool) resetState() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (pool *TxPool) Stop() {
|
func (pool *TxPool) Stop() {
|
||||||
pool.pending = make(map[common.Hash]*types.Transaction)
|
|
||||||
close(pool.quit)
|
close(pool.quit)
|
||||||
pool.events.Unsubscribe()
|
pool.events.Unsubscribe()
|
||||||
glog.V(logger.Info).Infoln("TX Pool stopped")
|
glog.V(logger.Info).Infoln("TX Pool stopped")
|
||||||
@ -122,6 +136,11 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
|
|||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Drop transactions under our own minimal accepted gas price
|
||||||
|
if pool.minGasPrice.Cmp(tx.GasPrice()) > 0 {
|
||||||
|
return ErrCheap
|
||||||
|
}
|
||||||
|
|
||||||
// Validate the transaction sender and it's sig. Throw
|
// Validate the transaction sender and it's sig. Throw
|
||||||
// if the from fields is invalid.
|
// if the from fields is invalid.
|
||||||
if from, err = tx.From(); err != nil {
|
if from, err = tx.From(); err != nil {
|
||||||
@ -169,15 +188,10 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// validate and queue transactions.
|
||||||
func (self *TxPool) add(tx *types.Transaction) error {
|
func (self *TxPool) add(tx *types.Transaction) error {
|
||||||
hash := tx.Hash()
|
hash := tx.Hash()
|
||||||
|
|
||||||
/* XXX I'm unsure about this. This is extremely dangerous and may result
|
|
||||||
in total black listing of certain transactions
|
|
||||||
if self.invalidHashes.Has(hash) {
|
|
||||||
return fmt.Errorf("Invalid transaction (%x)", hash[:4])
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
if self.pending[hash] != nil {
|
if self.pending[hash] != nil {
|
||||||
return fmt.Errorf("Known transaction (%x)", hash[:4])
|
return fmt.Errorf("Known transaction (%x)", hash[:4])
|
||||||
}
|
}
|
||||||
@ -207,6 +221,30 @@ func (self *TxPool) add(tx *types.Transaction) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// queueTx will queue an unknown transaction
|
||||||
|
func (self *TxPool) queueTx(hash common.Hash, tx *types.Transaction) {
|
||||||
|
from, _ := tx.From() // already validated
|
||||||
|
if self.queue[from] == nil {
|
||||||
|
self.queue[from] = make(map[common.Hash]*types.Transaction)
|
||||||
|
}
|
||||||
|
self.queue[from][hash] = tx
|
||||||
|
}
|
||||||
|
|
||||||
|
// addTx will add a transaction to the pending (processable queue) list of transactions
|
||||||
|
func (pool *TxPool) addTx(hash common.Hash, addr common.Address, tx *types.Transaction) {
|
||||||
|
if _, ok := pool.pending[hash]; !ok {
|
||||||
|
pool.pending[hash] = tx
|
||||||
|
|
||||||
|
// Increment the nonce on the pending state. This can only happen if
|
||||||
|
// the nonce is +1 to the previous one.
|
||||||
|
pool.pendingState.SetNonce(addr, tx.AccountNonce+1)
|
||||||
|
// Notify the subscribers. This event is posted in a goroutine
|
||||||
|
// because it's possible that somewhere during the post "Remove transaction"
|
||||||
|
// gets called which will then wait for the global tx pool lock and deadlock.
|
||||||
|
go pool.eventMux.Post(TxPreEvent{tx})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Add queues a single transaction in the pool if it is valid.
|
// Add queues a single transaction in the pool if it is valid.
|
||||||
func (self *TxPool) Add(tx *types.Transaction) error {
|
func (self *TxPool) Add(tx *types.Transaction) error {
|
||||||
self.mu.Lock()
|
self.mu.Lock()
|
||||||
@ -290,28 +328,6 @@ func (self *TxPool) RemoveTransactions(txs types.Transactions) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *TxPool) queueTx(hash common.Hash, tx *types.Transaction) {
|
|
||||||
from, _ := tx.From() // already validated
|
|
||||||
if self.queue[from] == nil {
|
|
||||||
self.queue[from] = make(map[common.Hash]*types.Transaction)
|
|
||||||
}
|
|
||||||
self.queue[from][hash] = tx
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pool *TxPool) addTx(hash common.Hash, addr common.Address, tx *types.Transaction) {
|
|
||||||
if _, ok := pool.pending[hash]; !ok {
|
|
||||||
pool.pending[hash] = tx
|
|
||||||
|
|
||||||
// Increment the nonce on the pending state. This can only happen if
|
|
||||||
// the nonce is +1 to the previous one.
|
|
||||||
pool.pendingState.SetNonce(addr, tx.AccountNonce+1)
|
|
||||||
// Notify the subscribers. This event is posted in a goroutine
|
|
||||||
// because it's possible that somewhere during the post "Remove transaction"
|
|
||||||
// gets called which will then wait for the global tx pool lock and deadlock.
|
|
||||||
go pool.eventMux.Post(TxPreEvent{tx})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkQueue moves transactions that have become processable to main pool.
|
// checkQueue moves transactions that have become processable to main pool.
|
||||||
func (pool *TxPool) checkQueue() {
|
func (pool *TxPool) checkQueue() {
|
||||||
state := pool.pendingState
|
state := pool.pendingState
|
||||||
@ -336,7 +352,16 @@ func (pool *TxPool) checkQueue() {
|
|||||||
// Find the next consecutive nonce range starting at the
|
// Find the next consecutive nonce range starting at the
|
||||||
// current account nonce.
|
// current account nonce.
|
||||||
sort.Sort(addq)
|
sort.Sort(addq)
|
||||||
for _, e := range addq {
|
for i, e := range addq {
|
||||||
|
// start deleting the transactions from the queue if they exceed the limit
|
||||||
|
if i > maxQueued {
|
||||||
|
if glog.V(logger.Debug) {
|
||||||
|
glog.Infof("Queued tx limit exceeded for %s. Tx %s removed\n", common.PP(address[:]), common.PP(e.hash[:]))
|
||||||
|
}
|
||||||
|
delete(pool.queue[address], e.hash)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
if e.AccountNonce > guessedNonce {
|
if e.AccountNonce > guessedNonce {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
@ -49,13 +49,13 @@ func NewContext(caller ContextRef, object ContextRef, value, gas, price *big.Int
|
|||||||
return c
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Context) GetOp(n *big.Int) OpCode {
|
func (c *Context) GetOp(n uint64) OpCode {
|
||||||
return OpCode(c.GetByte(n))
|
return OpCode(c.GetByte(n))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Context) GetByte(n *big.Int) byte {
|
func (c *Context) GetByte(n uint64) byte {
|
||||||
if n.Cmp(big.NewInt(int64(len(c.Code)))) < 0 {
|
if n < uint64(len(c.Code)) {
|
||||||
return c.Code[n.Int64()]
|
return c.Code[n]
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
|
@ -8,6 +8,8 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Environment is is required by the virtual machine to get information from
|
||||||
|
// it's own isolated environment. For an example see `core.VMEnv`
|
||||||
type Environment interface {
|
type Environment interface {
|
||||||
State() *state.StateDB
|
State() *state.StateDB
|
||||||
|
|
||||||
@ -20,6 +22,8 @@ type Environment interface {
|
|||||||
GasLimit() *big.Int
|
GasLimit() *big.Int
|
||||||
Transfer(from, to Account, amount *big.Int) error
|
Transfer(from, to Account, amount *big.Int) error
|
||||||
AddLog(*state.Log)
|
AddLog(*state.Log)
|
||||||
|
AddStructLog(StructLog)
|
||||||
|
StructLogs() []StructLog
|
||||||
|
|
||||||
VmType() Type
|
VmType() Type
|
||||||
|
|
||||||
@ -31,6 +35,19 @@ type Environment interface {
|
|||||||
Create(me ContextRef, data []byte, gas, price, value *big.Int) ([]byte, error, ContextRef)
|
Create(me ContextRef, data []byte, gas, price, value *big.Int) ([]byte, error, ContextRef)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StructLog is emited to the Environment each cycle and lists information about the curent internal state
|
||||||
|
// prior to the execution of the statement.
|
||||||
|
type StructLog struct {
|
||||||
|
Pc uint64
|
||||||
|
Op OpCode
|
||||||
|
Gas *big.Int
|
||||||
|
GasCost *big.Int
|
||||||
|
Memory []byte
|
||||||
|
Stack []*big.Int
|
||||||
|
Storage map[common.Hash][]byte
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
type Account interface {
|
type Account interface {
|
||||||
SubBalance(amount *big.Int)
|
SubBalance(amount *big.Int)
|
||||||
AddBalance(amount *big.Int)
|
AddBalance(amount *big.Int)
|
||||||
|
@ -2,20 +2,14 @@ package vm
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"math/big"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type OutOfGasError struct {
|
type OutOfGasError struct{}
|
||||||
req, has *big.Int
|
|
||||||
}
|
|
||||||
|
|
||||||
func OOG(req, has *big.Int) OutOfGasError {
|
|
||||||
return OutOfGasError{req, has}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (self OutOfGasError) Error() string {
|
func (self OutOfGasError) Error() string {
|
||||||
return fmt.Sprintf("out of gas! require %v, have %v", self.req, self.has)
|
return "Out Of Gas"
|
||||||
}
|
}
|
||||||
|
|
||||||
func IsOOGErr(err error) bool {
|
func IsOOGErr(err error) bool {
|
||||||
|
51
core/vm/logger.go
Normal file
51
core/vm/logger.go
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
package vm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
func StdErrFormat(logs []StructLog) {
|
||||||
|
fmt.Fprintf(os.Stderr, "VM STAT %d OPs\n", len(logs))
|
||||||
|
for _, log := range logs {
|
||||||
|
fmt.Fprintf(os.Stderr, "PC %08d: %s GAS: %v COST: %v", log.Pc, log.Op, log.Gas, log.GasCost)
|
||||||
|
if log.Err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, " ERROR: %v", log.Err)
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "\n")
|
||||||
|
|
||||||
|
fmt.Fprintln(os.Stderr, "STACK =", len(log.Stack))
|
||||||
|
|
||||||
|
for i := len(log.Stack) - 1; i >= 0; i-- {
|
||||||
|
fmt.Fprintf(os.Stderr, "%04d: %x\n", len(log.Stack)-i-1, common.LeftPadBytes(log.Stack[i].Bytes(), 32))
|
||||||
|
}
|
||||||
|
|
||||||
|
const maxMem = 10
|
||||||
|
addr := 0
|
||||||
|
fmt.Fprintln(os.Stderr, "MEM =", len(log.Memory))
|
||||||
|
for i := 0; i+16 <= len(log.Memory) && addr < maxMem; i += 16 {
|
||||||
|
data := log.Memory[i : i+16]
|
||||||
|
str := fmt.Sprintf("%04d: % x ", addr*16, data)
|
||||||
|
for _, r := range data {
|
||||||
|
if r == 0 {
|
||||||
|
str += "."
|
||||||
|
} else if unicode.IsPrint(rune(r)) {
|
||||||
|
str += fmt.Sprintf("%s", string(r))
|
||||||
|
} else {
|
||||||
|
str += "?"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
addr++
|
||||||
|
fmt.Fprintln(os.Stderr, str)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintln(os.Stderr, "STORAGE =", len(log.Storage))
|
||||||
|
for h, item := range log.Storage {
|
||||||
|
fmt.Fprintf(os.Stderr, "%x: %x\n", h, common.LeftPadBytes(item, 32))
|
||||||
|
}
|
||||||
|
fmt.Fprintln(os.Stderr)
|
||||||
|
}
|
||||||
|
}
|
@ -5,7 +5,7 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
)
|
)
|
||||||
|
|
||||||
func newStack() *stack {
|
func newstack() *stack {
|
||||||
return &stack{}
|
return &stack{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -14,6 +14,10 @@ type stack struct {
|
|||||||
ptr int
|
ptr int
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (st *stack) Data() []*big.Int {
|
||||||
|
return st.data[:st.ptr]
|
||||||
|
}
|
||||||
|
|
||||||
func (st *stack) push(d *big.Int) {
|
func (st *stack) push(d *big.Int) {
|
||||||
// NOTE push limit (1024) is checked in baseCheck
|
// NOTE push limit (1024) is checked in baseCheck
|
||||||
stackItem := new(big.Int).Set(d)
|
stackItem := new(big.Int).Set(d)
|
||||||
|
@ -3,6 +3,4 @@ package vm
|
|||||||
type VirtualMachine interface {
|
type VirtualMachine interface {
|
||||||
Env() Environment
|
Env() Environment
|
||||||
Run(context *Context, data []byte) ([]byte, error)
|
Run(context *Context, data []byte) ([]byte, error)
|
||||||
Printf(string, ...interface{}) VirtualMachine
|
|
||||||
Endl() VirtualMachine
|
|
||||||
}
|
}
|
||||||
|
235
core/vm/vm.go
235
core/vm/vm.go
@ -7,16 +7,13 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Vm implements VirtualMachine
|
||||||
type Vm struct {
|
type Vm struct {
|
||||||
env Environment
|
env Environment
|
||||||
|
|
||||||
logTy byte
|
|
||||||
logStr string
|
|
||||||
|
|
||||||
err error
|
err error
|
||||||
// For logging
|
// For logging
|
||||||
debug bool
|
debug bool
|
||||||
@ -31,13 +28,13 @@ type Vm struct {
|
|||||||
After func(*Context, error)
|
After func(*Context, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// New returns a new Virtual Machine
|
||||||
func New(env Environment) *Vm {
|
func New(env Environment) *Vm {
|
||||||
lt := LogTyPretty
|
return &Vm{env: env, debug: Debug, Recoverable: true}
|
||||||
|
|
||||||
return &Vm{debug: Debug, env: env, logTy: lt, Recoverable: true}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
// Run loops and evaluates the contract's code with the given input data
|
||||||
|
func (self *Vm) Run(context *Context, input []byte) (ret []byte, err error) {
|
||||||
self.env.SetDepth(self.env.Depth() + 1)
|
self.env.SetDepth(self.env.Depth() + 1)
|
||||||
defer self.env.SetDepth(self.env.Depth() - 1)
|
defer self.env.SetDepth(self.env.Depth() - 1)
|
||||||
|
|
||||||
@ -46,9 +43,32 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
code = context.Code
|
code = context.Code
|
||||||
value = context.value
|
value = context.value
|
||||||
price = context.Price
|
price = context.Price
|
||||||
)
|
|
||||||
|
|
||||||
self.Printf("(%d) (%x) %x (code=%d) gas: %v (d) %x", self.env.Depth(), caller.Address().Bytes()[:4], context.Address(), len(code), context.Gas, callData).Endl()
|
op OpCode // current opcode
|
||||||
|
codehash = crypto.Sha3Hash(code) // codehash is used when doing jump dest caching
|
||||||
|
mem = NewMemory() // bound memory
|
||||||
|
stack = newstack() // local stack
|
||||||
|
statedb = self.env.State() // current state
|
||||||
|
// For optimisation reason we're using uint64 as the program counter.
|
||||||
|
// It's theoretically possible to go above 2^64. The YP defines the PC to be uint256. Pratically much less so feasible.
|
||||||
|
pc = uint64(0) // program counter
|
||||||
|
|
||||||
|
// jump evaluates and checks whether the given jump destination is a valid one
|
||||||
|
// if valid move the `pc` otherwise return an error.
|
||||||
|
jump = func(from uint64, to *big.Int) error {
|
||||||
|
if !context.jumpdests.has(codehash, code, to) {
|
||||||
|
nop := context.GetOp(to.Uint64())
|
||||||
|
return fmt.Errorf("invalid jump destination (%v) %v", nop, to)
|
||||||
|
}
|
||||||
|
|
||||||
|
pc = to.Uint64()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
newMemSize *big.Int
|
||||||
|
cost *big.Int
|
||||||
|
)
|
||||||
|
|
||||||
// User defer pattern to check for an error and, based on the error being nil or not, use all gas and return.
|
// User defer pattern to check for an error and, based on the error being nil or not, use all gas and return.
|
||||||
defer func() {
|
defer func() {
|
||||||
@ -57,7 +77,8 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
self.Printf(" %v", err).Endl()
|
self.log(pc, op, context.Gas, cost, mem, stack, context, err)
|
||||||
|
|
||||||
// In case of a VM exception (known exceptions) all gas consumed (panics NOT included).
|
// In case of a VM exception (known exceptions) all gas consumed (panics NOT included).
|
||||||
context.UseGas(context.Gas)
|
context.UseGas(context.Gas)
|
||||||
|
|
||||||
@ -67,7 +88,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
|
|
||||||
if context.CodeAddr != nil {
|
if context.CodeAddr != nil {
|
||||||
if p := Precompiled[context.CodeAddr.Str()]; p != nil {
|
if p := Precompiled[context.CodeAddr.Str()]; p != nil {
|
||||||
return self.RunPrecompiled(p, callData, context)
|
return self.RunPrecompiled(p, input, context)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -76,29 +97,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
return context.Return(nil), nil
|
return context.Return(nil), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
op OpCode
|
|
||||||
codehash = crypto.Sha3Hash(code)
|
|
||||||
mem = NewMemory()
|
|
||||||
stack = newStack()
|
|
||||||
pc = new(big.Int)
|
|
||||||
statedb = self.env.State()
|
|
||||||
|
|
||||||
jump = func(from *big.Int, to *big.Int) error {
|
|
||||||
if !context.jumpdests.has(codehash, code, to) {
|
|
||||||
nop := context.GetOp(to)
|
|
||||||
return fmt.Errorf("invalid jump destination (%v) %v", nop, to)
|
|
||||||
}
|
|
||||||
|
|
||||||
self.Printf(" ~> %v", to)
|
|
||||||
pc = to
|
|
||||||
|
|
||||||
self.Endl()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
for {
|
for {
|
||||||
// The base for all big integer arithmetic
|
// The base for all big integer arithmetic
|
||||||
base := new(big.Int)
|
base := new(big.Int)
|
||||||
@ -106,63 +104,55 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
// Get the memory location of pc
|
// Get the memory location of pc
|
||||||
op = context.GetOp(pc)
|
op = context.GetOp(pc)
|
||||||
|
|
||||||
self.Printf("(pc) %-3d -o- %-14s (m) %-4d (s) %-4d ", pc, op.String(), mem.Len(), stack.len())
|
// calculate the new memory size and gas price for the current executing opcode
|
||||||
newMemSize, gas, err := self.calculateGasAndSize(context, caller, op, statedb, mem, stack)
|
newMemSize, cost, err = self.calculateGasAndSize(context, caller, op, statedb, mem, stack)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
self.Printf("(g) %-3v (%v)", gas, context.Gas)
|
// Use the calculated gas. When insufficient gas is present, use all gas and return an
|
||||||
|
// Out Of Gas error
|
||||||
if !context.UseGas(gas) {
|
if !context.UseGas(cost) {
|
||||||
self.Endl()
|
|
||||||
|
|
||||||
tmp := new(big.Int).Set(context.Gas)
|
|
||||||
|
|
||||||
context.UseGas(context.Gas)
|
context.UseGas(context.Gas)
|
||||||
|
|
||||||
return context.Return(nil), OOG(gas, tmp)
|
return context.Return(nil), OutOfGasError{}
|
||||||
}
|
}
|
||||||
|
// Resize the memory calculated previously
|
||||||
mem.Resize(newMemSize.Uint64())
|
mem.Resize(newMemSize.Uint64())
|
||||||
|
// Add a log message
|
||||||
|
self.log(pc, op, context.Gas, cost, mem, stack, context, nil)
|
||||||
|
|
||||||
switch op {
|
switch op {
|
||||||
case ADD:
|
case ADD:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v + %v", y, x)
|
|
||||||
|
|
||||||
base.Add(x, y)
|
base.Add(x, y)
|
||||||
|
|
||||||
U256(base)
|
U256(base)
|
||||||
|
|
||||||
self.Printf(" = %v", base)
|
|
||||||
// pop result back on the stack
|
// pop result back on the stack
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
case SUB:
|
case SUB:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v - %v", x, y)
|
|
||||||
|
|
||||||
base.Sub(x, y)
|
base.Sub(x, y)
|
||||||
|
|
||||||
U256(base)
|
U256(base)
|
||||||
|
|
||||||
self.Printf(" = %v", base)
|
|
||||||
// pop result back on the stack
|
// pop result back on the stack
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
case MUL:
|
case MUL:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v * %v", y, x)
|
|
||||||
|
|
||||||
base.Mul(x, y)
|
base.Mul(x, y)
|
||||||
|
|
||||||
U256(base)
|
U256(base)
|
||||||
|
|
||||||
self.Printf(" = %v", base)
|
|
||||||
// pop result back on the stack
|
// pop result back on the stack
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
case DIV:
|
case DIV:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v / %v", x, y)
|
|
||||||
|
|
||||||
if y.Cmp(common.Big0) != 0 {
|
if y.Cmp(common.Big0) != 0 {
|
||||||
base.Div(x, y)
|
base.Div(x, y)
|
||||||
@ -170,14 +160,11 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
|
|
||||||
U256(base)
|
U256(base)
|
||||||
|
|
||||||
self.Printf(" = %v", base)
|
|
||||||
// pop result back on the stack
|
// pop result back on the stack
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
case SDIV:
|
case SDIV:
|
||||||
x, y := S256(stack.pop()), S256(stack.pop())
|
x, y := S256(stack.pop()), S256(stack.pop())
|
||||||
|
|
||||||
self.Printf(" %v / %v", x, y)
|
|
||||||
|
|
||||||
if y.Cmp(common.Big0) == 0 {
|
if y.Cmp(common.Big0) == 0 {
|
||||||
base.Set(common.Big0)
|
base.Set(common.Big0)
|
||||||
} else {
|
} else {
|
||||||
@ -193,13 +180,10 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
U256(base)
|
U256(base)
|
||||||
}
|
}
|
||||||
|
|
||||||
self.Printf(" = %v", base)
|
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
case MOD:
|
case MOD:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
|
|
||||||
self.Printf(" %v %% %v", x, y)
|
|
||||||
|
|
||||||
if y.Cmp(common.Big0) == 0 {
|
if y.Cmp(common.Big0) == 0 {
|
||||||
base.Set(common.Big0)
|
base.Set(common.Big0)
|
||||||
} else {
|
} else {
|
||||||
@ -208,13 +192,10 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
|
|
||||||
U256(base)
|
U256(base)
|
||||||
|
|
||||||
self.Printf(" = %v", base)
|
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
case SMOD:
|
case SMOD:
|
||||||
x, y := S256(stack.pop()), S256(stack.pop())
|
x, y := S256(stack.pop()), S256(stack.pop())
|
||||||
|
|
||||||
self.Printf(" %v %% %v", x, y)
|
|
||||||
|
|
||||||
if y.Cmp(common.Big0) == 0 {
|
if y.Cmp(common.Big0) == 0 {
|
||||||
base.Set(common.Big0)
|
base.Set(common.Big0)
|
||||||
} else {
|
} else {
|
||||||
@ -230,20 +211,15 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
U256(base)
|
U256(base)
|
||||||
}
|
}
|
||||||
|
|
||||||
self.Printf(" = %v", base)
|
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
|
|
||||||
case EXP:
|
case EXP:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
|
|
||||||
self.Printf(" %v ** %v", x, y)
|
|
||||||
|
|
||||||
base.Exp(x, y, Pow256)
|
base.Exp(x, y, Pow256)
|
||||||
|
|
||||||
U256(base)
|
U256(base)
|
||||||
|
|
||||||
self.Printf(" = %v", base)
|
|
||||||
|
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
case SIGNEXTEND:
|
case SIGNEXTEND:
|
||||||
back := stack.pop()
|
back := stack.pop()
|
||||||
@ -260,15 +236,13 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
|
|
||||||
num = U256(num)
|
num = U256(num)
|
||||||
|
|
||||||
self.Printf(" = %v", num)
|
|
||||||
|
|
||||||
stack.push(num)
|
stack.push(num)
|
||||||
}
|
}
|
||||||
case NOT:
|
case NOT:
|
||||||
stack.push(U256(new(big.Int).Not(stack.pop())))
|
stack.push(U256(new(big.Int).Not(stack.pop())))
|
||||||
case LT:
|
case LT:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v < %v", x, y)
|
|
||||||
// x < y
|
// x < y
|
||||||
if x.Cmp(y) < 0 {
|
if x.Cmp(y) < 0 {
|
||||||
stack.push(common.BigTrue)
|
stack.push(common.BigTrue)
|
||||||
@ -277,7 +251,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
}
|
}
|
||||||
case GT:
|
case GT:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v > %v", x, y)
|
|
||||||
|
|
||||||
// x > y
|
// x > y
|
||||||
if x.Cmp(y) > 0 {
|
if x.Cmp(y) > 0 {
|
||||||
@ -288,7 +261,7 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
|
|
||||||
case SLT:
|
case SLT:
|
||||||
x, y := S256(stack.pop()), S256(stack.pop())
|
x, y := S256(stack.pop()), S256(stack.pop())
|
||||||
self.Printf(" %v < %v", x, y)
|
|
||||||
// x < y
|
// x < y
|
||||||
if x.Cmp(S256(y)) < 0 {
|
if x.Cmp(S256(y)) < 0 {
|
||||||
stack.push(common.BigTrue)
|
stack.push(common.BigTrue)
|
||||||
@ -297,7 +270,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
}
|
}
|
||||||
case SGT:
|
case SGT:
|
||||||
x, y := S256(stack.pop()), S256(stack.pop())
|
x, y := S256(stack.pop()), S256(stack.pop())
|
||||||
self.Printf(" %v > %v", x, y)
|
|
||||||
|
|
||||||
// x > y
|
// x > y
|
||||||
if x.Cmp(y) > 0 {
|
if x.Cmp(y) > 0 {
|
||||||
@ -308,7 +280,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
|
|
||||||
case EQ:
|
case EQ:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v == %v", y, x)
|
|
||||||
|
|
||||||
// x == y
|
// x == y
|
||||||
if x.Cmp(y) == 0 {
|
if x.Cmp(y) == 0 {
|
||||||
@ -326,17 +297,14 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
|
|
||||||
case AND:
|
case AND:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v & %v", y, x)
|
|
||||||
|
|
||||||
stack.push(base.And(x, y))
|
stack.push(base.And(x, y))
|
||||||
case OR:
|
case OR:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v | %v", x, y)
|
|
||||||
|
|
||||||
stack.push(base.Or(x, y))
|
stack.push(base.Or(x, y))
|
||||||
case XOR:
|
case XOR:
|
||||||
x, y := stack.pop(), stack.pop()
|
x, y := stack.pop(), stack.pop()
|
||||||
self.Printf(" %v ^ %v", x, y)
|
|
||||||
|
|
||||||
stack.push(base.Xor(x, y))
|
stack.push(base.Xor(x, y))
|
||||||
case BYTE:
|
case BYTE:
|
||||||
@ -350,8 +318,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
base.Set(common.BigFalse)
|
base.Set(common.BigFalse)
|
||||||
}
|
}
|
||||||
|
|
||||||
self.Printf(" => 0x%x", base.Bytes())
|
|
||||||
|
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
case ADDMOD:
|
case ADDMOD:
|
||||||
x := stack.pop()
|
x := stack.pop()
|
||||||
@ -365,8 +331,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
base = U256(base)
|
base = U256(base)
|
||||||
}
|
}
|
||||||
|
|
||||||
self.Printf(" %v + %v %% %v = %v", x, y, z, base)
|
|
||||||
|
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
case MULMOD:
|
case MULMOD:
|
||||||
x := stack.pop()
|
x := stack.pop()
|
||||||
@ -380,8 +344,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
U256(base)
|
U256(base)
|
||||||
}
|
}
|
||||||
|
|
||||||
self.Printf(" %v + %v %% %v = %v", x, y, z, base)
|
|
||||||
|
|
||||||
stack.push(base)
|
stack.push(base)
|
||||||
|
|
||||||
case SHA3:
|
case SHA3:
|
||||||
@ -390,55 +352,45 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
|
|
||||||
stack.push(common.BigD(data))
|
stack.push(common.BigD(data))
|
||||||
|
|
||||||
self.Printf(" => (%v) %x", size, data)
|
|
||||||
case ADDRESS:
|
case ADDRESS:
|
||||||
stack.push(common.Bytes2Big(context.Address().Bytes()))
|
stack.push(common.Bytes2Big(context.Address().Bytes()))
|
||||||
|
|
||||||
self.Printf(" => %x", context.Address())
|
|
||||||
case BALANCE:
|
case BALANCE:
|
||||||
addr := common.BigToAddress(stack.pop())
|
addr := common.BigToAddress(stack.pop())
|
||||||
balance := statedb.GetBalance(addr)
|
balance := statedb.GetBalance(addr)
|
||||||
|
|
||||||
stack.push(balance)
|
stack.push(balance)
|
||||||
|
|
||||||
self.Printf(" => %v (%x)", balance, addr)
|
|
||||||
case ORIGIN:
|
case ORIGIN:
|
||||||
origin := self.env.Origin()
|
origin := self.env.Origin()
|
||||||
|
|
||||||
stack.push(origin.Big())
|
stack.push(origin.Big())
|
||||||
|
|
||||||
self.Printf(" => %x", origin)
|
|
||||||
case CALLER:
|
case CALLER:
|
||||||
caller := context.caller.Address()
|
caller := context.caller.Address()
|
||||||
stack.push(common.Bytes2Big(caller.Bytes()))
|
stack.push(common.Bytes2Big(caller.Bytes()))
|
||||||
|
|
||||||
self.Printf(" => %x", caller)
|
|
||||||
case CALLVALUE:
|
case CALLVALUE:
|
||||||
stack.push(value)
|
stack.push(value)
|
||||||
|
|
||||||
self.Printf(" => %v", value)
|
|
||||||
case CALLDATALOAD:
|
case CALLDATALOAD:
|
||||||
data := getData(callData, stack.pop(), common.Big32)
|
data := getData(input, stack.pop(), common.Big32)
|
||||||
|
|
||||||
self.Printf(" => 0x%x", data)
|
|
||||||
|
|
||||||
stack.push(common.Bytes2Big(data))
|
stack.push(common.Bytes2Big(data))
|
||||||
case CALLDATASIZE:
|
case CALLDATASIZE:
|
||||||
l := int64(len(callData))
|
l := int64(len(input))
|
||||||
stack.push(big.NewInt(l))
|
stack.push(big.NewInt(l))
|
||||||
|
|
||||||
self.Printf(" => %d", l)
|
|
||||||
case CALLDATACOPY:
|
case CALLDATACOPY:
|
||||||
var (
|
var (
|
||||||
mOff = stack.pop()
|
mOff = stack.pop()
|
||||||
cOff = stack.pop()
|
cOff = stack.pop()
|
||||||
l = stack.pop()
|
l = stack.pop()
|
||||||
)
|
)
|
||||||
data := getData(callData, cOff, l)
|
data := getData(input, cOff, l)
|
||||||
|
|
||||||
mem.Set(mOff.Uint64(), l.Uint64(), data)
|
mem.Set(mOff.Uint64(), l.Uint64(), data)
|
||||||
|
|
||||||
self.Printf(" => [%v, %v, %v]", mOff, cOff, l)
|
|
||||||
case CODESIZE, EXTCODESIZE:
|
case CODESIZE, EXTCODESIZE:
|
||||||
var code []byte
|
var code []byte
|
||||||
if op == EXTCODESIZE {
|
if op == EXTCODESIZE {
|
||||||
@ -452,7 +404,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
l := big.NewInt(int64(len(code)))
|
l := big.NewInt(int64(len(code)))
|
||||||
stack.push(l)
|
stack.push(l)
|
||||||
|
|
||||||
self.Printf(" => %d", l)
|
|
||||||
case CODECOPY, EXTCODECOPY:
|
case CODECOPY, EXTCODECOPY:
|
||||||
var code []byte
|
var code []byte
|
||||||
if op == EXTCODECOPY {
|
if op == EXTCODECOPY {
|
||||||
@ -472,12 +423,9 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
|
|
||||||
mem.Set(mOff.Uint64(), l.Uint64(), codeCopy)
|
mem.Set(mOff.Uint64(), l.Uint64(), codeCopy)
|
||||||
|
|
||||||
self.Printf(" => [%v, %v, %v] %x", mOff, cOff, l, codeCopy)
|
|
||||||
case GASPRICE:
|
case GASPRICE:
|
||||||
stack.push(context.Price)
|
stack.push(context.Price)
|
||||||
|
|
||||||
self.Printf(" => %x", context.Price)
|
|
||||||
|
|
||||||
case BLOCKHASH:
|
case BLOCKHASH:
|
||||||
num := stack.pop()
|
num := stack.pop()
|
||||||
|
|
||||||
@ -488,56 +436,47 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
stack.push(common.Big0)
|
stack.push(common.Big0)
|
||||||
}
|
}
|
||||||
|
|
||||||
self.Printf(" => 0x%x", stack.peek().Bytes())
|
|
||||||
case COINBASE:
|
case COINBASE:
|
||||||
coinbase := self.env.Coinbase()
|
coinbase := self.env.Coinbase()
|
||||||
|
|
||||||
stack.push(coinbase.Big())
|
stack.push(coinbase.Big())
|
||||||
|
|
||||||
self.Printf(" => 0x%x", coinbase)
|
|
||||||
case TIMESTAMP:
|
case TIMESTAMP:
|
||||||
time := self.env.Time()
|
time := self.env.Time()
|
||||||
|
|
||||||
stack.push(big.NewInt(time))
|
stack.push(big.NewInt(time))
|
||||||
|
|
||||||
self.Printf(" => 0x%x", time)
|
|
||||||
case NUMBER:
|
case NUMBER:
|
||||||
number := self.env.BlockNumber()
|
number := self.env.BlockNumber()
|
||||||
|
|
||||||
stack.push(U256(number))
|
stack.push(U256(number))
|
||||||
|
|
||||||
self.Printf(" => 0x%x", number.Bytes())
|
|
||||||
case DIFFICULTY:
|
case DIFFICULTY:
|
||||||
difficulty := self.env.Difficulty()
|
difficulty := self.env.Difficulty()
|
||||||
|
|
||||||
stack.push(difficulty)
|
stack.push(difficulty)
|
||||||
|
|
||||||
self.Printf(" => 0x%x", difficulty.Bytes())
|
|
||||||
case GASLIMIT:
|
case GASLIMIT:
|
||||||
self.Printf(" => %v", self.env.GasLimit())
|
|
||||||
|
|
||||||
stack.push(self.env.GasLimit())
|
stack.push(self.env.GasLimit())
|
||||||
|
|
||||||
case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32:
|
case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32:
|
||||||
a := big.NewInt(int64(op - PUSH1 + 1))
|
size := uint64(op - PUSH1 + 1)
|
||||||
byts := getData(code, new(big.Int).Add(pc, big.NewInt(1)), a)
|
byts := getData(code, new(big.Int).SetUint64(pc+1), new(big.Int).SetUint64(size))
|
||||||
// push value to stack
|
// push value to stack
|
||||||
stack.push(common.Bytes2Big(byts))
|
stack.push(common.Bytes2Big(byts))
|
||||||
pc.Add(pc, a)
|
pc += size
|
||||||
|
|
||||||
self.Printf(" => 0x%x", byts)
|
|
||||||
case POP:
|
case POP:
|
||||||
stack.pop()
|
stack.pop()
|
||||||
case DUP1, DUP2, DUP3, DUP4, DUP5, DUP6, DUP7, DUP8, DUP9, DUP10, DUP11, DUP12, DUP13, DUP14, DUP15, DUP16:
|
case DUP1, DUP2, DUP3, DUP4, DUP5, DUP6, DUP7, DUP8, DUP9, DUP10, DUP11, DUP12, DUP13, DUP14, DUP15, DUP16:
|
||||||
n := int(op - DUP1 + 1)
|
n := int(op - DUP1 + 1)
|
||||||
stack.dup(n)
|
stack.dup(n)
|
||||||
|
|
||||||
self.Printf(" => [%d] 0x%x", n, stack.peek().Bytes())
|
|
||||||
case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16:
|
case SWAP1, SWAP2, SWAP3, SWAP4, SWAP5, SWAP6, SWAP7, SWAP8, SWAP9, SWAP10, SWAP11, SWAP12, SWAP13, SWAP14, SWAP15, SWAP16:
|
||||||
n := int(op - SWAP1 + 2)
|
n := int(op - SWAP1 + 2)
|
||||||
stack.swap(n)
|
stack.swap(n)
|
||||||
|
|
||||||
self.Printf(" => [%d]", n)
|
|
||||||
case LOG0, LOG1, LOG2, LOG3, LOG4:
|
case LOG0, LOG1, LOG2, LOG3, LOG4:
|
||||||
n := int(op - LOG0)
|
n := int(op - LOG0)
|
||||||
topics := make([]common.Hash, n)
|
topics := make([]common.Hash, n)
|
||||||
@ -550,38 +489,32 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
log := state.NewLog(context.Address(), topics, data, self.env.BlockNumber().Uint64())
|
log := state.NewLog(context.Address(), topics, data, self.env.BlockNumber().Uint64())
|
||||||
self.env.AddLog(log)
|
self.env.AddLog(log)
|
||||||
|
|
||||||
self.Printf(" => %v", log)
|
|
||||||
case MLOAD:
|
case MLOAD:
|
||||||
offset := stack.pop()
|
offset := stack.pop()
|
||||||
val := common.BigD(mem.Get(offset.Int64(), 32))
|
val := common.BigD(mem.Get(offset.Int64(), 32))
|
||||||
stack.push(val)
|
stack.push(val)
|
||||||
|
|
||||||
self.Printf(" => 0x%x", val.Bytes())
|
|
||||||
case MSTORE:
|
case MSTORE:
|
||||||
// pop value of the stack
|
// pop value of the stack
|
||||||
mStart, val := stack.pop(), stack.pop()
|
mStart, val := stack.pop(), stack.pop()
|
||||||
mem.Set(mStart.Uint64(), 32, common.BigToBytes(val, 256))
|
mem.Set(mStart.Uint64(), 32, common.BigToBytes(val, 256))
|
||||||
|
|
||||||
self.Printf(" => 0x%x", val)
|
|
||||||
case MSTORE8:
|
case MSTORE8:
|
||||||
off, val := stack.pop().Int64(), stack.pop().Int64()
|
off, val := stack.pop().Int64(), stack.pop().Int64()
|
||||||
|
|
||||||
mem.store[off] = byte(val & 0xff)
|
mem.store[off] = byte(val & 0xff)
|
||||||
|
|
||||||
self.Printf(" => [%v] 0x%x", off, mem.store[off])
|
|
||||||
case SLOAD:
|
case SLOAD:
|
||||||
loc := common.BigToHash(stack.pop())
|
loc := common.BigToHash(stack.pop())
|
||||||
val := common.Bytes2Big(statedb.GetState(context.Address(), loc))
|
val := common.Bytes2Big(statedb.GetState(context.Address(), loc))
|
||||||
stack.push(val)
|
stack.push(val)
|
||||||
|
|
||||||
self.Printf(" {0x%x : 0x%x}", loc, val.Bytes())
|
|
||||||
case SSTORE:
|
case SSTORE:
|
||||||
loc := common.BigToHash(stack.pop())
|
loc := common.BigToHash(stack.pop())
|
||||||
val := stack.pop()
|
val := stack.pop()
|
||||||
|
|
||||||
statedb.SetState(context.Address(), loc, val)
|
statedb.SetState(context.Address(), loc, val)
|
||||||
|
|
||||||
self.Printf(" {0x%x : 0x%x}", loc, val.Bytes())
|
|
||||||
case JUMP:
|
case JUMP:
|
||||||
if err := jump(pc, stack.pop()); err != nil {
|
if err := jump(pc, stack.pop()); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -599,17 +532,14 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
self.Printf(" ~> false")
|
|
||||||
|
|
||||||
case JUMPDEST:
|
case JUMPDEST:
|
||||||
case PC:
|
case PC:
|
||||||
stack.push(pc)
|
stack.push(new(big.Int).SetUint64(pc))
|
||||||
case MSIZE:
|
case MSIZE:
|
||||||
stack.push(big.NewInt(int64(mem.Len())))
|
stack.push(big.NewInt(int64(mem.Len())))
|
||||||
case GAS:
|
case GAS:
|
||||||
stack.push(context.Gas)
|
stack.push(context.Gas)
|
||||||
|
|
||||||
self.Printf(" => %x", context.Gas)
|
|
||||||
case CREATE:
|
case CREATE:
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -619,14 +549,12 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
gas = new(big.Int).Set(context.Gas)
|
gas = new(big.Int).Set(context.Gas)
|
||||||
addr common.Address
|
addr common.Address
|
||||||
)
|
)
|
||||||
self.Endl()
|
|
||||||
|
|
||||||
context.UseGas(context.Gas)
|
context.UseGas(context.Gas)
|
||||||
ret, suberr, ref := self.env.Create(context, input, gas, price, value)
|
ret, suberr, ref := self.env.Create(context, input, gas, price, value)
|
||||||
if suberr != nil {
|
if suberr != nil {
|
||||||
stack.push(common.BigFalse)
|
stack.push(common.BigFalse)
|
||||||
|
|
||||||
self.Printf(" (*) 0x0 %v", suberr)
|
|
||||||
} else {
|
} else {
|
||||||
// gas < len(ret) * CreateDataGas == NO_CODE
|
// gas < len(ret) * CreateDataGas == NO_CODE
|
||||||
dataGas := big.NewInt(int64(len(ret)))
|
dataGas := big.NewInt(int64(len(ret)))
|
||||||
@ -651,7 +579,6 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
retOffset, retSize := stack.pop(), stack.pop()
|
retOffset, retSize := stack.pop(), stack.pop()
|
||||||
|
|
||||||
address := common.BigToAddress(addr)
|
address := common.BigToAddress(addr)
|
||||||
self.Printf(" => %x", address).Endl()
|
|
||||||
|
|
||||||
// Get the arguments from the memory
|
// Get the arguments from the memory
|
||||||
args := mem.Get(inOffset.Int64(), inSize.Int64())
|
args := mem.Get(inOffset.Int64(), inSize.Int64())
|
||||||
@ -673,47 +600,41 @@ func (self *Vm) Run(context *Context, callData []byte) (ret []byte, err error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
stack.push(common.BigFalse)
|
stack.push(common.BigFalse)
|
||||||
|
|
||||||
self.Printf("%v").Endl()
|
|
||||||
} else {
|
} else {
|
||||||
stack.push(common.BigTrue)
|
stack.push(common.BigTrue)
|
||||||
|
|
||||||
mem.Set(retOffset.Uint64(), retSize.Uint64(), ret)
|
mem.Set(retOffset.Uint64(), retSize.Uint64(), ret)
|
||||||
}
|
}
|
||||||
self.Printf("resume %x (%v)", context.Address(), context.Gas)
|
|
||||||
case RETURN:
|
case RETURN:
|
||||||
offset, size := stack.pop(), stack.pop()
|
offset, size := stack.pop(), stack.pop()
|
||||||
ret := mem.GetPtr(offset.Int64(), size.Int64())
|
ret := mem.GetPtr(offset.Int64(), size.Int64())
|
||||||
|
|
||||||
self.Printf(" => [%v, %v] (%d) 0x%x", offset, size, len(ret), ret).Endl()
|
|
||||||
|
|
||||||
return context.Return(ret), nil
|
return context.Return(ret), nil
|
||||||
case SUICIDE:
|
case SUICIDE:
|
||||||
receiver := statedb.GetOrNewStateObject(common.BigToAddress(stack.pop()))
|
receiver := statedb.GetOrNewStateObject(common.BigToAddress(stack.pop()))
|
||||||
balance := statedb.GetBalance(context.Address())
|
balance := statedb.GetBalance(context.Address())
|
||||||
|
|
||||||
self.Printf(" => (%x) %v", receiver.Address().Bytes()[:4], balance)
|
|
||||||
|
|
||||||
receiver.AddBalance(balance)
|
receiver.AddBalance(balance)
|
||||||
|
|
||||||
statedb.Delete(context.Address())
|
statedb.Delete(context.Address())
|
||||||
|
|
||||||
fallthrough
|
fallthrough
|
||||||
case STOP: // Stop the context
|
case STOP: // Stop the context
|
||||||
self.Endl()
|
|
||||||
|
|
||||||
return context.Return(nil), nil
|
return context.Return(nil), nil
|
||||||
default:
|
default:
|
||||||
self.Printf("(pc) %-3v Invalid opcode %x\n", pc, op).Endl()
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("Invalid opcode %x", op)
|
return nil, fmt.Errorf("Invalid opcode %x", op)
|
||||||
}
|
}
|
||||||
|
|
||||||
pc.Add(pc, One)
|
pc++
|
||||||
|
|
||||||
self.Endl()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// calculateGasAndSize calculates the required given the opcode and stack items calculates the new memorysize for
|
||||||
|
// the operation. This does not reduce gas or resizes the memory.
|
||||||
func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCode, statedb *state.StateDB, mem *Memory, stack *stack) (*big.Int, *big.Int, error) {
|
func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCode, statedb *state.StateDB, mem *Memory, stack *stack) (*big.Int, *big.Int, error) {
|
||||||
var (
|
var (
|
||||||
gas = new(big.Int)
|
gas = new(big.Int)
|
||||||
@ -855,40 +776,38 @@ func (self *Vm) calculateGasAndSize(context *Context, caller ContextRef, op OpCo
|
|||||||
return newMemSize, gas, nil
|
return newMemSize, gas, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *Vm) RunPrecompiled(p *PrecompiledAccount, callData []byte, context *Context) (ret []byte, err error) {
|
// RunPrecompile runs and evaluate the output of a precompiled contract defined in contracts.go
|
||||||
gas := p.Gas(len(callData))
|
func (self *Vm) RunPrecompiled(p *PrecompiledAccount, input []byte, context *Context) (ret []byte, err error) {
|
||||||
|
gas := p.Gas(len(input))
|
||||||
if context.UseGas(gas) {
|
if context.UseGas(gas) {
|
||||||
ret = p.Call(callData)
|
ret = p.Call(input)
|
||||||
self.Printf("NATIVE_FUNC => %x", ret)
|
|
||||||
self.Endl()
|
|
||||||
|
|
||||||
return context.Return(ret), nil
|
return context.Return(ret), nil
|
||||||
} else {
|
} else {
|
||||||
self.Printf("NATIVE_FUNC => failed").Endl()
|
return nil, OutOfGasError{}
|
||||||
|
|
||||||
tmp := new(big.Int).Set(context.Gas)
|
|
||||||
|
|
||||||
return nil, OOG(gas, tmp)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *Vm) Printf(format string, v ...interface{}) VirtualMachine {
|
// log emits a log event to the environment for each opcode encountered. This is not to be confused with the
|
||||||
if self.debug {
|
// LOG* opcode.
|
||||||
self.logStr += fmt.Sprintf(format, v...)
|
func (self *Vm) log(pc uint64, op OpCode, gas, cost *big.Int, memory *Memory, stack *stack, context *Context, err error) {
|
||||||
}
|
if Debug {
|
||||||
|
mem := make([]byte, len(memory.Data()))
|
||||||
|
copy(mem, memory.Data())
|
||||||
|
stck := make([]*big.Int, len(stack.Data()))
|
||||||
|
copy(stck, stack.Data())
|
||||||
|
|
||||||
return self
|
object := context.self.(*state.StateObject)
|
||||||
}
|
storage := make(map[common.Hash][]byte)
|
||||||
|
object.EachStorage(func(k, v []byte) {
|
||||||
func (self *Vm) Endl() VirtualMachine {
|
storage[common.BytesToHash(k)] = v
|
||||||
if self.debug {
|
})
|
||||||
glog.V(0).Infoln(self.logStr)
|
|
||||||
self.logStr = ""
|
self.env.AddStructLog(StructLog{pc, op, new(big.Int).Set(gas), cost, mem, stck, storage, err})
|
||||||
}
|
}
|
||||||
|
|
||||||
return self
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Environment returns the current workable state of the VM
|
||||||
func (self *Vm) Env() Environment {
|
func (self *Vm) Env() Environment {
|
||||||
return self.env
|
return self.env
|
||||||
}
|
}
|
||||||
|
@ -16,6 +16,8 @@ type VMEnv struct {
|
|||||||
depth int
|
depth int
|
||||||
chain *ChainManager
|
chain *ChainManager
|
||||||
typ vm.Type
|
typ vm.Type
|
||||||
|
// structured logging
|
||||||
|
logs []vm.StructLog
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEnv(state *state.StateDB, chain *ChainManager, msg Message, block *types.Block) *VMEnv {
|
func NewEnv(state *state.StateDB, chain *ChainManager, msg Message, block *types.Block) *VMEnv {
|
||||||
@ -47,6 +49,7 @@ func (self *VMEnv) GetHash(n uint64) common.Hash {
|
|||||||
|
|
||||||
return common.Hash{}
|
return common.Hash{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *VMEnv) AddLog(log *state.Log) {
|
func (self *VMEnv) AddLog(log *state.Log) {
|
||||||
self.state.AddLog(log)
|
self.state.AddLog(log)
|
||||||
}
|
}
|
||||||
@ -68,3 +71,11 @@ func (self *VMEnv) Create(me vm.ContextRef, data []byte, gas, price, value *big.
|
|||||||
exe := NewExecution(self, nil, data, gas, price, value)
|
exe := NewExecution(self, nil, data, gas, price, value)
|
||||||
return exe.Create(me)
|
return exe.Create(me)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (self *VMEnv) StructLogs() []vm.StructLog {
|
||||||
|
return self.logs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *VMEnv) AddStructLog(log vm.StructLog) {
|
||||||
|
self.logs = append(self.logs, log)
|
||||||
|
}
|
||||||
|
@ -93,6 +93,13 @@ type Config struct {
|
|||||||
AccountManager *accounts.Manager
|
AccountManager *accounts.Manager
|
||||||
SolcPath string
|
SolcPath string
|
||||||
|
|
||||||
|
GpoMinGasPrice *big.Int
|
||||||
|
GpoMaxGasPrice *big.Int
|
||||||
|
GpoFullBlockRatio int
|
||||||
|
GpobaseStepDown int
|
||||||
|
GpobaseStepUp int
|
||||||
|
GpobaseCorrectionFactor int
|
||||||
|
|
||||||
// NewDB is used to create databases.
|
// NewDB is used to create databases.
|
||||||
// If nil, the default is to create leveldb databases on disk.
|
// If nil, the default is to create leveldb databases on disk.
|
||||||
NewDB func(path string) (common.Database, error)
|
NewDB func(path string) (common.Database, error)
|
||||||
@ -193,10 +200,16 @@ type Ethereum struct {
|
|||||||
whisper *whisper.Whisper
|
whisper *whisper.Whisper
|
||||||
pow *ethash.Ethash
|
pow *ethash.Ethash
|
||||||
protocolManager *ProtocolManager
|
protocolManager *ProtocolManager
|
||||||
downloader *downloader.Downloader
|
|
||||||
SolcPath string
|
SolcPath string
|
||||||
solc *compiler.Solidity
|
solc *compiler.Solidity
|
||||||
|
|
||||||
|
GpoMinGasPrice *big.Int
|
||||||
|
GpoMaxGasPrice *big.Int
|
||||||
|
GpoFullBlockRatio int
|
||||||
|
GpobaseStepDown int
|
||||||
|
GpobaseStepUp int
|
||||||
|
GpobaseCorrectionFactor int
|
||||||
|
|
||||||
net *p2p.Server
|
net *p2p.Server
|
||||||
eventMux *event.TypeMux
|
eventMux *event.TypeMux
|
||||||
miner *miner.Miner
|
miner *miner.Miner
|
||||||
@ -282,6 +295,12 @@ func New(config *Config) (*Ethereum, error) {
|
|||||||
MinerThreads: config.MinerThreads,
|
MinerThreads: config.MinerThreads,
|
||||||
SolcPath: config.SolcPath,
|
SolcPath: config.SolcPath,
|
||||||
AutoDAG: config.AutoDAG,
|
AutoDAG: config.AutoDAG,
|
||||||
|
GpoMinGasPrice: config.GpoMinGasPrice,
|
||||||
|
GpoMaxGasPrice: config.GpoMaxGasPrice,
|
||||||
|
GpoFullBlockRatio: config.GpoFullBlockRatio,
|
||||||
|
GpobaseStepDown: config.GpobaseStepDown,
|
||||||
|
GpobaseStepUp: config.GpobaseStepUp,
|
||||||
|
GpobaseCorrectionFactor: config.GpobaseCorrectionFactor,
|
||||||
}
|
}
|
||||||
|
|
||||||
eth.pow = ethash.New()
|
eth.pow = ethash.New()
|
||||||
@ -290,14 +309,14 @@ func New(config *Config) (*Ethereum, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
eth.downloader = downloader.New(eth.EventMux(), eth.chainManager.HasBlock, eth.chainManager.GetBlock)
|
|
||||||
eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
|
eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
|
||||||
|
|
||||||
eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.chainManager, eth.EventMux())
|
eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.chainManager, eth.EventMux())
|
||||||
eth.chainManager.SetProcessor(eth.blockProcessor)
|
eth.chainManager.SetProcessor(eth.blockProcessor)
|
||||||
|
eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.eventMux, eth.txPool, eth.chainManager)
|
||||||
|
|
||||||
eth.miner = miner.New(eth, eth.EventMux(), eth.pow)
|
eth.miner = miner.New(eth, eth.EventMux(), eth.pow)
|
||||||
eth.miner.SetGasPrice(config.GasPrice)
|
eth.miner.SetGasPrice(config.GasPrice)
|
||||||
|
|
||||||
eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.eventMux, eth.txPool, eth.chainManager, eth.downloader)
|
|
||||||
if config.Shh {
|
if config.Shh {
|
||||||
eth.whisper = whisper.New()
|
eth.whisper = whisper.New()
|
||||||
eth.shhVersionId = int(eth.whisper.Version())
|
eth.shhVersionId = int(eth.whisper.Version())
|
||||||
@ -447,7 +466,7 @@ func (s *Ethereum) ClientVersion() string { return s.clientVersio
|
|||||||
func (s *Ethereum) EthVersion() int { return s.ethVersionId }
|
func (s *Ethereum) EthVersion() int { return s.ethVersionId }
|
||||||
func (s *Ethereum) NetVersion() int { return s.netVersionId }
|
func (s *Ethereum) NetVersion() int { return s.netVersionId }
|
||||||
func (s *Ethereum) ShhVersion() int { return s.shhVersionId }
|
func (s *Ethereum) ShhVersion() int { return s.shhVersionId }
|
||||||
func (s *Ethereum) Downloader() *downloader.Downloader { return s.downloader }
|
func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
|
||||||
|
|
||||||
// Start the ethereum
|
// Start the ethereum
|
||||||
func (s *Ethereum) Start() error {
|
func (s *Ethereum) Start() error {
|
||||||
@ -466,8 +485,6 @@ func (s *Ethereum) Start() error {
|
|||||||
s.StartAutoDAG()
|
s.StartAutoDAG()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start services
|
|
||||||
go s.txPool.Start()
|
|
||||||
s.protocolManager.Start()
|
s.protocolManager.Start()
|
||||||
|
|
||||||
if s.whisper != nil {
|
if s.whisper != nil {
|
||||||
@ -513,9 +530,6 @@ func (s *Ethereum) StartForTest() {
|
|||||||
ClientString: s.net.Name,
|
ClientString: s.net.Name,
|
||||||
ProtocolVersion: ProtocolVersion,
|
ProtocolVersion: ProtocolVersion,
|
||||||
})
|
})
|
||||||
|
|
||||||
// Start services
|
|
||||||
s.txPool.Start()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddPeer connects to the given node and maintains the connection until the
|
// AddPeer connects to the given node and maintains the connection until the
|
||||||
@ -532,8 +546,8 @@ func (self *Ethereum) AddPeer(nodeURL string) error {
|
|||||||
|
|
||||||
func (s *Ethereum) Stop() {
|
func (s *Ethereum) Stop() {
|
||||||
s.net.Stop()
|
s.net.Stop()
|
||||||
s.protocolManager.Stop()
|
|
||||||
s.chainManager.Stop()
|
s.chainManager.Stop()
|
||||||
|
s.protocolManager.Stop()
|
||||||
s.txPool.Stop()
|
s.txPool.Stop()
|
||||||
s.eventMux.Stop()
|
s.eventMux.Stop()
|
||||||
if s.whisper != nil {
|
if s.whisper != nil {
|
||||||
|
@ -3,6 +3,7 @@ package downloader
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
|
"math"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
@ -28,32 +29,40 @@ var (
|
|||||||
crossCheckCycle = time.Second // Period after which to check for expired cross checks
|
crossCheckCycle = time.Second // Period after which to check for expired cross checks
|
||||||
|
|
||||||
maxBannedHashes = 4096 // Number of bannable hashes before phasing old ones out
|
maxBannedHashes = 4096 // Number of bannable hashes before phasing old ones out
|
||||||
|
maxBlockProcess = 256 // Number of blocks to import at once into the chain
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
errLowTd = errors.New("peers TD is too low")
|
errBusy = errors.New("busy")
|
||||||
ErrBusy = errors.New("busy")
|
|
||||||
errUnknownPeer = errors.New("peer is unknown or unhealthy")
|
errUnknownPeer = errors.New("peer is unknown or unhealthy")
|
||||||
ErrBadPeer = errors.New("action from bad peer ignored")
|
errBadPeer = errors.New("action from bad peer ignored")
|
||||||
ErrStallingPeer = errors.New("peer is stalling")
|
errStallingPeer = errors.New("peer is stalling")
|
||||||
errBannedHead = errors.New("peer head hash already banned")
|
errBannedHead = errors.New("peer head hash already banned")
|
||||||
errNoPeers = errors.New("no peers to keep download active")
|
errNoPeers = errors.New("no peers to keep download active")
|
||||||
ErrPendingQueue = errors.New("pending items in queue")
|
errPendingQueue = errors.New("pending items in queue")
|
||||||
ErrTimeout = errors.New("timeout")
|
errTimeout = errors.New("timeout")
|
||||||
ErrEmptyHashSet = errors.New("empty hash set by peer")
|
errEmptyHashSet = errors.New("empty hash set by peer")
|
||||||
errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
|
errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
|
||||||
errAlreadyInPool = errors.New("hash already in pool")
|
errAlreadyInPool = errors.New("hash already in pool")
|
||||||
ErrInvalidChain = errors.New("retrieved hash chain is invalid")
|
errInvalidChain = errors.New("retrieved hash chain is invalid")
|
||||||
ErrCrossCheckFailed = errors.New("block cross-check failed")
|
errCrossCheckFailed = errors.New("block cross-check failed")
|
||||||
errCancelHashFetch = errors.New("hash fetching cancelled (requested)")
|
errCancelHashFetch = errors.New("hash fetching canceled (requested)")
|
||||||
errCancelBlockFetch = errors.New("block downloading cancelled (requested)")
|
errCancelBlockFetch = errors.New("block downloading canceled (requested)")
|
||||||
|
errCancelChainImport = errors.New("chain importing canceled (requested)")
|
||||||
errNoSyncActive = errors.New("no sync active")
|
errNoSyncActive = errors.New("no sync active")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// hashCheckFn is a callback type for verifying a hash's presence in the local chain.
|
||||||
type hashCheckFn func(common.Hash) bool
|
type hashCheckFn func(common.Hash) bool
|
||||||
type getBlockFn func(common.Hash) *types.Block
|
|
||||||
|
// blockRetrievalFn is a callback type for retrieving a block from the local chain.
|
||||||
|
type blockRetrievalFn func(common.Hash) *types.Block
|
||||||
|
|
||||||
|
// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
|
||||||
type chainInsertFn func(types.Blocks) (int, error)
|
type chainInsertFn func(types.Blocks) (int, error)
|
||||||
type hashIterFn func() (common.Hash, error)
|
|
||||||
|
// peerDropFn is a callback type for dropping a peer detected as malicious.
|
||||||
|
type peerDropFn func(id string)
|
||||||
|
|
||||||
type blockPack struct {
|
type blockPack struct {
|
||||||
peerId string
|
peerId string
|
||||||
@ -78,12 +87,22 @@ type Downloader struct {
|
|||||||
checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain
|
checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain
|
||||||
banned *set.Set // Set of hashes we've received and banned
|
banned *set.Set // Set of hashes we've received and banned
|
||||||
|
|
||||||
|
// Statistics
|
||||||
|
importStart time.Time // Instance when the last blocks were taken from the cache
|
||||||
|
importQueue []*Block // Previously taken blocks to check import progress
|
||||||
|
importDone int // Number of taken blocks already imported from the last batch
|
||||||
|
importLock sync.Mutex
|
||||||
|
|
||||||
// Callbacks
|
// Callbacks
|
||||||
hasBlock hashCheckFn
|
hasBlock hashCheckFn // Checks if a block is present in the chain
|
||||||
getBlock getBlockFn
|
getBlock blockRetrievalFn // Retrieves a block from the chain
|
||||||
|
insertChain chainInsertFn // Injects a batch of blocks into the chain
|
||||||
|
dropPeer peerDropFn // Retrieved the TD of our own chain
|
||||||
|
|
||||||
// Status
|
// Status
|
||||||
|
synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
|
||||||
synchronising int32
|
synchronising int32
|
||||||
|
processing int32
|
||||||
notified int32
|
notified int32
|
||||||
|
|
||||||
// Channels
|
// Channels
|
||||||
@ -101,7 +120,8 @@ type Block struct {
|
|||||||
OriginPeer string
|
OriginPeer string
|
||||||
}
|
}
|
||||||
|
|
||||||
func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
|
// New creates a new downloader to fetch hashes and blocks from remote peers.
|
||||||
|
func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader {
|
||||||
// Create the base downloader
|
// Create the base downloader
|
||||||
downloader := &Downloader{
|
downloader := &Downloader{
|
||||||
mux: mux,
|
mux: mux,
|
||||||
@ -109,6 +129,8 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloa
|
|||||||
peers: newPeerSet(),
|
peers: newPeerSet(),
|
||||||
hasBlock: hasBlock,
|
hasBlock: hasBlock,
|
||||||
getBlock: getBlock,
|
getBlock: getBlock,
|
||||||
|
insertChain: insertChain,
|
||||||
|
dropPeer: dropPeer,
|
||||||
newPeerCh: make(chan *peer, 1),
|
newPeerCh: make(chan *peer, 1),
|
||||||
hashCh: make(chan hashPack, 1),
|
hashCh: make(chan hashPack, 1),
|
||||||
blockCh: make(chan blockPack, 1),
|
blockCh: make(chan blockPack, 1),
|
||||||
@ -121,11 +143,30 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloa
|
|||||||
return downloader
|
return downloader
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Downloader) Stats() (current int, max int) {
|
// Stats retrieves the current status of the downloader.
|
||||||
return d.queue.Size()
|
func (d *Downloader) Stats() (pending int, cached int, importing int, estimate time.Duration) {
|
||||||
|
// Fetch the download status
|
||||||
|
pending, cached = d.queue.Size()
|
||||||
|
|
||||||
|
// Figure out the import progress
|
||||||
|
d.importLock.Lock()
|
||||||
|
defer d.importLock.Unlock()
|
||||||
|
|
||||||
|
for len(d.importQueue) > 0 && d.hasBlock(d.importQueue[0].RawBlock.Hash()) {
|
||||||
|
d.importQueue = d.importQueue[1:]
|
||||||
|
d.importDone++
|
||||||
|
}
|
||||||
|
importing = len(d.importQueue)
|
||||||
|
|
||||||
|
// Make an estimate on the total sync
|
||||||
|
estimate = 0
|
||||||
|
if d.importDone > 0 {
|
||||||
|
estimate = time.Since(d.importStart) / time.Duration(d.importDone) * time.Duration(pending+cached+importing)
|
||||||
|
}
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Synchronising returns the state of the downloader
|
// Synchronising returns whether the downloader is currently retrieving blocks.
|
||||||
func (d *Downloader) Synchronising() bool {
|
func (d *Downloader) Synchronising() bool {
|
||||||
return atomic.LoadInt32(&d.synchronising) > 0
|
return atomic.LoadInt32(&d.synchronising) > 0
|
||||||
}
|
}
|
||||||
@ -158,19 +199,47 @@ func (d *Downloader) UnregisterPeer(id string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Synchronise will select the peer and use it for synchronising. If an empty string is given
|
// Synchronise tries to sync up our local block chain with a remote peer, both
|
||||||
|
// adding various sanity checks as well as wrapping it with various log entries.
|
||||||
|
func (d *Downloader) Synchronise(id string, head common.Hash) {
|
||||||
|
glog.V(logger.Detail).Infof("Attempting synchronisation: %v, 0x%x", id, head)
|
||||||
|
|
||||||
|
switch err := d.synchronise(id, head); err {
|
||||||
|
case nil:
|
||||||
|
glog.V(logger.Detail).Infof("Synchronisation completed")
|
||||||
|
|
||||||
|
case errBusy:
|
||||||
|
glog.V(logger.Detail).Infof("Synchronisation already in progress")
|
||||||
|
|
||||||
|
case errTimeout, errBadPeer, errStallingPeer, errBannedHead, errEmptyHashSet, errPeersUnavailable, errInvalidChain, errCrossCheckFailed:
|
||||||
|
glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err)
|
||||||
|
d.dropPeer(id)
|
||||||
|
|
||||||
|
case errPendingQueue:
|
||||||
|
glog.V(logger.Debug).Infoln("Synchronisation aborted:", err)
|
||||||
|
|
||||||
|
default:
|
||||||
|
glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// synchronise will select the peer and use it for synchronising. If an empty string is given
|
||||||
// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
|
// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
|
||||||
// checks fail an error will be returned. This method is synchronous
|
// checks fail an error will be returned. This method is synchronous
|
||||||
func (d *Downloader) Synchronise(id string, hash common.Hash) error {
|
func (d *Downloader) synchronise(id string, hash common.Hash) error {
|
||||||
|
// Mock out the synchonisation if testing
|
||||||
|
if d.synchroniseMock != nil {
|
||||||
|
return d.synchroniseMock(id, hash)
|
||||||
|
}
|
||||||
// Make sure only one goroutine is ever allowed past this point at once
|
// Make sure only one goroutine is ever allowed past this point at once
|
||||||
if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
|
if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
|
||||||
return ErrBusy
|
return errBusy
|
||||||
}
|
}
|
||||||
defer atomic.StoreInt32(&d.synchronising, 0)
|
defer atomic.StoreInt32(&d.synchronising, 0)
|
||||||
|
|
||||||
// If the head hash is banned, terminate immediately
|
// If the head hash is banned, terminate immediately
|
||||||
if d.banned.Has(hash) {
|
if d.banned.Has(hash) {
|
||||||
return ErrInvalidChain
|
return errBannedHead
|
||||||
}
|
}
|
||||||
// Post a user notification of the sync (only once per session)
|
// Post a user notification of the sync (only once per session)
|
||||||
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
|
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
|
||||||
@ -184,7 +253,7 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
|
|||||||
|
|
||||||
// Abort if the queue still contains some leftover data
|
// Abort if the queue still contains some leftover data
|
||||||
if _, cached := d.queue.Size(); cached > 0 && d.queue.GetHeadBlock() != nil {
|
if _, cached := d.queue.Size(); cached > 0 && d.queue.GetHeadBlock() != nil {
|
||||||
return ErrPendingQueue
|
return errPendingQueue
|
||||||
}
|
}
|
||||||
// Reset the queue and peer set to clean any internal leftover state
|
// Reset the queue and peer set to clean any internal leftover state
|
||||||
d.queue.Reset()
|
d.queue.Reset()
|
||||||
@ -200,11 +269,6 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
|
|||||||
return d.syncWithPeer(p, hash)
|
return d.syncWithPeer(p, hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TakeBlocks takes blocks from the queue and yields them to the caller.
|
|
||||||
func (d *Downloader) TakeBlocks() []*Block {
|
|
||||||
return d.queue.TakeBlocks()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Has checks if the downloader knows about a particular hash, meaning that its
|
// Has checks if the downloader knows about a particular hash, meaning that its
|
||||||
// either already downloaded of pending retrieval.
|
// either already downloaded of pending retrieval.
|
||||||
func (d *Downloader) Has(hash common.Hash) bool {
|
func (d *Downloader) Has(hash common.Hash) bool {
|
||||||
@ -239,29 +303,26 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash) (err error) {
|
|||||||
|
|
||||||
// Cancel cancels all of the operations and resets the queue. It returns true
|
// Cancel cancels all of the operations and resets the queue. It returns true
|
||||||
// if the cancel operation was completed.
|
// if the cancel operation was completed.
|
||||||
func (d *Downloader) Cancel() bool {
|
func (d *Downloader) Cancel() {
|
||||||
// If we're not syncing just return.
|
|
||||||
hs, bs := d.queue.Size()
|
|
||||||
if atomic.LoadInt32(&d.synchronising) == 0 && hs == 0 && bs == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
// Close the current cancel channel
|
// Close the current cancel channel
|
||||||
d.cancelLock.Lock()
|
d.cancelLock.Lock()
|
||||||
|
if d.cancelCh != nil {
|
||||||
select {
|
select {
|
||||||
case <-d.cancelCh:
|
case <-d.cancelCh:
|
||||||
// Channel was already closed
|
// Channel was already closed
|
||||||
default:
|
default:
|
||||||
close(d.cancelCh)
|
close(d.cancelCh)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
d.cancelLock.Unlock()
|
d.cancelLock.Unlock()
|
||||||
|
|
||||||
// reset the queue
|
// Reset the queue
|
||||||
d.queue.Reset()
|
d.queue.Reset()
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// XXX Make synchronous
|
// fetchHahes starts retrieving hashes backwards from a specific peer and hash,
|
||||||
|
// up until it finds a common ancestor. If the source peer times out, alternative
|
||||||
|
// ones are tried for continuation.
|
||||||
func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
||||||
var (
|
var (
|
||||||
start = time.Now()
|
start = time.Now()
|
||||||
@ -279,7 +340,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
|||||||
<-timeout.C // timeout channel should be initially empty.
|
<-timeout.C // timeout channel should be initially empty.
|
||||||
|
|
||||||
getHashes := func(from common.Hash) {
|
getHashes := func(from common.Hash) {
|
||||||
active.getHashes(from)
|
go active.getHashes(from)
|
||||||
timeout.Reset(hashTTL)
|
timeout.Reset(hashTTL)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -304,7 +365,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
|||||||
// Make sure the peer actually gave something valid
|
// Make sure the peer actually gave something valid
|
||||||
if len(hashPack.hashes) == 0 {
|
if len(hashPack.hashes) == 0 {
|
||||||
glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set", active.id)
|
glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set", active.id)
|
||||||
return ErrEmptyHashSet
|
return errEmptyHashSet
|
||||||
}
|
}
|
||||||
for index, hash := range hashPack.hashes {
|
for index, hash := range hashPack.hashes {
|
||||||
if d.banned.Has(hash) {
|
if d.banned.Has(hash) {
|
||||||
@ -314,7 +375,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
|||||||
if err := d.banBlocks(active.id, hash); err != nil {
|
if err := d.banBlocks(active.id, hash); err != nil {
|
||||||
glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err)
|
glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err)
|
||||||
}
|
}
|
||||||
return ErrInvalidChain
|
return errInvalidChain
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Determine if we're done fetching hashes (queue up all pending), and continue if not done
|
// Determine if we're done fetching hashes (queue up all pending), and continue if not done
|
||||||
@ -331,12 +392,12 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
|||||||
inserts := d.queue.Insert(hashPack.hashes)
|
inserts := d.queue.Insert(hashPack.hashes)
|
||||||
if len(inserts) == 0 && !done {
|
if len(inserts) == 0 && !done {
|
||||||
glog.V(logger.Debug).Infof("Peer (%s) responded with stale hashes", active.id)
|
glog.V(logger.Debug).Infof("Peer (%s) responded with stale hashes", active.id)
|
||||||
return ErrBadPeer
|
return errBadPeer
|
||||||
}
|
}
|
||||||
if !done {
|
if !done {
|
||||||
// Check that the peer is not stalling the sync
|
// Check that the peer is not stalling the sync
|
||||||
if len(inserts) < MinHashFetch {
|
if len(inserts) < MinHashFetch {
|
||||||
return ErrStallingPeer
|
return errStallingPeer
|
||||||
}
|
}
|
||||||
// Try and fetch a random block to verify the hash batch
|
// Try and fetch a random block to verify the hash batch
|
||||||
// Skip the last hash as the cross check races with the next hash fetch
|
// Skip the last hash as the cross check races with the next hash fetch
|
||||||
@ -348,9 +409,9 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
|||||||
expire: time.Now().Add(blockSoftTTL),
|
expire: time.Now().Add(blockSoftTTL),
|
||||||
parent: parent,
|
parent: parent,
|
||||||
}
|
}
|
||||||
active.getBlocks([]common.Hash{origin})
|
go active.getBlocks([]common.Hash{origin})
|
||||||
|
|
||||||
// Also fetch a fresh
|
// Also fetch a fresh batch of hashes
|
||||||
getHashes(head)
|
getHashes(head)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@ -370,7 +431,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
|||||||
block := blockPack.blocks[0]
|
block := blockPack.blocks[0]
|
||||||
if check, ok := d.checks[block.Hash()]; ok {
|
if check, ok := d.checks[block.Hash()]; ok {
|
||||||
if block.ParentHash() != check.parent {
|
if block.ParentHash() != check.parent {
|
||||||
return ErrCrossCheckFailed
|
return errCrossCheckFailed
|
||||||
}
|
}
|
||||||
delete(d.checks, block.Hash())
|
delete(d.checks, block.Hash())
|
||||||
}
|
}
|
||||||
@ -380,7 +441,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
|||||||
for hash, check := range d.checks {
|
for hash, check := range d.checks {
|
||||||
if time.Now().After(check.expire) {
|
if time.Now().After(check.expire) {
|
||||||
glog.V(logger.Debug).Infof("Cross check timeout for %x", hash)
|
glog.V(logger.Debug).Infof("Cross check timeout for %x", hash)
|
||||||
return ErrCrossCheckFailed
|
return errCrossCheckFailed
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -400,7 +461,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
|
|||||||
// if all peers have been tried, abort the process entirely or if the hash is
|
// if all peers have been tried, abort the process entirely or if the hash is
|
||||||
// the zero hash.
|
// the zero hash.
|
||||||
if p == nil || (head == common.Hash{}) {
|
if p == nil || (head == common.Hash{}) {
|
||||||
return ErrTimeout
|
return errTimeout
|
||||||
}
|
}
|
||||||
// set p to the active peer. this will invalidate any hashes that may be returned
|
// set p to the active peer. this will invalidate any hashes that may be returned
|
||||||
// by our previous (delayed) peer.
|
// by our previous (delayed) peer.
|
||||||
@ -457,12 +518,13 @@ out:
|
|||||||
glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
|
glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
// All was successful, promote the peer
|
// All was successful, promote the peer and potentially start processing
|
||||||
peer.Promote()
|
peer.Promote()
|
||||||
peer.SetIdle()
|
peer.SetIdle()
|
||||||
glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks))
|
glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks))
|
||||||
|
go d.process()
|
||||||
|
|
||||||
case ErrInvalidChain:
|
case errInvalidChain:
|
||||||
// The hash chain is invalid (blocks are not ordered properly), abort
|
// The hash chain is invalid (blocks are not ordered properly), abort
|
||||||
return err
|
return err
|
||||||
|
|
||||||
@ -579,7 +641,7 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error {
|
|||||||
return errCancelBlockFetch
|
return errCancelBlockFetch
|
||||||
|
|
||||||
case <-timeout:
|
case <-timeout:
|
||||||
return ErrTimeout
|
return errTimeout
|
||||||
|
|
||||||
case <-d.hashCh:
|
case <-d.hashCh:
|
||||||
// Out of bounds hashes received, ignore them
|
// Out of bounds hashes received, ignore them
|
||||||
@ -636,6 +698,92 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// process takes blocks from the queue and tries to import them into the chain.
|
||||||
|
//
|
||||||
|
// The algorithmic flow is as follows:
|
||||||
|
// - The `processing` flag is swapped to 1 to ensure singleton access
|
||||||
|
// - The current `cancel` channel is retrieved to detect sync abortions
|
||||||
|
// - Blocks are iteratively taken from the cache and inserted into the chain
|
||||||
|
// - When the cache becomes empty, insertion stops
|
||||||
|
// - The `processing` flag is swapped back to 0
|
||||||
|
// - A post-exit check is made whether new blocks became available
|
||||||
|
// - This step is important: it handles a potential race condition between
|
||||||
|
// checking for no more work, and releasing the processing "mutex". In
|
||||||
|
// between these state changes, a block may have arrived, but a processing
|
||||||
|
// attempt denied, so we need to re-enter to ensure the block isn't left
|
||||||
|
// to idle in the cache.
|
||||||
|
func (d *Downloader) process() (err error) {
|
||||||
|
// Make sure only one goroutine is ever allowed to process blocks at once
|
||||||
|
if !atomic.CompareAndSwapInt32(&d.processing, 0, 1) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// If the processor just exited, but there are freshly pending items, try to
|
||||||
|
// reenter. This is needed because the goroutine spinned up for processing
|
||||||
|
// the fresh blocks might have been rejected entry to to this present thread
|
||||||
|
// not yet releasing the `processing` state.
|
||||||
|
defer func() {
|
||||||
|
if err == nil && d.queue.GetHeadBlock() != nil {
|
||||||
|
err = d.process()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// Release the lock upon exit (note, before checking for reentry!), and set
|
||||||
|
// the import statistics to zero.
|
||||||
|
defer func() {
|
||||||
|
d.importLock.Lock()
|
||||||
|
d.importQueue = nil
|
||||||
|
d.importDone = 0
|
||||||
|
d.importLock.Unlock()
|
||||||
|
|
||||||
|
atomic.StoreInt32(&d.processing, 0)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Fetch the current cancel channel to allow termination
|
||||||
|
d.cancelLock.RLock()
|
||||||
|
cancel := d.cancelCh
|
||||||
|
d.cancelLock.RUnlock()
|
||||||
|
|
||||||
|
// Repeat the processing as long as there are blocks to import
|
||||||
|
for {
|
||||||
|
// Fetch the next batch of blocks
|
||||||
|
blocks := d.queue.TakeBlocks()
|
||||||
|
if len(blocks) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Reset the import statistics
|
||||||
|
d.importLock.Lock()
|
||||||
|
d.importStart = time.Now()
|
||||||
|
d.importQueue = blocks
|
||||||
|
d.importDone = 0
|
||||||
|
d.importLock.Unlock()
|
||||||
|
|
||||||
|
// Actually import the blocks
|
||||||
|
glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].RawBlock.Number(), blocks[len(blocks)-1].RawBlock.Number())
|
||||||
|
for len(blocks) != 0 { // TODO: quit
|
||||||
|
// Check for any termination requests
|
||||||
|
select {
|
||||||
|
case <-cancel:
|
||||||
|
return errCancelChainImport
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
// Retrieve the first batch of blocks to insert
|
||||||
|
max := int(math.Min(float64(len(blocks)), float64(maxBlockProcess)))
|
||||||
|
raw := make(types.Blocks, 0, max)
|
||||||
|
for _, block := range blocks[:max] {
|
||||||
|
raw = append(raw, block.RawBlock)
|
||||||
|
}
|
||||||
|
// Try to inset the blocks, drop the originating peer if there's an error
|
||||||
|
index, err := d.insertChain(raw)
|
||||||
|
if err != nil {
|
||||||
|
glog.V(logger.Debug).Infof("Block #%d import failed: %v", raw[index].NumberU64(), err)
|
||||||
|
d.dropPeer(blocks[index].OriginPeer)
|
||||||
|
d.Cancel()
|
||||||
|
return errCancelChainImport
|
||||||
|
}
|
||||||
|
blocks = blocks[max:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// DeliverBlocks injects a new batch of blocks received from a remote node.
|
// DeliverBlocks injects a new batch of blocks received from a remote node.
|
||||||
// This is usually invoked through the BlocksMsg by the protocol handler.
|
// This is usually invoked through the BlocksMsg by the protocol handler.
|
||||||
func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) error {
|
func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) error {
|
||||||
|
@ -2,7 +2,10 @@ package downloader
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
"sync/atomic"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -13,21 +16,29 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
knownHash = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
|
knownHash = common.Hash{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
|
||||||
unknownHash = common.Hash{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
|
unknownHash = common.Hash{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}
|
||||||
bannedHash = common.Hash{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}
|
bannedHash = common.Hash{3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}
|
||||||
|
|
||||||
|
genesis = createBlock(1, common.Hash{}, knownHash)
|
||||||
)
|
)
|
||||||
|
|
||||||
func createHashes(start, amount int) (hashes []common.Hash) {
|
// idCounter is used by the createHashes method the generate deterministic but unique hashes
|
||||||
hashes = make([]common.Hash, amount+1)
|
var idCounter = int64(2) // #1 is the genesis block
|
||||||
hashes[len(hashes)-1] = knownHash
|
|
||||||
|
|
||||||
for i := range hashes[:len(hashes)-1] {
|
// createHashes generates a batch of hashes rooted at a specific point in the chain.
|
||||||
binary.BigEndian.PutUint64(hashes[i][:8], uint64(start+i+2))
|
func createHashes(amount int, root common.Hash) (hashes []common.Hash) {
|
||||||
|
hashes = make([]common.Hash, amount+1)
|
||||||
|
hashes[len(hashes)-1] = root
|
||||||
|
|
||||||
|
for i := 0; i < len(hashes)-1; i++ {
|
||||||
|
binary.BigEndian.PutUint64(hashes[i][:8], uint64(idCounter))
|
||||||
|
idCounter++
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// createBlock assembles a new block at the given chain height.
|
||||||
func createBlock(i int, parent, hash common.Hash) *types.Block {
|
func createBlock(i int, parent, hash common.Hash) *types.Block {
|
||||||
header := &types.Header{Number: big.NewInt(int64(i))}
|
header := &types.Header{Number: big.NewInt(int64(i))}
|
||||||
block := types.NewBlockWithHeader(header)
|
block := types.NewBlockWithHeader(header)
|
||||||
@ -36,6 +47,11 @@ func createBlock(i int, parent, hash common.Hash) *types.Block {
|
|||||||
return block
|
return block
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// copyBlock makes a deep copy of a block suitable for local modifications.
|
||||||
|
func copyBlock(block *types.Block) *types.Block {
|
||||||
|
return createBlock(int(block.Number().Int64()), block.ParentHeaderHash, block.HeaderHash)
|
||||||
|
}
|
||||||
|
|
||||||
func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
|
func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
|
||||||
blocks := make(map[common.Hash]*types.Block)
|
blocks := make(map[common.Hash]*types.Block)
|
||||||
for i := 0; i < len(hashes); i++ {
|
for i := 0; i < len(hashes); i++ {
|
||||||
@ -51,181 +67,171 @@ func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
|
|||||||
type downloadTester struct {
|
type downloadTester struct {
|
||||||
downloader *Downloader
|
downloader *Downloader
|
||||||
|
|
||||||
hashes []common.Hash // Chain of hashes simulating
|
ownHashes []common.Hash // Hash chain belonging to the tester
|
||||||
blocks map[common.Hash]*types.Block // Blocks associated with the hashes
|
ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester
|
||||||
chain []common.Hash // Block-chain being constructed
|
peerHashes map[string][]common.Hash // Hash chain belonging to different test peers
|
||||||
|
peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers
|
||||||
|
|
||||||
maxHashFetch int // Overrides the maximum number of retrieved hashes
|
maxHashFetch int // Overrides the maximum number of retrieved hashes
|
||||||
|
|
||||||
t *testing.T
|
|
||||||
done chan bool
|
|
||||||
activePeerId string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
|
func newTester() *downloadTester {
|
||||||
tester := &downloadTester{
|
tester := &downloadTester{
|
||||||
t: t,
|
ownHashes: []common.Hash{knownHash},
|
||||||
|
ownBlocks: map[common.Hash]*types.Block{knownHash: genesis},
|
||||||
hashes: hashes,
|
peerHashes: make(map[string][]common.Hash),
|
||||||
blocks: blocks,
|
peerBlocks: make(map[string]map[common.Hash]*types.Block),
|
||||||
chain: []common.Hash{knownHash},
|
|
||||||
|
|
||||||
done: make(chan bool),
|
|
||||||
}
|
}
|
||||||
var mux event.TypeMux
|
var mux event.TypeMux
|
||||||
downloader := New(&mux, tester.hasBlock, tester.getBlock)
|
downloader := New(&mux, tester.hasBlock, tester.getBlock, tester.insertChain, tester.dropPeer)
|
||||||
tester.downloader = downloader
|
tester.downloader = downloader
|
||||||
|
|
||||||
return tester
|
return tester
|
||||||
}
|
}
|
||||||
|
|
||||||
// sync is a simple wrapper around the downloader to start synchronisation and
|
// sync starts synchronizing with a remote peer, blocking until it completes.
|
||||||
// block until it returns
|
func (dl *downloadTester) sync(id string) error {
|
||||||
func (dl *downloadTester) sync(peerId string, head common.Hash) error {
|
err := dl.downloader.synchronise(id, dl.peerHashes[id][0])
|
||||||
dl.activePeerId = peerId
|
for atomic.LoadInt32(&dl.downloader.processing) == 1 {
|
||||||
return dl.downloader.Synchronise(peerId, head)
|
|
||||||
}
|
|
||||||
|
|
||||||
// syncTake is starts synchronising with a remote peer, but concurrently it also
|
|
||||||
// starts fetching blocks that the downloader retrieved. IT blocks until both go
|
|
||||||
// routines terminate.
|
|
||||||
func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, error) {
|
|
||||||
// Start a block collector to take blocks as they become available
|
|
||||||
done := make(chan struct{})
|
|
||||||
took := []*Block{}
|
|
||||||
go func() {
|
|
||||||
for running := true; running; {
|
|
||||||
select {
|
|
||||||
case <-done:
|
|
||||||
running = false
|
|
||||||
default:
|
|
||||||
time.Sleep(time.Millisecond)
|
time.Sleep(time.Millisecond)
|
||||||
}
|
}
|
||||||
// Take a batch of blocks and accumulate
|
return err
|
||||||
took = append(took, dl.downloader.TakeBlocks()...)
|
|
||||||
}
|
|
||||||
done <- struct{}{}
|
|
||||||
}()
|
|
||||||
// Start the downloading, sync the taker and return
|
|
||||||
err := dl.sync(peerId, head)
|
|
||||||
|
|
||||||
done <- struct{}{}
|
|
||||||
<-done
|
|
||||||
|
|
||||||
return took, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hasBlock checks if a block is pres ent in the testers canonical chain.
|
||||||
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
|
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
|
||||||
for _, h := range dl.chain {
|
return dl.getBlock(hash) != nil
|
||||||
if h == hash {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getBlock retrieves a block from the testers canonical chain.
|
||||||
func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
|
func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
|
||||||
return dl.blocks[knownHash]
|
return dl.ownBlocks[hash]
|
||||||
}
|
}
|
||||||
|
|
||||||
// getHashes retrieves a batch of hashes for reconstructing the chain.
|
// insertChain injects a new batch of blocks into the simulated chain.
|
||||||
func (dl *downloadTester) getHashes(head common.Hash) error {
|
func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
|
||||||
|
for i, block := range blocks {
|
||||||
|
if _, ok := dl.ownBlocks[block.ParentHash()]; !ok {
|
||||||
|
return i, errors.New("unknown parent")
|
||||||
|
}
|
||||||
|
dl.ownHashes = append(dl.ownHashes, block.Hash())
|
||||||
|
dl.ownBlocks[block.Hash()] = block
|
||||||
|
}
|
||||||
|
return len(blocks), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newPeer registers a new block download source into the downloader.
|
||||||
|
func (dl *downloadTester) newPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block) error {
|
||||||
|
return dl.newSlowPeer(id, hashes, blocks, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// newSlowPeer registers a new block download source into the downloader, with a
|
||||||
|
// specific delay time on processing the network packets sent to it, simulating
|
||||||
|
// potentially slow network IO.
|
||||||
|
func (dl *downloadTester) newSlowPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block, delay time.Duration) error {
|
||||||
|
err := dl.downloader.RegisterPeer(id, hashes[0], dl.peerGetHashesFn(id, delay), dl.peerGetBlocksFn(id, delay))
|
||||||
|
if err == nil {
|
||||||
|
// Assign the owned hashes and blocks to the peer (deep copy)
|
||||||
|
dl.peerHashes[id] = make([]common.Hash, len(hashes))
|
||||||
|
copy(dl.peerHashes[id], hashes)
|
||||||
|
|
||||||
|
dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
|
||||||
|
for hash, block := range blocks {
|
||||||
|
dl.peerBlocks[id][hash] = copyBlock(block)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// dropPeer simulates a hard peer removal from the connection pool.
|
||||||
|
func (dl *downloadTester) dropPeer(id string) {
|
||||||
|
delete(dl.peerHashes, id)
|
||||||
|
delete(dl.peerBlocks, id)
|
||||||
|
|
||||||
|
dl.downloader.UnregisterPeer(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// peerGetBlocksFn constructs a getHashes function associated with a particular
|
||||||
|
// peer in the download tester. The returned function can be used to retrieve
|
||||||
|
// batches of hashes from the particularly requested peer.
|
||||||
|
func (dl *downloadTester) peerGetHashesFn(id string, delay time.Duration) func(head common.Hash) error {
|
||||||
|
return func(head common.Hash) error {
|
||||||
|
time.Sleep(delay)
|
||||||
|
|
||||||
limit := MaxHashFetch
|
limit := MaxHashFetch
|
||||||
if dl.maxHashFetch > 0 {
|
if dl.maxHashFetch > 0 {
|
||||||
limit = dl.maxHashFetch
|
limit = dl.maxHashFetch
|
||||||
}
|
}
|
||||||
// Gather the next batch of hashes
|
// Gather the next batch of hashes
|
||||||
hashes := make([]common.Hash, 0, limit)
|
hashes := dl.peerHashes[id]
|
||||||
for i, hash := range dl.hashes {
|
result := make([]common.Hash, 0, limit)
|
||||||
|
for i, hash := range hashes {
|
||||||
if hash == head {
|
if hash == head {
|
||||||
i++
|
i++
|
||||||
for len(hashes) < cap(hashes) && i < len(dl.hashes) {
|
for len(result) < cap(result) && i < len(hashes) {
|
||||||
hashes = append(hashes, dl.hashes[i])
|
result = append(result, hashes[i])
|
||||||
i++
|
i++
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Delay delivery a bit to allow attacks to unfold
|
// Delay delivery a bit to allow attacks to unfold
|
||||||
id := dl.activePeerId
|
|
||||||
go func() {
|
go func() {
|
||||||
time.Sleep(time.Millisecond)
|
time.Sleep(time.Millisecond)
|
||||||
dl.downloader.DeliverHashes(id, hashes)
|
dl.downloader.DeliverHashes(id, result)
|
||||||
}()
|
}()
|
||||||
return nil
|
return nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error {
|
// peerGetBlocksFn constructs a getBlocks function associated with a particular
|
||||||
|
// peer in the download tester. The returned function can be used to retrieve
|
||||||
|
// batches of blocks from the particularly requested peer.
|
||||||
|
func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([]common.Hash) error {
|
||||||
return func(hashes []common.Hash) error {
|
return func(hashes []common.Hash) error {
|
||||||
blocks := make([]*types.Block, 0, len(hashes))
|
time.Sleep(delay)
|
||||||
|
|
||||||
|
blocks := dl.peerBlocks[id]
|
||||||
|
result := make([]*types.Block, 0, len(hashes))
|
||||||
for _, hash := range hashes {
|
for _, hash := range hashes {
|
||||||
if block, ok := dl.blocks[hash]; ok {
|
if block, ok := blocks[hash]; ok {
|
||||||
blocks = append(blocks, block)
|
result = append(result, block)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
go dl.downloader.DeliverBlocks(id, blocks)
|
go dl.downloader.DeliverBlocks(id, result)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// newPeer registers a new block download source into the syncer.
|
|
||||||
func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) error {
|
|
||||||
return dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests that simple synchronization, without throttling from a good peer works.
|
// Tests that simple synchronization, without throttling from a good peer works.
|
||||||
func TestSynchronisation(t *testing.T) {
|
func TestSynchronisation(t *testing.T) {
|
||||||
// Create a small enough block chain to download and the tester
|
// Create a small enough block chain to download and the tester
|
||||||
targetBlocks := blockCacheLimit - 15
|
targetBlocks := blockCacheLimit - 15
|
||||||
hashes := createHashes(0, targetBlocks)
|
hashes := createHashes(targetBlocks, knownHash)
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
|
||||||
tester := newTester(t, hashes, blocks)
|
tester := newTester()
|
||||||
tester.newPeer("peer", big.NewInt(10000), hashes[0])
|
tester.newPeer("peer", hashes, blocks)
|
||||||
|
|
||||||
// Synchronise with the peer and make sure all blocks were retrieved
|
// Synchronise with the peer and make sure all blocks were retrieved
|
||||||
if err := tester.sync("peer", hashes[0]); err != nil {
|
if err := tester.sync("peer"); err != nil {
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
}
|
}
|
||||||
if queued := len(tester.downloader.queue.blockPool); queued != targetBlocks {
|
if imported := len(tester.ownBlocks); imported != targetBlocks+1 {
|
||||||
t.Fatalf("synchronised block mismatch: have %v, want %v", queued, targetBlocks)
|
t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests that the synchronized blocks can be correctly retrieved.
|
|
||||||
func TestBlockTaking(t *testing.T) {
|
|
||||||
// Create a small enough block chain to download and the tester
|
|
||||||
targetBlocks := blockCacheLimit - 15
|
|
||||||
hashes := createHashes(0, targetBlocks)
|
|
||||||
blocks := createBlocksFromHashes(hashes)
|
|
||||||
|
|
||||||
tester := newTester(t, hashes, blocks)
|
|
||||||
tester.newPeer("peer", big.NewInt(10000), hashes[0])
|
|
||||||
|
|
||||||
// Synchronise with the peer and test block retrieval
|
|
||||||
if err := tester.sync("peer", hashes[0]); err != nil {
|
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
||||||
}
|
|
||||||
if took := tester.downloader.TakeBlocks(); len(took) != targetBlocks {
|
|
||||||
t.Fatalf("took block mismatch: have %v, want %v", len(took), targetBlocks)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that an inactive downloader will not accept incoming hashes and blocks.
|
// Tests that an inactive downloader will not accept incoming hashes and blocks.
|
||||||
func TestInactiveDownloader(t *testing.T) {
|
func TestInactiveDownloader(t *testing.T) {
|
||||||
// Create a small enough block chain to download and the tester
|
tester := newTester()
|
||||||
targetBlocks := blockCacheLimit - 15
|
|
||||||
hashes := createHashes(0, targetBlocks)
|
|
||||||
blocks := createBlocksFromHashSet(createHashSet(hashes))
|
|
||||||
|
|
||||||
tester := newTester(t, nil, nil)
|
|
||||||
|
|
||||||
// Check that neither hashes nor blocks are accepted
|
// Check that neither hashes nor blocks are accepted
|
||||||
if err := tester.downloader.DeliverHashes("bad peer", hashes); err != errNoSyncActive {
|
if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive {
|
||||||
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
|
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
|
||||||
}
|
}
|
||||||
if err := tester.downloader.DeliverBlocks("bad peer", blocks); err != errNoSyncActive {
|
if err := tester.downloader.DeliverBlocks("bad peer", []*types.Block{}); err != errNoSyncActive {
|
||||||
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
|
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -234,26 +240,26 @@ func TestInactiveDownloader(t *testing.T) {
|
|||||||
func TestCancel(t *testing.T) {
|
func TestCancel(t *testing.T) {
|
||||||
// Create a small enough block chain to download and the tester
|
// Create a small enough block chain to download and the tester
|
||||||
targetBlocks := blockCacheLimit - 15
|
targetBlocks := blockCacheLimit - 15
|
||||||
hashes := createHashes(0, targetBlocks)
|
hashes := createHashes(targetBlocks, knownHash)
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
|
||||||
tester := newTester(t, hashes, blocks)
|
tester := newTester()
|
||||||
tester.newPeer("peer", big.NewInt(10000), hashes[0])
|
tester.newPeer("peer", hashes, blocks)
|
||||||
|
|
||||||
// Synchronise with the peer, but cancel afterwards
|
// Make sure canceling works with a pristine downloader
|
||||||
if err := tester.sync("peer", hashes[0]); err != nil {
|
tester.downloader.Cancel()
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
||||||
}
|
|
||||||
if !tester.downloader.Cancel() {
|
|
||||||
t.Fatalf("cancel operation failed")
|
|
||||||
}
|
|
||||||
// Make sure the queue reports empty and no blocks can be taken
|
|
||||||
hashCount, blockCount := tester.downloader.queue.Size()
|
hashCount, blockCount := tester.downloader.queue.Size()
|
||||||
if hashCount > 0 || blockCount > 0 {
|
if hashCount > 0 || blockCount > 0 {
|
||||||
t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
|
t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
|
||||||
}
|
}
|
||||||
if took := tester.downloader.TakeBlocks(); len(took) != 0 {
|
// Synchronise with the peer, but cancel afterwards
|
||||||
t.Errorf("taken blocks mismatch: have %d, want %d", len(took), 0)
|
if err := tester.sync("peer"); err != nil {
|
||||||
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
|
}
|
||||||
|
tester.downloader.Cancel()
|
||||||
|
hashCount, blockCount = tester.downloader.queue.Size()
|
||||||
|
if hashCount > 0 || blockCount > 0 {
|
||||||
|
t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -262,98 +268,167 @@ func TestCancel(t *testing.T) {
|
|||||||
func TestThrottling(t *testing.T) {
|
func TestThrottling(t *testing.T) {
|
||||||
// Create a long block chain to download and the tester
|
// Create a long block chain to download and the tester
|
||||||
targetBlocks := 8 * blockCacheLimit
|
targetBlocks := 8 * blockCacheLimit
|
||||||
hashes := createHashes(0, targetBlocks)
|
hashes := createHashes(targetBlocks, knownHash)
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
|
||||||
tester := newTester(t, hashes, blocks)
|
tester := newTester()
|
||||||
tester.newPeer("peer", big.NewInt(10000), hashes[0])
|
tester.newPeer("peer", hashes, blocks)
|
||||||
|
|
||||||
|
// Wrap the importer to allow stepping
|
||||||
|
done := make(chan int)
|
||||||
|
tester.downloader.insertChain = func(blocks types.Blocks) (int, error) {
|
||||||
|
n, err := tester.insertChain(blocks)
|
||||||
|
done <- n
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
// Start a synchronisation concurrently
|
// Start a synchronisation concurrently
|
||||||
errc := make(chan error)
|
errc := make(chan error)
|
||||||
go func() {
|
go func() {
|
||||||
errc <- tester.sync("peer", hashes[0])
|
errc <- tester.sync("peer")
|
||||||
}()
|
}()
|
||||||
// Iteratively take some blocks, always checking the retrieval count
|
// Iteratively take some blocks, always checking the retrieval count
|
||||||
for total := 0; total < targetBlocks; {
|
for len(tester.ownBlocks) < targetBlocks+1 {
|
||||||
// Wait a bit for sync to complete
|
// Wait a bit for sync to throttle itself
|
||||||
|
var cached int
|
||||||
for start := time.Now(); time.Since(start) < 3*time.Second; {
|
for start := time.Now(); time.Since(start) < 3*time.Second; {
|
||||||
time.Sleep(25 * time.Millisecond)
|
time.Sleep(25 * time.Millisecond)
|
||||||
if len(tester.downloader.queue.blockPool) == blockCacheLimit {
|
|
||||||
|
cached = len(tester.downloader.queue.blockPool)
|
||||||
|
if cached == blockCacheLimit || len(tester.ownBlocks)+cached == targetBlocks+1 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Fetch the next batch of blocks
|
// Make sure we filled up the cache, then exhaust it
|
||||||
took := tester.downloader.TakeBlocks()
|
time.Sleep(25 * time.Millisecond) // give it a chance to screw up
|
||||||
if len(took) != blockCacheLimit {
|
if cached != blockCacheLimit && len(tester.ownBlocks)+cached < targetBlocks+1 {
|
||||||
t.Fatalf("block count mismatch: have %v, want %v", len(took), blockCacheLimit)
|
t.Fatalf("block count mismatch: have %v, want %v", cached, blockCacheLimit)
|
||||||
}
|
}
|
||||||
total += len(took)
|
<-done // finish previous blocking import
|
||||||
if total > targetBlocks {
|
for cached > maxBlockProcess {
|
||||||
t.Fatalf("target block count mismatch: have %v, want %v", total, targetBlocks)
|
cached -= <-done
|
||||||
}
|
}
|
||||||
|
time.Sleep(25 * time.Millisecond) // yield to the insertion
|
||||||
|
}
|
||||||
|
<-done // finish the last blocking import
|
||||||
|
|
||||||
|
// Check that we haven't pulled more blocks than available
|
||||||
|
if len(tester.ownBlocks) > targetBlocks+1 {
|
||||||
|
t.Fatalf("target block count mismatch: have %v, want %v", len(tester.ownBlocks), targetBlocks+1)
|
||||||
}
|
}
|
||||||
if err := <-errc; err != nil {
|
if err := <-errc; err != nil {
|
||||||
t.Fatalf("block synchronization failed: %v", err)
|
t.Fatalf("block synchronization failed: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
|
||||||
|
func TestMultiSynchronisation(t *testing.T) {
|
||||||
|
// Create various peers with various parts of the chain
|
||||||
|
targetPeers := 16
|
||||||
|
targetBlocks := targetPeers*blockCacheLimit - 15
|
||||||
|
|
||||||
|
hashes := createHashes(targetBlocks, knownHash)
|
||||||
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
|
||||||
|
tester := newTester()
|
||||||
|
for i := 0; i < targetPeers; i++ {
|
||||||
|
id := fmt.Sprintf("peer #%d", i)
|
||||||
|
tester.newPeer(id, hashes[i*blockCacheLimit:], blocks)
|
||||||
|
}
|
||||||
|
// Synchronise with the middle peer and make sure half of the blocks were retrieved
|
||||||
|
id := fmt.Sprintf("peer #%d", targetPeers/2)
|
||||||
|
if err := tester.sync(id); err != nil {
|
||||||
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
|
}
|
||||||
|
if imported := len(tester.ownBlocks); imported != len(tester.peerHashes[id]) {
|
||||||
|
t.Fatalf("synchronised block mismatch: have %v, want %v", imported, len(tester.peerHashes[id]))
|
||||||
|
}
|
||||||
|
// Synchronise with the best peer and make sure everything is retrieved
|
||||||
|
if err := tester.sync("peer #0"); err != nil {
|
||||||
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
|
}
|
||||||
|
if imported := len(tester.ownBlocks); imported != targetBlocks+1 {
|
||||||
|
t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that synchronising with a peer who's very slow at network IO does not
|
||||||
|
// stall the other peers in the system.
|
||||||
|
func TestSlowSynchronisation(t *testing.T) {
|
||||||
|
tester := newTester()
|
||||||
|
|
||||||
|
// Create a batch of blocks, with a slow and a full speed peer
|
||||||
|
targetCycles := 2
|
||||||
|
targetBlocks := targetCycles*blockCacheLimit - 15
|
||||||
|
targetIODelay := 500 * time.Millisecond
|
||||||
|
|
||||||
|
hashes := createHashes(targetBlocks, knownHash)
|
||||||
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
|
||||||
|
tester.newSlowPeer("fast", hashes, blocks, 0)
|
||||||
|
tester.newSlowPeer("slow", hashes, blocks, targetIODelay)
|
||||||
|
|
||||||
|
// Try to sync with the peers (pull hashes from fast)
|
||||||
|
start := time.Now()
|
||||||
|
if err := tester.sync("fast"); err != nil {
|
||||||
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
|
}
|
||||||
|
if imported := len(tester.ownBlocks); imported != targetBlocks+1 {
|
||||||
|
t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
|
||||||
|
}
|
||||||
|
// Check that the slow peer got hit at most once per block-cache-size import
|
||||||
|
limit := time.Duration(targetCycles+1) * targetIODelay
|
||||||
|
if delay := time.Since(start); delay >= limit {
|
||||||
|
t.Fatalf("synchronisation exceeded delay limit: have %v, want %v", delay, limit)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Tests that if a peer returns an invalid chain with a block pointing to a non-
|
// Tests that if a peer returns an invalid chain with a block pointing to a non-
|
||||||
// existing parent, it is correctly detected and handled.
|
// existing parent, it is correctly detected and handled.
|
||||||
func TestNonExistingParentAttack(t *testing.T) {
|
func TestNonExistingParentAttack(t *testing.T) {
|
||||||
// Forge a single-link chain with a forged header
|
tester := newTester()
|
||||||
hashes := createHashes(0, 1)
|
|
||||||
blocks := createBlocksFromHashes(hashes)
|
|
||||||
|
|
||||||
forged := blocks[hashes[0]]
|
// Forge a single-link chain with a forged header
|
||||||
forged.ParentHeaderHash = unknownHash
|
hashes := createHashes(1, knownHash)
|
||||||
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
tester.newPeer("valid", hashes, blocks)
|
||||||
|
|
||||||
|
hashes = createHashes(1, knownHash)
|
||||||
|
blocks = createBlocksFromHashes(hashes)
|
||||||
|
blocks[hashes[0]].ParentHeaderHash = unknownHash
|
||||||
|
tester.newPeer("attack", hashes, blocks)
|
||||||
|
|
||||||
// Try and sync with the malicious node and check that it fails
|
// Try and sync with the malicious node and check that it fails
|
||||||
tester := newTester(t, hashes, blocks)
|
if err := tester.sync("attack"); err == nil {
|
||||||
tester.newPeer("attack", big.NewInt(10000), hashes[0])
|
t.Fatalf("block synchronization succeeded")
|
||||||
if err := tester.sync("attack", hashes[0]); err != nil {
|
}
|
||||||
|
if tester.hasBlock(hashes[0]) {
|
||||||
|
t.Fatalf("tester accepted unknown-parent block: %v", blocks[hashes[0]])
|
||||||
|
}
|
||||||
|
// Try to synchronize with the valid chain and make sure it succeeds
|
||||||
|
if err := tester.sync("valid"); err != nil {
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
}
|
}
|
||||||
bs := tester.downloader.TakeBlocks()
|
if !tester.hasBlock(tester.peerHashes["valid"][0]) {
|
||||||
if len(bs) != 1 {
|
t.Fatalf("tester didn't accept known-parent block: %v", tester.peerBlocks["valid"][hashes[0]])
|
||||||
t.Fatalf("retrieved block mismatch: have %v, want %v", len(bs), 1)
|
|
||||||
}
|
|
||||||
if tester.hasBlock(bs[0].RawBlock.ParentHash()) {
|
|
||||||
t.Fatalf("tester knows about the unknown hash")
|
|
||||||
}
|
|
||||||
tester.downloader.Cancel()
|
|
||||||
|
|
||||||
// Reconstruct a valid chain, and try to synchronize with it
|
|
||||||
forged.ParentHeaderHash = knownHash
|
|
||||||
tester.newPeer("valid", big.NewInt(20000), hashes[0])
|
|
||||||
if err := tester.sync("valid", hashes[0]); err != nil {
|
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
|
||||||
}
|
|
||||||
bs = tester.downloader.TakeBlocks()
|
|
||||||
if len(bs) != 1 {
|
|
||||||
t.Fatalf("retrieved block mismatch: have %v, want %v", len(bs), 1)
|
|
||||||
}
|
|
||||||
if !tester.hasBlock(bs[0].RawBlock.ParentHash()) {
|
|
||||||
t.Fatalf("tester doesn't know about the origin hash")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that if a malicious peers keeps sending us repeating hashes, we don't
|
// Tests that if a malicious peers keeps sending us repeating hashes, we don't
|
||||||
// loop indefinitely.
|
// loop indefinitely.
|
||||||
func TestRepeatingHashAttack(t *testing.T) {
|
func TestRepeatingHashAttack(t *testing.T) { // TODO: Is this thing valid??
|
||||||
|
tester := newTester()
|
||||||
|
|
||||||
// Create a valid chain, but drop the last link
|
// Create a valid chain, but drop the last link
|
||||||
hashes := createHashes(0, blockCacheLimit)
|
hashes := createHashes(blockCacheLimit, knownHash)
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
forged := hashes[:len(hashes)-1]
|
tester.newPeer("valid", hashes, blocks)
|
||||||
|
tester.newPeer("attack", hashes[:len(hashes)-1], blocks)
|
||||||
|
|
||||||
// Try and sync with the malicious node
|
// Try and sync with the malicious node
|
||||||
tester := newTester(t, forged, blocks)
|
|
||||||
tester.newPeer("attack", big.NewInt(10000), forged[0])
|
|
||||||
|
|
||||||
errc := make(chan error)
|
errc := make(chan error)
|
||||||
go func() {
|
go func() {
|
||||||
errc <- tester.sync("attack", hashes[0])
|
errc <- tester.sync("attack")
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Make sure that syncing returns and does so with a failure
|
// Make sure that syncing returns and does so with a failure
|
||||||
select {
|
select {
|
||||||
case <-time.After(time.Second):
|
case <-time.After(time.Second):
|
||||||
@ -364,9 +439,7 @@ func TestRepeatingHashAttack(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Ensure that a valid chain can still pass sync
|
// Ensure that a valid chain can still pass sync
|
||||||
tester.hashes = hashes
|
if err := tester.sync("valid"); err != nil {
|
||||||
tester.newPeer("valid", big.NewInt(20000), hashes[0])
|
|
||||||
if err := tester.sync("valid", hashes[0]); err != nil {
|
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -374,23 +447,22 @@ func TestRepeatingHashAttack(t *testing.T) {
|
|||||||
// Tests that if a malicious peers returns a non-existent block hash, it should
|
// Tests that if a malicious peers returns a non-existent block hash, it should
|
||||||
// eventually time out and the sync reattempted.
|
// eventually time out and the sync reattempted.
|
||||||
func TestNonExistingBlockAttack(t *testing.T) {
|
func TestNonExistingBlockAttack(t *testing.T) {
|
||||||
|
tester := newTester()
|
||||||
|
|
||||||
// Create a valid chain, but forge the last link
|
// Create a valid chain, but forge the last link
|
||||||
hashes := createHashes(0, blockCacheLimit)
|
hashes := createHashes(blockCacheLimit, knownHash)
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
origin := hashes[len(hashes)/2]
|
tester.newPeer("valid", hashes, blocks)
|
||||||
|
|
||||||
hashes[len(hashes)/2] = unknownHash
|
hashes[len(hashes)/2] = unknownHash
|
||||||
|
tester.newPeer("attack", hashes, blocks)
|
||||||
|
|
||||||
// Try and sync with the malicious node and check that it fails
|
// Try and sync with the malicious node and check that it fails
|
||||||
tester := newTester(t, hashes, blocks)
|
if err := tester.sync("attack"); err != errPeersUnavailable {
|
||||||
tester.newPeer("attack", big.NewInt(10000), hashes[0])
|
|
||||||
if err := tester.sync("attack", hashes[0]); err != errPeersUnavailable {
|
|
||||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errPeersUnavailable)
|
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errPeersUnavailable)
|
||||||
}
|
}
|
||||||
// Ensure that a valid chain can still pass sync
|
// Ensure that a valid chain can still pass sync
|
||||||
hashes[len(hashes)/2] = origin
|
if err := tester.sync("valid"); err != nil {
|
||||||
tester.newPeer("valid", big.NewInt(20000), hashes[0])
|
|
||||||
if err := tester.sync("valid", hashes[0]); err != nil {
|
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -398,30 +470,28 @@ func TestNonExistingBlockAttack(t *testing.T) {
|
|||||||
// Tests that if a malicious peer is returning hashes in a weird order, that the
|
// Tests that if a malicious peer is returning hashes in a weird order, that the
|
||||||
// sync throttler doesn't choke on them waiting for the valid blocks.
|
// sync throttler doesn't choke on them waiting for the valid blocks.
|
||||||
func TestInvalidHashOrderAttack(t *testing.T) {
|
func TestInvalidHashOrderAttack(t *testing.T) {
|
||||||
|
tester := newTester()
|
||||||
|
|
||||||
// Create a valid long chain, but reverse some hashes within
|
// Create a valid long chain, but reverse some hashes within
|
||||||
hashes := createHashes(0, 4*blockCacheLimit)
|
hashes := createHashes(4*blockCacheLimit, knownHash)
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
tester.newPeer("valid", hashes, blocks)
|
||||||
|
|
||||||
chunk1 := make([]common.Hash, blockCacheLimit)
|
chunk1 := make([]common.Hash, blockCacheLimit)
|
||||||
chunk2 := make([]common.Hash, blockCacheLimit)
|
chunk2 := make([]common.Hash, blockCacheLimit)
|
||||||
copy(chunk1, hashes[blockCacheLimit:2*blockCacheLimit])
|
copy(chunk1, hashes[blockCacheLimit:2*blockCacheLimit])
|
||||||
copy(chunk2, hashes[2*blockCacheLimit:3*blockCacheLimit])
|
copy(chunk2, hashes[2*blockCacheLimit:3*blockCacheLimit])
|
||||||
|
|
||||||
reverse := make([]common.Hash, len(hashes))
|
copy(hashes[2*blockCacheLimit:], chunk1)
|
||||||
copy(reverse, hashes)
|
copy(hashes[blockCacheLimit:], chunk2)
|
||||||
copy(reverse[2*blockCacheLimit:], chunk1)
|
tester.newPeer("attack", hashes, blocks)
|
||||||
copy(reverse[blockCacheLimit:], chunk2)
|
|
||||||
|
|
||||||
// Try and sync with the malicious node and check that it fails
|
// Try and sync with the malicious node and check that it fails
|
||||||
tester := newTester(t, reverse, blocks)
|
if err := tester.sync("attack"); err != errInvalidChain {
|
||||||
tester.newPeer("attack", big.NewInt(10000), reverse[0])
|
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain)
|
||||||
if _, err := tester.syncTake("attack", reverse[0]); err != ErrInvalidChain {
|
|
||||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain)
|
|
||||||
}
|
}
|
||||||
// Ensure that a valid chain can still pass sync
|
// Ensure that a valid chain can still pass sync
|
||||||
tester.hashes = hashes
|
if err := tester.sync("valid"); err != nil {
|
||||||
tester.newPeer("valid", big.NewInt(20000), hashes[0])
|
|
||||||
if _, err := tester.syncTake("valid", hashes[0]); err != nil {
|
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -429,17 +499,24 @@ func TestInvalidHashOrderAttack(t *testing.T) {
|
|||||||
// Tests that if a malicious peer makes up a random hash chain and tries to push
|
// Tests that if a malicious peer makes up a random hash chain and tries to push
|
||||||
// indefinitely, it actually gets caught with it.
|
// indefinitely, it actually gets caught with it.
|
||||||
func TestMadeupHashChainAttack(t *testing.T) {
|
func TestMadeupHashChainAttack(t *testing.T) {
|
||||||
|
tester := newTester()
|
||||||
blockSoftTTL = 100 * time.Millisecond
|
blockSoftTTL = 100 * time.Millisecond
|
||||||
crossCheckCycle = 25 * time.Millisecond
|
crossCheckCycle = 25 * time.Millisecond
|
||||||
|
|
||||||
// Create a long chain of hashes without backing blocks
|
// Create a long chain of hashes without backing blocks
|
||||||
hashes := createHashes(0, 1024*blockCacheLimit)
|
hashes := createHashes(4*blockCacheLimit, knownHash)
|
||||||
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
|
||||||
|
tester.newPeer("valid", hashes, blocks)
|
||||||
|
tester.newPeer("attack", createHashes(1024*blockCacheLimit, knownHash), nil)
|
||||||
|
|
||||||
// Try and sync with the malicious node and check that it fails
|
// Try and sync with the malicious node and check that it fails
|
||||||
tester := newTester(t, hashes, nil)
|
if err := tester.sync("attack"); err != errCrossCheckFailed {
|
||||||
tester.newPeer("attack", big.NewInt(10000), hashes[0])
|
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
|
||||||
if _, err := tester.syncTake("attack", hashes[0]); err != ErrCrossCheckFailed {
|
}
|
||||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
|
// Ensure that a valid chain can still pass sync
|
||||||
|
if err := tester.sync("valid"); err != nil {
|
||||||
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -449,14 +526,14 @@ func TestMadeupHashChainAttack(t *testing.T) {
|
|||||||
// one by one prevents reliable block/parent verification.
|
// one by one prevents reliable block/parent verification.
|
||||||
func TestMadeupHashChainDrippingAttack(t *testing.T) {
|
func TestMadeupHashChainDrippingAttack(t *testing.T) {
|
||||||
// Create a random chain of hashes to drip
|
// Create a random chain of hashes to drip
|
||||||
hashes := createHashes(0, 16*blockCacheLimit)
|
hashes := createHashes(16*blockCacheLimit, knownHash)
|
||||||
tester := newTester(t, hashes, nil)
|
tester := newTester()
|
||||||
|
|
||||||
// Try and sync with the attacker, one hash at a time
|
// Try and sync with the attacker, one hash at a time
|
||||||
tester.maxHashFetch = 1
|
tester.maxHashFetch = 1
|
||||||
tester.newPeer("attack", big.NewInt(10000), hashes[0])
|
tester.newPeer("attack", hashes, nil)
|
||||||
if _, err := tester.syncTake("attack", hashes[0]); err != ErrStallingPeer {
|
if err := tester.sync("attack"); err != errStallingPeer {
|
||||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrStallingPeer)
|
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -470,7 +547,7 @@ func TestMadeupBlockChainAttack(t *testing.T) {
|
|||||||
crossCheckCycle = 25 * time.Millisecond
|
crossCheckCycle = 25 * time.Millisecond
|
||||||
|
|
||||||
// Create a long chain of blocks and simulate an invalid chain by dropping every second
|
// Create a long chain of blocks and simulate an invalid chain by dropping every second
|
||||||
hashes := createHashes(0, 16*blockCacheLimit)
|
hashes := createHashes(16*blockCacheLimit, knownHash)
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
|
||||||
gapped := make([]common.Hash, len(hashes)/2)
|
gapped := make([]common.Hash, len(hashes)/2)
|
||||||
@ -478,18 +555,17 @@ func TestMadeupBlockChainAttack(t *testing.T) {
|
|||||||
gapped[i] = hashes[2*i]
|
gapped[i] = hashes[2*i]
|
||||||
}
|
}
|
||||||
// Try and sync with the malicious node and check that it fails
|
// Try and sync with the malicious node and check that it fails
|
||||||
tester := newTester(t, gapped, blocks)
|
tester := newTester()
|
||||||
tester.newPeer("attack", big.NewInt(10000), gapped[0])
|
tester.newPeer("attack", gapped, blocks)
|
||||||
if _, err := tester.syncTake("attack", gapped[0]); err != ErrCrossCheckFailed {
|
if err := tester.sync("attack"); err != errCrossCheckFailed {
|
||||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
|
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
|
||||||
}
|
}
|
||||||
// Ensure that a valid chain can still pass sync
|
// Ensure that a valid chain can still pass sync
|
||||||
blockSoftTTL = defaultBlockTTL
|
blockSoftTTL = defaultBlockTTL
|
||||||
crossCheckCycle = defaultCrossCheckCycle
|
crossCheckCycle = defaultCrossCheckCycle
|
||||||
|
|
||||||
tester.hashes = hashes
|
tester.newPeer("valid", hashes, blocks)
|
||||||
tester.newPeer("valid", big.NewInt(20000), hashes[0])
|
if err := tester.sync("valid"); err != nil {
|
||||||
if _, err := tester.syncTake("valid", hashes[0]); err != nil {
|
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -498,6 +574,8 @@ func TestMadeupBlockChainAttack(t *testing.T) {
|
|||||||
// attacker make up a valid hashes for random blocks, but also forges the block
|
// attacker make up a valid hashes for random blocks, but also forges the block
|
||||||
// parents to point to existing hashes.
|
// parents to point to existing hashes.
|
||||||
func TestMadeupParentBlockChainAttack(t *testing.T) {
|
func TestMadeupParentBlockChainAttack(t *testing.T) {
|
||||||
|
tester := newTester()
|
||||||
|
|
||||||
defaultBlockTTL := blockSoftTTL
|
defaultBlockTTL := blockSoftTTL
|
||||||
defaultCrossCheckCycle := crossCheckCycle
|
defaultCrossCheckCycle := crossCheckCycle
|
||||||
|
|
||||||
@ -505,25 +583,24 @@ func TestMadeupParentBlockChainAttack(t *testing.T) {
|
|||||||
crossCheckCycle = 25 * time.Millisecond
|
crossCheckCycle = 25 * time.Millisecond
|
||||||
|
|
||||||
// Create a long chain of blocks and simulate an invalid chain by dropping every second
|
// Create a long chain of blocks and simulate an invalid chain by dropping every second
|
||||||
hashes := createHashes(0, 16*blockCacheLimit)
|
hashes := createHashes(16*blockCacheLimit, knownHash)
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
forges := createBlocksFromHashes(hashes)
|
tester.newPeer("valid", hashes, blocks)
|
||||||
for hash, block := range forges {
|
|
||||||
block.ParentHeaderHash = hash // Simulate pointing to already known hash
|
for _, block := range blocks {
|
||||||
|
block.ParentHeaderHash = knownHash // Simulate pointing to already known hash
|
||||||
}
|
}
|
||||||
|
tester.newPeer("attack", hashes, blocks)
|
||||||
|
|
||||||
// Try and sync with the malicious node and check that it fails
|
// Try and sync with the malicious node and check that it fails
|
||||||
tester := newTester(t, hashes, forges)
|
if err := tester.sync("attack"); err != errCrossCheckFailed {
|
||||||
tester.newPeer("attack", big.NewInt(10000), hashes[0])
|
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
|
||||||
if _, err := tester.syncTake("attack", hashes[0]); err != ErrCrossCheckFailed {
|
|
||||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
|
|
||||||
}
|
}
|
||||||
// Ensure that a valid chain can still pass sync
|
// Ensure that a valid chain can still pass sync
|
||||||
blockSoftTTL = defaultBlockTTL
|
blockSoftTTL = defaultBlockTTL
|
||||||
crossCheckCycle = defaultCrossCheckCycle
|
crossCheckCycle = defaultCrossCheckCycle
|
||||||
|
|
||||||
tester.blocks = blocks
|
if err := tester.sync("valid"); err != nil {
|
||||||
tester.newPeer("valid", big.NewInt(20000), hashes[0])
|
|
||||||
if _, err := tester.syncTake("valid", hashes[0]); err != nil {
|
|
||||||
t.Fatalf("failed to synchronise blocks: %v", err)
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -532,68 +609,81 @@ func TestMadeupParentBlockChainAttack(t *testing.T) {
|
|||||||
// the downloader, it will not keep refetching the same chain indefinitely, but
|
// the downloader, it will not keep refetching the same chain indefinitely, but
|
||||||
// gradually block pieces of it, until it's head is also blocked.
|
// gradually block pieces of it, until it's head is also blocked.
|
||||||
func TestBannedChainStarvationAttack(t *testing.T) {
|
func TestBannedChainStarvationAttack(t *testing.T) {
|
||||||
// Construct a valid chain, but ban one of the hashes in it
|
|
||||||
hashes := createHashes(0, 8*blockCacheLimit)
|
|
||||||
hashes[len(hashes)/2+23] = bannedHash // weird index to have non multiple of ban chunk size
|
|
||||||
|
|
||||||
blocks := createBlocksFromHashes(hashes)
|
|
||||||
|
|
||||||
// Create the tester and ban the selected hash
|
// Create the tester and ban the selected hash
|
||||||
tester := newTester(t, hashes, blocks)
|
tester := newTester()
|
||||||
tester.downloader.banned.Add(bannedHash)
|
tester.downloader.banned.Add(bannedHash)
|
||||||
|
|
||||||
|
// Construct a valid chain, for it and ban the fork
|
||||||
|
hashes := createHashes(8*blockCacheLimit, knownHash)
|
||||||
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
tester.newPeer("valid", hashes, blocks)
|
||||||
|
|
||||||
|
fork := len(hashes)/2 - 23
|
||||||
|
hashes = append(createHashes(4*blockCacheLimit, bannedHash), hashes[fork:]...)
|
||||||
|
blocks = createBlocksFromHashes(hashes)
|
||||||
|
tester.newPeer("attack", hashes, blocks)
|
||||||
|
|
||||||
// Iteratively try to sync, and verify that the banned hash list grows until
|
// Iteratively try to sync, and verify that the banned hash list grows until
|
||||||
// the head of the invalid chain is blocked too.
|
// the head of the invalid chain is blocked too.
|
||||||
tester.newPeer("attack", big.NewInt(10000), hashes[0])
|
|
||||||
for banned := tester.downloader.banned.Size(); ; {
|
for banned := tester.downloader.banned.Size(); ; {
|
||||||
// Try to sync with the attacker, check hash chain failure
|
// Try to sync with the attacker, check hash chain failure
|
||||||
if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain {
|
if err := tester.sync("attack"); err != errInvalidChain {
|
||||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain)
|
if tester.downloader.banned.Has(hashes[0]) && err == errBannedHead {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain)
|
||||||
}
|
}
|
||||||
// Check that the ban list grew with at least 1 new item, or all banned
|
// Check that the ban list grew with at least 1 new item, or all banned
|
||||||
bans := tester.downloader.banned.Size()
|
bans := tester.downloader.banned.Size()
|
||||||
if bans < banned+1 {
|
if bans < banned+1 {
|
||||||
if tester.downloader.banned.Has(hashes[0]) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
t.Fatalf("ban count mismatch: have %v, want %v+", bans, banned+1)
|
t.Fatalf("ban count mismatch: have %v, want %v+", bans, banned+1)
|
||||||
}
|
}
|
||||||
banned = bans
|
banned = bans
|
||||||
}
|
}
|
||||||
// Check that after banning an entire chain, bad peers get dropped
|
// Check that after banning an entire chain, bad peers get dropped
|
||||||
if err := tester.newPeer("new attacker", big.NewInt(10000), hashes[0]); err != errBannedHead {
|
if err := tester.newPeer("new attacker", hashes, blocks); err != errBannedHead {
|
||||||
t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead)
|
t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead)
|
||||||
}
|
}
|
||||||
if peer := tester.downloader.peers.Peer("net attacker"); peer != nil {
|
if peer := tester.downloader.peers.Peer("new attacker"); peer != nil {
|
||||||
t.Fatalf("banned attacker registered: %v", peer)
|
t.Fatalf("banned attacker registered: %v", peer)
|
||||||
}
|
}
|
||||||
|
// Ensure that a valid chain can still pass sync
|
||||||
|
if err := tester.sync("valid"); err != nil {
|
||||||
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that if a peer sends excessively many/large invalid chains that are
|
// Tests that if a peer sends excessively many/large invalid chains that are
|
||||||
// gradually banned, it will have an upper limit on the consumed memory and also
|
// gradually banned, it will have an upper limit on the consumed memory and also
|
||||||
// the origin bad hashes will not be evacuated.
|
// the origin bad hashes will not be evacuated.
|
||||||
func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
|
func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
|
||||||
|
// Create the tester and ban the selected hash
|
||||||
|
tester := newTester()
|
||||||
|
tester.downloader.banned.Add(bannedHash)
|
||||||
|
|
||||||
// Reduce the test size a bit
|
// Reduce the test size a bit
|
||||||
|
defaultMaxBlockFetch := MaxBlockFetch
|
||||||
|
defaultMaxBannedHashes := maxBannedHashes
|
||||||
|
|
||||||
MaxBlockFetch = 4
|
MaxBlockFetch = 4
|
||||||
maxBannedHashes = 256
|
maxBannedHashes = 256
|
||||||
|
|
||||||
// Construct a banned chain with more chunks than the ban limit
|
// Construct a banned chain with more chunks than the ban limit
|
||||||
hashes := createHashes(0, maxBannedHashes*MaxBlockFetch)
|
hashes := createHashes(8*blockCacheLimit, knownHash)
|
||||||
hashes[len(hashes)-1] = bannedHash // weird index to have non multiple of ban chunk size
|
|
||||||
|
|
||||||
blocks := createBlocksFromHashes(hashes)
|
blocks := createBlocksFromHashes(hashes)
|
||||||
|
tester.newPeer("valid", hashes, blocks)
|
||||||
|
|
||||||
// Create the tester and ban the selected hash
|
fork := len(hashes)/2 - 23
|
||||||
tester := newTester(t, hashes, blocks)
|
hashes = append(createHashes(maxBannedHashes*MaxBlockFetch, bannedHash), hashes[fork:]...)
|
||||||
tester.downloader.banned.Add(bannedHash)
|
blocks = createBlocksFromHashes(hashes)
|
||||||
|
tester.newPeer("attack", hashes, blocks)
|
||||||
|
|
||||||
// Iteratively try to sync, and verify that the banned hash list grows until
|
// Iteratively try to sync, and verify that the banned hash list grows until
|
||||||
// the head of the invalid chain is blocked too.
|
// the head of the invalid chain is blocked too.
|
||||||
tester.newPeer("attack", big.NewInt(10000), hashes[0])
|
|
||||||
for {
|
for {
|
||||||
// Try to sync with the attacker, check hash chain failure
|
// Try to sync with the attacker, check hash chain failure
|
||||||
if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain {
|
if err := tester.sync("attack"); err != errInvalidChain {
|
||||||
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain)
|
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errInvalidChain)
|
||||||
}
|
}
|
||||||
// Short circuit if the entire chain was banned
|
// Short circuit if the entire chain was banned
|
||||||
if tester.downloader.banned.Has(hashes[0]) {
|
if tester.downloader.banned.Has(hashes[0]) {
|
||||||
@ -609,4 +699,91 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Ensure that a valid chain can still pass sync
|
||||||
|
MaxBlockFetch = defaultMaxBlockFetch
|
||||||
|
maxBannedHashes = defaultMaxBannedHashes
|
||||||
|
|
||||||
|
if err := tester.sync("valid"); err != nil {
|
||||||
|
t.Fatalf("failed to synchronise blocks: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
|
||||||
|
func TestHashAttackerDropping(t *testing.T) {
|
||||||
|
// Define the disconnection requirement for individual hash fetch errors
|
||||||
|
tests := []struct {
|
||||||
|
result error
|
||||||
|
drop bool
|
||||||
|
}{
|
||||||
|
{nil, false}, // Sync succeeded, all is well
|
||||||
|
{errBusy, false}, // Sync is already in progress, no problem
|
||||||
|
{errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
|
||||||
|
{errBadPeer, true}, // Peer was deemed bad for some reason, drop it
|
||||||
|
{errStallingPeer, true}, // Peer was detected to be stalling, drop it
|
||||||
|
{errBannedHead, true}, // Peer's head hash is a known bad hash, drop it
|
||||||
|
{errNoPeers, false}, // No peers to download from, soft race, no issue
|
||||||
|
{errPendingQueue, false}, // There are blocks still cached, wait to exhaust, no issue
|
||||||
|
{errTimeout, true}, // No hashes received in due time, drop the peer
|
||||||
|
{errEmptyHashSet, true}, // No hashes were returned as a response, drop as it's a dead end
|
||||||
|
{errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
|
||||||
|
{errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
|
||||||
|
{errCrossCheckFailed, true}, // Hash-origin failed to pass a block cross check, drop
|
||||||
|
{errCancelHashFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||||
|
{errCancelBlockFetch, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||||
|
{errCancelChainImport, false}, // Synchronisation was canceled, origin may be innocent, don't drop
|
||||||
|
}
|
||||||
|
// Run the tests and check disconnection status
|
||||||
|
tester := newTester()
|
||||||
|
for i, tt := range tests {
|
||||||
|
// Register a new peer and ensure it's presence
|
||||||
|
id := fmt.Sprintf("test %d", i)
|
||||||
|
if err := tester.newPeer(id, []common.Hash{knownHash}, nil); err != nil {
|
||||||
|
t.Fatalf("test %d: failed to register new peer: %v", i, err)
|
||||||
|
}
|
||||||
|
if _, ok := tester.peerHashes[id]; !ok {
|
||||||
|
t.Fatalf("test %d: registered peer not found", i)
|
||||||
|
}
|
||||||
|
// Simulate a synchronisation and check the required result
|
||||||
|
tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
|
||||||
|
|
||||||
|
tester.downloader.Synchronise(id, knownHash)
|
||||||
|
if _, ok := tester.peerHashes[id]; !ok != tt.drop {
|
||||||
|
t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that feeding bad blocks will result in a peer drop.
|
||||||
|
func TestBlockAttackerDropping(t *testing.T) {
|
||||||
|
// Define the disconnection requirement for individual block import errors
|
||||||
|
tests := []struct {
|
||||||
|
failure bool
|
||||||
|
drop bool
|
||||||
|
}{{true, true}, {false, false}}
|
||||||
|
|
||||||
|
// Run the tests and check disconnection status
|
||||||
|
tester := newTester()
|
||||||
|
for i, tt := range tests {
|
||||||
|
// Register a new peer and ensure it's presence
|
||||||
|
id := fmt.Sprintf("test %d", i)
|
||||||
|
if err := tester.newPeer(id, []common.Hash{common.Hash{}}, nil); err != nil {
|
||||||
|
t.Fatalf("test %d: failed to register new peer: %v", i, err)
|
||||||
|
}
|
||||||
|
if _, ok := tester.peerHashes[id]; !ok {
|
||||||
|
t.Fatalf("test %d: registered peer not found", i)
|
||||||
|
}
|
||||||
|
// Assemble a good or bad block, depending of the test
|
||||||
|
raw := createBlock(1, knownHash, common.Hash{})
|
||||||
|
if tt.failure {
|
||||||
|
raw = createBlock(1, unknownHash, common.Hash{})
|
||||||
|
}
|
||||||
|
block := &Block{OriginPeer: id, RawBlock: raw}
|
||||||
|
|
||||||
|
// Simulate block processing and check the result
|
||||||
|
tester.downloader.queue.blockCache[0] = block
|
||||||
|
tester.downloader.process()
|
||||||
|
if _, ok := tester.peerHashes[id]; !ok != tt.drop {
|
||||||
|
t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.failure, !ok, tt.drop)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -74,7 +74,7 @@ func (p *peer) Fetch(request *fetchRequest) error {
|
|||||||
for hash, _ := range request.Hashes {
|
for hash, _ := range request.Hashes {
|
||||||
hashes = append(hashes, hash)
|
hashes = append(hashes, hash)
|
||||||
}
|
}
|
||||||
p.getBlocks(hashes)
|
go p.getBlocks(hashes)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -320,7 +320,7 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
|
|||||||
// If a requested block falls out of the range, the hash chain is invalid
|
// If a requested block falls out of the range, the hash chain is invalid
|
||||||
index := int(block.NumberU64()) - q.blockOffset
|
index := int(block.NumberU64()) - q.blockOffset
|
||||||
if index >= len(q.blockCache) || index < 0 {
|
if index >= len(q.blockCache) || index < 0 {
|
||||||
return ErrInvalidChain
|
return errInvalidChain
|
||||||
}
|
}
|
||||||
// Otherwise merge the block and mark the hash block
|
// Otherwise merge the block and mark the hash block
|
||||||
q.blockCache[index] = &Block{
|
q.blockCache[index] = &Block{
|
||||||
|
@ -1,30 +0,0 @@
|
|||||||
package downloader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"gopkg.in/fatih/set.v0"
|
|
||||||
)
|
|
||||||
|
|
||||||
func createHashSet(hashes []common.Hash) *set.Set {
|
|
||||||
hset := set.New()
|
|
||||||
|
|
||||||
for _, hash := range hashes {
|
|
||||||
hset.Add(hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
return hset
|
|
||||||
}
|
|
||||||
|
|
||||||
func createBlocksFromHashSet(hashes *set.Set) []*types.Block {
|
|
||||||
blocks := make([]*types.Block, hashes.Size())
|
|
||||||
|
|
||||||
var i int
|
|
||||||
hashes.Each(func(v interface{}) bool {
|
|
||||||
blocks[i] = createBlock(i, common.Hash{}, v.(common.Hash))
|
|
||||||
i++
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
|
|
||||||
return blocks
|
|
||||||
}
|
|
181
eth/gasprice.go
Normal file
181
eth/gasprice.go
Normal file
@ -0,0 +1,181 @@
|
|||||||
|
package eth
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
"math/rand"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/event"
|
||||||
|
"github.com/ethereum/go-ethereum/logger"
|
||||||
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
|
)
|
||||||
|
|
||||||
|
const gpoProcessPastBlocks = 100
|
||||||
|
|
||||||
|
type blockPriceInfo struct {
|
||||||
|
baseGasPrice *big.Int
|
||||||
|
}
|
||||||
|
|
||||||
|
type GasPriceOracle struct {
|
||||||
|
eth *Ethereum
|
||||||
|
chain *core.ChainManager
|
||||||
|
pool *core.TxPool
|
||||||
|
events event.Subscription
|
||||||
|
blocks map[uint64]*blockPriceInfo
|
||||||
|
firstProcessed, lastProcessed uint64
|
||||||
|
lastBaseMutex sync.Mutex
|
||||||
|
lastBase *big.Int
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewGasPriceOracle(eth *Ethereum) (self *GasPriceOracle) {
|
||||||
|
self = &GasPriceOracle{}
|
||||||
|
self.blocks = make(map[uint64]*blockPriceInfo)
|
||||||
|
self.eth = eth
|
||||||
|
self.chain = eth.chainManager
|
||||||
|
self.pool = eth.txPool
|
||||||
|
self.events = eth.EventMux().Subscribe(
|
||||||
|
core.ChainEvent{},
|
||||||
|
core.ChainSplitEvent{},
|
||||||
|
core.TxPreEvent{},
|
||||||
|
core.TxPostEvent{},
|
||||||
|
)
|
||||||
|
self.processPastBlocks()
|
||||||
|
go self.listenLoop()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *GasPriceOracle) processPastBlocks() {
|
||||||
|
last := self.chain.CurrentBlock().NumberU64()
|
||||||
|
first := uint64(0)
|
||||||
|
if last > gpoProcessPastBlocks {
|
||||||
|
first = last - gpoProcessPastBlocks
|
||||||
|
}
|
||||||
|
self.firstProcessed = first
|
||||||
|
for i := first; i <= last; i++ {
|
||||||
|
self.processBlock(self.chain.GetBlockByNumber(i))
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *GasPriceOracle) listenLoop() {
|
||||||
|
for {
|
||||||
|
ev, isopen := <-self.events.Chan()
|
||||||
|
if !isopen {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
switch ev := ev.(type) {
|
||||||
|
case core.ChainEvent:
|
||||||
|
self.processBlock(ev.Block)
|
||||||
|
case core.ChainSplitEvent:
|
||||||
|
self.processBlock(ev.Block)
|
||||||
|
case core.TxPreEvent:
|
||||||
|
case core.TxPostEvent:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
self.events.Unsubscribe()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *GasPriceOracle) processBlock(block *types.Block) {
|
||||||
|
i := block.NumberU64()
|
||||||
|
if i > self.lastProcessed {
|
||||||
|
self.lastProcessed = i
|
||||||
|
}
|
||||||
|
|
||||||
|
lastBase := self.eth.GpoMinGasPrice
|
||||||
|
bpl := self.blocks[i-1]
|
||||||
|
if bpl != nil {
|
||||||
|
lastBase = bpl.baseGasPrice
|
||||||
|
}
|
||||||
|
if lastBase == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var corr int
|
||||||
|
lp := self.lowestPrice(block)
|
||||||
|
if lp == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if lastBase.Cmp(lp) < 0 {
|
||||||
|
corr = self.eth.GpobaseStepUp
|
||||||
|
} else {
|
||||||
|
corr = -self.eth.GpobaseStepDown
|
||||||
|
}
|
||||||
|
|
||||||
|
crand := int64(corr * (900 + rand.Intn(201)))
|
||||||
|
newBase := new(big.Int).Mul(lastBase, big.NewInt(1000000+crand))
|
||||||
|
newBase.Div(newBase, big.NewInt(1000000))
|
||||||
|
|
||||||
|
bpi := self.blocks[i]
|
||||||
|
if bpi == nil {
|
||||||
|
bpi = &blockPriceInfo{}
|
||||||
|
self.blocks[i] = bpi
|
||||||
|
}
|
||||||
|
bpi.baseGasPrice = newBase
|
||||||
|
self.lastBaseMutex.Lock()
|
||||||
|
self.lastBase = newBase
|
||||||
|
self.lastBaseMutex.Unlock()
|
||||||
|
|
||||||
|
glog.V(logger.Detail).Infof("Processed block #%v, base price is %v\n", block.NumberU64(), newBase.Int64())
|
||||||
|
}
|
||||||
|
|
||||||
|
// returns the lowers possible price with which a tx was or could have been included
|
||||||
|
func (self *GasPriceOracle) lowestPrice(block *types.Block) *big.Int {
|
||||||
|
gasUsed := new(big.Int)
|
||||||
|
recepits, err := self.eth.BlockProcessor().GetBlockReceipts(block.Hash())
|
||||||
|
if err != nil {
|
||||||
|
return self.eth.GpoMinGasPrice
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(recepits) > 0 {
|
||||||
|
gasUsed = recepits[len(recepits)-1].CumulativeGasUsed
|
||||||
|
}
|
||||||
|
|
||||||
|
if new(big.Int).Mul(gasUsed, big.NewInt(100)).Cmp(new(big.Int).Mul(block.Header().GasLimit,
|
||||||
|
big.NewInt(int64(self.eth.GpoFullBlockRatio)))) < 0 {
|
||||||
|
// block is not full, could have posted a tx with MinGasPrice
|
||||||
|
return self.eth.GpoMinGasPrice
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(block.Transactions()) < 1 {
|
||||||
|
return self.eth.GpoMinGasPrice
|
||||||
|
}
|
||||||
|
|
||||||
|
// block is full, find smallest gasPrice
|
||||||
|
minPrice := block.Transactions()[0].GasPrice()
|
||||||
|
for i := 1; i < len(block.Transactions()); i++ {
|
||||||
|
price := block.Transactions()[i].GasPrice()
|
||||||
|
if price.Cmp(minPrice) < 0 {
|
||||||
|
minPrice = price
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return minPrice
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *GasPriceOracle) SuggestPrice() *big.Int {
|
||||||
|
self.lastBaseMutex.Lock()
|
||||||
|
base := self.lastBase
|
||||||
|
self.lastBaseMutex.Unlock()
|
||||||
|
|
||||||
|
if base == nil {
|
||||||
|
base = self.eth.GpoMinGasPrice
|
||||||
|
}
|
||||||
|
if base == nil {
|
||||||
|
return big.NewInt(10000000000000) // apparently MinGasPrice is not initialized during some tests
|
||||||
|
}
|
||||||
|
|
||||||
|
baseCorr := new(big.Int).Mul(base, big.NewInt(int64(self.eth.GpobaseCorrectionFactor)))
|
||||||
|
baseCorr.Div(baseCorr, big.NewInt(100))
|
||||||
|
|
||||||
|
if baseCorr.Cmp(self.eth.GpoMinGasPrice) < 0 {
|
||||||
|
return self.eth.GpoMinGasPrice
|
||||||
|
}
|
||||||
|
|
||||||
|
if baseCorr.Cmp(self.eth.GpoMaxGasPrice) > 0 {
|
||||||
|
return self.eth.GpoMaxGasPrice
|
||||||
|
}
|
||||||
|
|
||||||
|
return baseCorr
|
||||||
|
}
|
@ -68,12 +68,11 @@ type ProtocolManager struct {
|
|||||||
|
|
||||||
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
|
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
|
||||||
// with the ethereum network.
|
// with the ethereum network.
|
||||||
func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpool txPool, chainman *core.ChainManager, downloader *downloader.Downloader) *ProtocolManager {
|
func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpool txPool, chainman *core.ChainManager) *ProtocolManager {
|
||||||
manager := &ProtocolManager{
|
manager := &ProtocolManager{
|
||||||
eventMux: mux,
|
eventMux: mux,
|
||||||
txpool: txpool,
|
txpool: txpool,
|
||||||
chainman: chainman,
|
chainman: chainman,
|
||||||
downloader: downloader,
|
|
||||||
peers: newPeerSet(),
|
peers: newPeerSet(),
|
||||||
newPeerCh: make(chan *peer, 1),
|
newPeerCh: make(chan *peer, 1),
|
||||||
newHashCh: make(chan []*blockAnnounce, 1),
|
newHashCh: make(chan []*blockAnnounce, 1),
|
||||||
@ -81,6 +80,7 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
|
|||||||
txsyncCh: make(chan *txsync),
|
txsyncCh: make(chan *txsync),
|
||||||
quitSync: make(chan struct{}),
|
quitSync: make(chan struct{}),
|
||||||
}
|
}
|
||||||
|
manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.InsertChain, manager.removePeer)
|
||||||
manager.SubProtocol = p2p.Protocol{
|
manager.SubProtocol = p2p.Protocol{
|
||||||
Name: "eth",
|
Name: "eth",
|
||||||
Version: uint(protocolVersion),
|
Version: uint(protocolVersion),
|
||||||
|
@ -11,7 +11,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
@ -168,8 +167,7 @@ func newProtocolManagerForTesting(txAdded chan<- []*types.Transaction) *Protocol
|
|||||||
db, _ = ethdb.NewMemDatabase()
|
db, _ = ethdb.NewMemDatabase()
|
||||||
chain, _ = core.NewChainManager(core.GenesisBlock(0, db), db, db, core.FakePow{}, em)
|
chain, _ = core.NewChainManager(core.GenesisBlock(0, db), db, db, core.FakePow{}, em)
|
||||||
txpool = &fakeTxPool{added: txAdded}
|
txpool = &fakeTxPool{added: txAdded}
|
||||||
dl = downloader.New(em, chain.HasBlock, chain.GetBlock)
|
pm = NewProtocolManager(ProtocolVersion, 0, em, txpool, chain)
|
||||||
pm = NewProtocolManager(ProtocolVersion, 0, em, txpool, chain, dl)
|
|
||||||
)
|
)
|
||||||
pm.Start()
|
pm.Start()
|
||||||
return pm
|
return pm
|
||||||
|
132
eth/sync.go
132
eth/sync.go
@ -1,14 +1,11 @@
|
|||||||
package eth
|
package eth
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"math"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
|
||||||
"github.com/ethereum/go-ethereum/logger"
|
"github.com/ethereum/go-ethereum/logger"
|
||||||
"github.com/ethereum/go-ethereum/logger/glog"
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||||
@ -16,12 +13,10 @@ import (
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available
|
forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available
|
||||||
blockProcCycle = 500 * time.Millisecond // Time interval to check for new blocks to process
|
|
||||||
notifyCheckCycle = 100 * time.Millisecond // Time interval to allow hash notifies to fulfill before hard fetching
|
notifyCheckCycle = 100 * time.Millisecond // Time interval to allow hash notifies to fulfill before hard fetching
|
||||||
notifyArriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
|
notifyArriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
|
||||||
notifyFetchTimeout = 5 * time.Second // Maximum alloted time to return an explicitly requested block
|
notifyFetchTimeout = 5 * time.Second // Maximum alloted time to return an explicitly requested block
|
||||||
minDesiredPeerCount = 5 // Amount of peers desired to start syncing
|
minDesiredPeerCount = 5 // Amount of peers desired to start syncing
|
||||||
blockProcAmount = 256
|
|
||||||
|
|
||||||
// This is the target size for the packs of transactions sent by txsyncLoop.
|
// This is the target size for the packs of transactions sent by txsyncLoop.
|
||||||
// A pack can get larger than this if a single transactions exceeds this size.
|
// A pack can get larger than this if a single transactions exceeds this size.
|
||||||
@ -127,10 +122,11 @@ func (pm *ProtocolManager) txsyncLoop() {
|
|||||||
// fetcher is responsible for collecting hash notifications, and periodically
|
// fetcher is responsible for collecting hash notifications, and periodically
|
||||||
// checking all unknown ones and individually fetching them.
|
// checking all unknown ones and individually fetching them.
|
||||||
func (pm *ProtocolManager) fetcher() {
|
func (pm *ProtocolManager) fetcher() {
|
||||||
announces := make(map[common.Hash]*blockAnnounce)
|
announces := make(map[common.Hash][]*blockAnnounce)
|
||||||
request := make(map[*peer][]common.Hash)
|
request := make(map[*peer][]common.Hash)
|
||||||
pending := make(map[common.Hash]*blockAnnounce)
|
pending := make(map[common.Hash]*blockAnnounce)
|
||||||
cycle := time.Tick(notifyCheckCycle)
|
cycle := time.Tick(notifyCheckCycle)
|
||||||
|
done := make(chan common.Hash)
|
||||||
|
|
||||||
// Iterate the block fetching until a quit is requested
|
// Iterate the block fetching until a quit is requested
|
||||||
for {
|
for {
|
||||||
@ -139,8 +135,17 @@ func (pm *ProtocolManager) fetcher() {
|
|||||||
// A batch of hashes the notified, schedule them for retrieval
|
// A batch of hashes the notified, schedule them for retrieval
|
||||||
glog.V(logger.Debug).Infof("Scheduling %d hash announcements from %s", len(notifications), notifications[0].peer.id)
|
glog.V(logger.Debug).Infof("Scheduling %d hash announcements from %s", len(notifications), notifications[0].peer.id)
|
||||||
for _, announce := range notifications {
|
for _, announce := range notifications {
|
||||||
announces[announce.hash] = announce
|
// Skip if it's already pending fetch
|
||||||
|
if _, ok := pending[announce.hash]; ok {
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
// Otherwise queue up the peer as a potential source
|
||||||
|
announces[announce.hash] = append(announces[announce.hash], announce)
|
||||||
|
}
|
||||||
|
|
||||||
|
case hash := <-done:
|
||||||
|
// A pending import finished, remove all traces
|
||||||
|
delete(pending, hash)
|
||||||
|
|
||||||
case <-cycle:
|
case <-cycle:
|
||||||
// Clean up any expired block fetches
|
// Clean up any expired block fetches
|
||||||
@ -150,8 +155,9 @@ func (pm *ProtocolManager) fetcher() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Check if any notified blocks failed to arrive
|
// Check if any notified blocks failed to arrive
|
||||||
for hash, announce := range announces {
|
for hash, all := range announces {
|
||||||
if time.Since(announce.time) > notifyArriveTimeout {
|
if time.Since(all[0].time) > notifyArriveTimeout {
|
||||||
|
announce := all[rand.Intn(len(all))]
|
||||||
if !pm.chainman.HasBlock(hash) {
|
if !pm.chainman.HasBlock(hash) {
|
||||||
request[announce.peer] = append(request[announce.peer], hash)
|
request[announce.peer] = append(request[announce.peer], hash)
|
||||||
pending[hash] = announce
|
pending[hash] = announce
|
||||||
@ -165,7 +171,7 @@ func (pm *ProtocolManager) fetcher() {
|
|||||||
// Send out all block requests
|
// Send out all block requests
|
||||||
for peer, hashes := range request {
|
for peer, hashes := range request {
|
||||||
glog.V(logger.Debug).Infof("Explicitly fetching %d blocks from %s", len(hashes), peer.id)
|
glog.V(logger.Debug).Infof("Explicitly fetching %d blocks from %s", len(hashes), peer.id)
|
||||||
peer.requestBlocks(hashes)
|
go peer.requestBlocks(hashes)
|
||||||
}
|
}
|
||||||
request = make(map[*peer][]common.Hash)
|
request = make(map[*peer][]common.Hash)
|
||||||
|
|
||||||
@ -200,24 +206,32 @@ func (pm *ProtocolManager) fetcher() {
|
|||||||
case <-pm.quitSync:
|
case <-pm.quitSync:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// If any explicit fetches were replied to, import them
|
|
||||||
if count := len(explicit); count > 0 {
|
|
||||||
glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", count)
|
|
||||||
|
|
||||||
// Create a closure with the retrieved blocks and origin peers
|
// Create a closure with the retrieved blocks and origin peers
|
||||||
peers := make([]*peer, 0, count)
|
peers := make([]*peer, 0, len(explicit))
|
||||||
blocks := make([]*types.Block, 0, count)
|
blocks = make([]*types.Block, 0, len(explicit))
|
||||||
for _, block := range explicit {
|
for _, block := range explicit {
|
||||||
hash := block.Hash()
|
hash := block.Hash()
|
||||||
if announce := pending[hash]; announce != nil {
|
if announce := pending[hash]; announce != nil {
|
||||||
|
// Drop the block if it surely cannot fit
|
||||||
|
if pm.chainman.HasBlock(hash) || !pm.chainman.HasBlock(block.ParentHash()) {
|
||||||
|
// delete(pending, hash) // if we drop, it will re-fetch it, wait for timeout?
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Otherwise accumulate for import
|
||||||
peers = append(peers, announce.peer)
|
peers = append(peers, announce.peer)
|
||||||
blocks = append(blocks, block)
|
blocks = append(blocks, block)
|
||||||
|
|
||||||
delete(pending, hash)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Run the importer on a new thread
|
// If any explicit fetches were replied to, import them
|
||||||
|
if count := len(blocks); count > 0 {
|
||||||
|
glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", len(blocks))
|
||||||
go func() {
|
go func() {
|
||||||
|
// Make sure all hashes are cleaned up
|
||||||
|
for _, block := range blocks {
|
||||||
|
hash := block.Hash()
|
||||||
|
defer func() { done <- hash }()
|
||||||
|
}
|
||||||
|
// Try and actually import the blocks
|
||||||
for i := 0; i < len(blocks); i++ {
|
for i := 0; i < len(blocks); i++ {
|
||||||
if err := pm.importBlock(peers[i], blocks[i], nil); err != nil {
|
if err := pm.importBlock(peers[i], blocks[i], nil); err != nil {
|
||||||
glog.V(logger.Detail).Infof("Failed to import explicitly fetched block: %v", err)
|
glog.V(logger.Detail).Infof("Failed to import explicitly fetched block: %v", err)
|
||||||
@ -236,10 +250,10 @@ func (pm *ProtocolManager) fetcher() {
|
|||||||
// syncer is responsible for periodically synchronising with the network, both
|
// syncer is responsible for periodically synchronising with the network, both
|
||||||
// downloading hashes and blocks as well as retrieving cached ones.
|
// downloading hashes and blocks as well as retrieving cached ones.
|
||||||
func (pm *ProtocolManager) syncer() {
|
func (pm *ProtocolManager) syncer() {
|
||||||
forceSync := time.Tick(forceSyncCycle)
|
// Abort any pending syncs if we terminate
|
||||||
blockProc := time.Tick(blockProcCycle)
|
defer pm.downloader.Cancel()
|
||||||
blockProcPend := int32(0)
|
|
||||||
|
|
||||||
|
forceSync := time.Tick(forceSyncCycle)
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-pm.newPeerCh:
|
case <-pm.newPeerCh:
|
||||||
@ -253,55 +267,12 @@ func (pm *ProtocolManager) syncer() {
|
|||||||
// Force a sync even if not enough peers are present
|
// Force a sync even if not enough peers are present
|
||||||
go pm.synchronise(pm.peers.BestPeer())
|
go pm.synchronise(pm.peers.BestPeer())
|
||||||
|
|
||||||
case <-blockProc:
|
|
||||||
// Try to pull some blocks from the downloaded
|
|
||||||
if atomic.CompareAndSwapInt32(&blockProcPend, 0, 1) {
|
|
||||||
go func() {
|
|
||||||
pm.processBlocks()
|
|
||||||
atomic.StoreInt32(&blockProcPend, 0)
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
case <-pm.quitSync:
|
case <-pm.quitSync:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// processBlocks retrieves downloaded blocks from the download cache and tries
|
|
||||||
// to construct the local block chain with it. Note, since the block retrieval
|
|
||||||
// order matters, access to this function *must* be synchronized/serialized.
|
|
||||||
func (pm *ProtocolManager) processBlocks() error {
|
|
||||||
pm.wg.Add(1)
|
|
||||||
defer pm.wg.Done()
|
|
||||||
|
|
||||||
// Short circuit if no blocks are available for insertion
|
|
||||||
blocks := pm.downloader.TakeBlocks()
|
|
||||||
if len(blocks) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].RawBlock.Number(), blocks[len(blocks)-1].RawBlock.Number())
|
|
||||||
|
|
||||||
for len(blocks) != 0 && !pm.quit {
|
|
||||||
// Retrieve the first batch of blocks to insert
|
|
||||||
max := int(math.Min(float64(len(blocks)), float64(blockProcAmount)))
|
|
||||||
raw := make(types.Blocks, 0, max)
|
|
||||||
for _, block := range blocks[:max] {
|
|
||||||
raw = append(raw, block.RawBlock)
|
|
||||||
}
|
|
||||||
// Try to inset the blocks, drop the originating peer if there's an error
|
|
||||||
index, err := pm.chainman.InsertChain(raw)
|
|
||||||
if err != nil {
|
|
||||||
glog.V(logger.Debug).Infoln("Downloaded block import failed:", err)
|
|
||||||
pm.removePeer(blocks[index].OriginPeer)
|
|
||||||
pm.downloader.Cancel()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
blocks = blocks[max:]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// synchronise tries to sync up our local block chain with a remote peer, both
|
// synchronise tries to sync up our local block chain with a remote peer, both
|
||||||
// adding various sanity checks as well as wrapping it with various log entries.
|
// adding various sanity checks as well as wrapping it with various log entries.
|
||||||
func (pm *ProtocolManager) synchronise(peer *peer) {
|
func (pm *ProtocolManager) synchronise(peer *peer) {
|
||||||
@ -313,33 +284,6 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
|
|||||||
if peer.Td().Cmp(pm.chainman.Td()) <= 0 {
|
if peer.Td().Cmp(pm.chainman.Td()) <= 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// FIXME if we have the hash in our chain and the TD of the peer is
|
// Otherwise try to sync with the downloader
|
||||||
// much higher than ours, something is wrong with us or the peer.
|
pm.downloader.Synchronise(peer.id, peer.Head())
|
||||||
// Check if the hash is on our own chain
|
|
||||||
head := peer.Head()
|
|
||||||
if pm.chainman.HasBlock(head) {
|
|
||||||
glog.V(logger.Debug).Infoln("Synchronisation canceled: head already known")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Get the hashes from the peer (synchronously)
|
|
||||||
glog.V(logger.Detail).Infof("Attempting synchronisation: %v, 0x%x", peer.id, head)
|
|
||||||
|
|
||||||
err := pm.downloader.Synchronise(peer.id, head)
|
|
||||||
switch err {
|
|
||||||
case nil:
|
|
||||||
glog.V(logger.Detail).Infof("Synchronisation completed")
|
|
||||||
|
|
||||||
case downloader.ErrBusy:
|
|
||||||
glog.V(logger.Detail).Infof("Synchronisation already in progress")
|
|
||||||
|
|
||||||
case downloader.ErrTimeout, downloader.ErrBadPeer, downloader.ErrEmptyHashSet, downloader.ErrInvalidChain, downloader.ErrCrossCheckFailed:
|
|
||||||
glog.V(logger.Debug).Infof("Removing peer %v: %v", peer.id, err)
|
|
||||||
pm.removePeer(peer.id)
|
|
||||||
|
|
||||||
case downloader.ErrPendingQueue:
|
|
||||||
glog.V(logger.Debug).Infoln("Synchronisation aborted:", err)
|
|
||||||
|
|
||||||
default:
|
|
||||||
glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
2048
jsre/ethereum_js.go
2048
jsre/ethereum_js.go
File diff suppressed because it is too large
Load Diff
@ -77,7 +77,7 @@ func (m *Miner) SetGasPrice(price *big.Int) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
m.worker.gasPrice = price
|
m.worker.setGasPrice(price)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *Miner) Start(coinbase common.Address, threads int) {
|
func (self *Miner) Start(coinbase common.Address, threads int) {
|
||||||
|
@ -6,6 +6,7 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/ethereum/go-ethereum/accounts"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
@ -270,7 +271,6 @@ func (self *worker) wait() {
|
|||||||
|
|
||||||
func (self *worker) push() {
|
func (self *worker) push() {
|
||||||
if atomic.LoadInt32(&self.mining) == 1 {
|
if atomic.LoadInt32(&self.mining) == 1 {
|
||||||
self.current.block.Header().GasUsed = self.current.totalUsedGas
|
|
||||||
self.current.block.SetRoot(self.current.state.Root())
|
self.current.block.SetRoot(self.current.state.Root())
|
||||||
|
|
||||||
// push new work to agents
|
// push new work to agents
|
||||||
@ -375,6 +375,8 @@ func (self *worker) commitNewWork() {
|
|||||||
self.currentMu.Lock()
|
self.currentMu.Lock()
|
||||||
defer self.currentMu.Unlock()
|
defer self.currentMu.Unlock()
|
||||||
|
|
||||||
|
tstart := time.Now()
|
||||||
|
|
||||||
previous := self.current
|
previous := self.current
|
||||||
self.makeCurrent()
|
self.makeCurrent()
|
||||||
current := self.current
|
current := self.current
|
||||||
@ -410,7 +412,7 @@ func (self *worker) commitNewWork() {
|
|||||||
|
|
||||||
// We only care about logging if we're actually mining
|
// We only care about logging if we're actually mining
|
||||||
if atomic.LoadInt32(&self.mining) == 1 {
|
if atomic.LoadInt32(&self.mining) == 1 {
|
||||||
glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles\n", current.block.Number(), current.tcount, len(uncles))
|
glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles. Took %v\n", current.block.Number(), current.tcount, len(uncles), time.Since(tstart))
|
||||||
self.logLocalMinedBlocks(previous)
|
self.logLocalMinedBlocks(previous)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -438,7 +440,6 @@ func (self *worker) commitUncle(uncle *types.Header) error {
|
|||||||
// Error not unique
|
// Error not unique
|
||||||
return core.UncleError("Uncle not unique")
|
return core.UncleError("Uncle not unique")
|
||||||
}
|
}
|
||||||
self.current.uncles.Add(uncle.Hash())
|
|
||||||
|
|
||||||
if !self.current.ancestors.Has(uncle.ParentHash) {
|
if !self.current.ancestors.Has(uncle.ParentHash) {
|
||||||
return core.UncleError(fmt.Sprintf("Uncle's parent unknown (%x)", uncle.ParentHash[0:4]))
|
return core.UncleError(fmt.Sprintf("Uncle's parent unknown (%x)", uncle.ParentHash[0:4]))
|
||||||
@ -447,6 +448,7 @@ func (self *worker) commitUncle(uncle *types.Header) error {
|
|||||||
if self.current.family.Has(uncle.Hash()) {
|
if self.current.family.Has(uncle.Hash()) {
|
||||||
return core.UncleError(fmt.Sprintf("Uncle already in family (%x)", uncle.Hash()))
|
return core.UncleError(fmt.Sprintf("Uncle already in family (%x)", uncle.Hash()))
|
||||||
}
|
}
|
||||||
|
self.current.uncles.Add(uncle.Hash())
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -510,6 +512,8 @@ func (self *worker) commitTransactions(transactions types.Transactions) {
|
|||||||
current.tcount++
|
current.tcount++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
self.current.block.Header().GasUsed = self.current.totalUsedGas
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *worker) commitTransaction(tx *types.Transaction) error {
|
func (self *worker) commitTransaction(tx *types.Transaction) error {
|
||||||
|
@ -13,11 +13,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
quickrand = rand.New(rand.NewSource(time.Now().Unix()))
|
|
||||||
quickcfg = &quick.Config{MaxCount: 5000, Rand: quickrand}
|
|
||||||
)
|
|
||||||
|
|
||||||
var parseNodeTests = []struct {
|
var parseNodeTests = []struct {
|
||||||
rawurl string
|
rawurl string
|
||||||
wantError string
|
wantError string
|
||||||
@ -176,7 +171,7 @@ func TestNodeID_distcmp(t *testing.T) {
|
|||||||
bbig := new(big.Int).SetBytes(b[:])
|
bbig := new(big.Int).SetBytes(b[:])
|
||||||
return new(big.Int).Xor(tbig, abig).Cmp(new(big.Int).Xor(tbig, bbig))
|
return new(big.Int).Xor(tbig, abig).Cmp(new(big.Int).Xor(tbig, bbig))
|
||||||
}
|
}
|
||||||
if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg); err != nil {
|
if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg()); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -195,7 +190,7 @@ func TestNodeID_logdist(t *testing.T) {
|
|||||||
abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:])
|
abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:])
|
||||||
return new(big.Int).Xor(abig, bbig).BitLen()
|
return new(big.Int).Xor(abig, bbig).BitLen()
|
||||||
}
|
}
|
||||||
if err := quick.CheckEqual(logdist, logdistBig, quickcfg); err != nil {
|
if err := quick.CheckEqual(logdist, logdistBig, quickcfg()); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -211,9 +206,10 @@ func TestNodeID_logdistEqual(t *testing.T) {
|
|||||||
func TestNodeID_hashAtDistance(t *testing.T) {
|
func TestNodeID_hashAtDistance(t *testing.T) {
|
||||||
// we don't use quick.Check here because its output isn't
|
// we don't use quick.Check here because its output isn't
|
||||||
// very helpful when the test fails.
|
// very helpful when the test fails.
|
||||||
for i := 0; i < quickcfg.MaxCount; i++ {
|
cfg := quickcfg()
|
||||||
a := gen(common.Hash{}, quickrand).(common.Hash)
|
for i := 0; i < cfg.MaxCount; i++ {
|
||||||
dist := quickrand.Intn(len(common.Hash{}) * 8)
|
a := gen(common.Hash{}, cfg.Rand).(common.Hash)
|
||||||
|
dist := cfg.Rand.Intn(len(common.Hash{}) * 8)
|
||||||
result := hashAtDistance(a, dist)
|
result := hashAtDistance(a, dist)
|
||||||
actualdist := logdist(result, a)
|
actualdist := logdist(result, a)
|
||||||
|
|
||||||
@ -225,7 +221,14 @@ func TestNodeID_hashAtDistance(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: this can be dropped when we require Go >= 1.5
|
func quickcfg() *quick.Config {
|
||||||
|
return &quick.Config{
|
||||||
|
MaxCount: 5000,
|
||||||
|
Rand: rand.New(rand.NewSource(time.Now().Unix())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: The Generate method can be dropped when we require Go >= 1.5
|
||||||
// because testing/quick learned to generate arrays in 1.5.
|
// because testing/quick learned to generate arrays in 1.5.
|
||||||
|
|
||||||
func (NodeID) Generate(rand *rand.Rand, size int) reflect.Value {
|
func (NodeID) Generate(rand *rand.Rand, size int) reflect.Value {
|
||||||
|
@ -40,6 +40,8 @@ type Table struct {
|
|||||||
bonding map[NodeID]*bondproc
|
bonding map[NodeID]*bondproc
|
||||||
bondslots chan struct{} // limits total number of active bonding processes
|
bondslots chan struct{} // limits total number of active bonding processes
|
||||||
|
|
||||||
|
nodeAddedHook func(*Node) // for testing
|
||||||
|
|
||||||
net transport
|
net transport
|
||||||
self *Node // metadata of the local node
|
self *Node // metadata of the local node
|
||||||
}
|
}
|
||||||
@ -431,6 +433,9 @@ func (tab *Table) pingreplace(new *Node, b *bucket) {
|
|||||||
}
|
}
|
||||||
copy(b.entries[1:], b.entries)
|
copy(b.entries[1:], b.entries)
|
||||||
b.entries[0] = new
|
b.entries[0] = new
|
||||||
|
if tab.nodeAddedHook != nil {
|
||||||
|
tab.nodeAddedHook(new)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ping a remote endpoint and wait for a reply, also updating the node database
|
// ping a remote endpoint and wait for a reply, also updating the node database
|
||||||
@ -466,6 +471,9 @@ outer:
|
|||||||
}
|
}
|
||||||
if len(bucket.entries) < bucketSize {
|
if len(bucket.entries) < bucketSize {
|
||||||
bucket.entries = append(bucket.entries, n)
|
bucket.entries = append(bucket.entries, n)
|
||||||
|
if tab.nodeAddedHook != nil {
|
||||||
|
tab.nodeAddedHook(n)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -9,6 +9,7 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
"testing/quick"
|
"testing/quick"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
@ -74,7 +75,7 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
|
|||||||
t.Parallel()
|
t.Parallel()
|
||||||
cfg := &quick.Config{
|
cfg := &quick.Config{
|
||||||
MaxCount: 1000,
|
MaxCount: 1000,
|
||||||
Rand: quickrand,
|
Rand: rand.New(rand.NewSource(time.Now().Unix())),
|
||||||
Values: func(args []reflect.Value, rand *rand.Rand) {
|
Values: func(args []reflect.Value, rand *rand.Rand) {
|
||||||
// generate a random list of nodes. this will be the content of the bucket.
|
// generate a random list of nodes. this will be the content of the bucket.
|
||||||
n := rand.Intn(bucketSize-1) + 1
|
n := rand.Intn(bucketSize-1) + 1
|
||||||
@ -205,7 +206,7 @@ func TestTable_closest(t *testing.T) {
|
|||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if err := quick.Check(test, quickcfg); err != nil {
|
if err := quick.Check(test, quickcfg()); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -213,7 +214,7 @@ func TestTable_closest(t *testing.T) {
|
|||||||
func TestTable_ReadRandomNodesGetAll(t *testing.T) {
|
func TestTable_ReadRandomNodesGetAll(t *testing.T) {
|
||||||
cfg := &quick.Config{
|
cfg := &quick.Config{
|
||||||
MaxCount: 200,
|
MaxCount: 200,
|
||||||
Rand: quickrand,
|
Rand: rand.New(rand.NewSource(time.Now().Unix())),
|
||||||
Values: func(args []reflect.Value, rand *rand.Rand) {
|
Values: func(args []reflect.Value, rand *rand.Rand) {
|
||||||
args[0] = reflect.ValueOf(make([]*Node, rand.Intn(1000)))
|
args[0] = reflect.ValueOf(make([]*Node, rand.Intn(1000)))
|
||||||
},
|
},
|
||||||
@ -221,7 +222,7 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) {
|
|||||||
test := func(buf []*Node) bool {
|
test := func(buf []*Node) bool {
|
||||||
tab := newTable(nil, NodeID{}, &net.UDPAddr{}, "")
|
tab := newTable(nil, NodeID{}, &net.UDPAddr{}, "")
|
||||||
for i := 0; i < len(buf); i++ {
|
for i := 0; i < len(buf); i++ {
|
||||||
ld := quickrand.Intn(len(tab.buckets))
|
ld := cfg.Rand.Intn(len(tab.buckets))
|
||||||
tab.add([]*Node{nodeAtDistance(tab.self.sha, ld)})
|
tab.add([]*Node{nodeAtDistance(tab.self.sha, ld)})
|
||||||
}
|
}
|
||||||
gotN := tab.ReadRandomNodes(buf)
|
gotN := tab.ReadRandomNodes(buf)
|
||||||
|
@ -234,14 +234,12 @@ func TestUDP_findnodeMultiReply(t *testing.T) {
|
|||||||
|
|
||||||
func TestUDP_successfulPing(t *testing.T) {
|
func TestUDP_successfulPing(t *testing.T) {
|
||||||
test := newUDPTest(t)
|
test := newUDPTest(t)
|
||||||
|
added := make(chan *Node, 1)
|
||||||
|
test.table.nodeAddedHook = func(n *Node) { added <- n }
|
||||||
defer test.table.Close()
|
defer test.table.Close()
|
||||||
|
|
||||||
done := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
// The remote side sends a ping packet to initiate the exchange.
|
// The remote side sends a ping packet to initiate the exchange.
|
||||||
test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: Version, Expiration: futureExp})
|
go test.packetIn(nil, pingPacket, &ping{From: testRemote, To: testLocalAnnounced, Version: Version, Expiration: futureExp})
|
||||||
close(done)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// the ping is replied to.
|
// the ping is replied to.
|
||||||
test.waitPacketOut(func(p *pong) {
|
test.waitPacketOut(func(p *pong) {
|
||||||
@ -277,35 +275,26 @@ func TestUDP_successfulPing(t *testing.T) {
|
|||||||
})
|
})
|
||||||
test.packetIn(nil, pongPacket, &pong{Expiration: futureExp})
|
test.packetIn(nil, pongPacket, &pong{Expiration: futureExp})
|
||||||
|
|
||||||
// ping should return shortly after getting the pong packet.
|
// the node should be added to the table shortly after getting the
|
||||||
<-done
|
// pong packet.
|
||||||
|
select {
|
||||||
// check that the node was added.
|
case n := <-added:
|
||||||
rid := PubkeyID(&test.remotekey.PublicKey)
|
rid := PubkeyID(&test.remotekey.PublicKey)
|
||||||
rnode := find(test.table, rid)
|
if n.ID != rid {
|
||||||
if rnode == nil {
|
t.Errorf("node has wrong ID: got %v, want %v", n.ID, rid)
|
||||||
t.Fatalf("node %v not found in table", rid)
|
|
||||||
}
|
}
|
||||||
if !bytes.Equal(rnode.IP, test.remoteaddr.IP) {
|
if !bytes.Equal(n.IP, test.remoteaddr.IP) {
|
||||||
t.Errorf("node has wrong IP: got %v, want: %v", rnode.IP, test.remoteaddr.IP)
|
t.Errorf("node has wrong IP: got %v, want: %v", n.IP, test.remoteaddr.IP)
|
||||||
}
|
}
|
||||||
if int(rnode.UDP) != test.remoteaddr.Port {
|
if int(n.UDP) != test.remoteaddr.Port {
|
||||||
t.Errorf("node has wrong UDP port: got %v, want: %v", rnode.UDP, test.remoteaddr.Port)
|
t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP, test.remoteaddr.Port)
|
||||||
}
|
}
|
||||||
if rnode.TCP != testRemote.TCP {
|
if n.TCP != testRemote.TCP {
|
||||||
t.Errorf("node has wrong TCP port: got %v, want: %v", rnode.TCP, testRemote.TCP)
|
t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP, testRemote.TCP)
|
||||||
}
|
}
|
||||||
}
|
case <-time.After(2 * time.Second):
|
||||||
|
t.Errorf("node was not added within 2 seconds")
|
||||||
func find(tab *Table, id NodeID) *Node {
|
|
||||||
for _, b := range tab.buckets {
|
|
||||||
for _, e := range b.entries {
|
|
||||||
if e.ID == id {
|
|
||||||
return e
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// dgramPipe is a fake UDP socket. It queues all sent datagrams.
|
// dgramPipe is a fake UDP socket. It queues all sent datagrams.
|
||||||
|
69
p2p/peer.go
69
p2p/peer.go
@ -115,41 +115,60 @@ func newPeer(conn *conn, protocols []Protocol) *Peer {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p *Peer) run() DiscReason {
|
func (p *Peer) run() DiscReason {
|
||||||
readErr := make(chan error, 1)
|
var (
|
||||||
|
writeStart = make(chan struct{}, 1)
|
||||||
|
writeErr = make(chan error, 1)
|
||||||
|
readErr = make(chan error, 1)
|
||||||
|
reason DiscReason
|
||||||
|
requested bool
|
||||||
|
)
|
||||||
p.wg.Add(2)
|
p.wg.Add(2)
|
||||||
go p.readLoop(readErr)
|
go p.readLoop(readErr)
|
||||||
go p.pingLoop()
|
go p.pingLoop()
|
||||||
|
|
||||||
p.startProtocols()
|
// Start all protocol handlers.
|
||||||
|
writeStart <- struct{}{}
|
||||||
|
p.startProtocols(writeStart, writeErr)
|
||||||
|
|
||||||
// Wait for an error or disconnect.
|
// Wait for an error or disconnect.
|
||||||
var (
|
loop:
|
||||||
reason DiscReason
|
for {
|
||||||
requested bool
|
|
||||||
)
|
|
||||||
select {
|
select {
|
||||||
|
case err := <-writeErr:
|
||||||
|
// A write finished. Allow the next write to start if
|
||||||
|
// there was no error.
|
||||||
|
if err != nil {
|
||||||
|
glog.V(logger.Detail).Infof("%v: write error: %v\n", p, err)
|
||||||
|
reason = DiscNetworkError
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
writeStart <- struct{}{}
|
||||||
case err := <-readErr:
|
case err := <-readErr:
|
||||||
if r, ok := err.(DiscReason); ok {
|
if r, ok := err.(DiscReason); ok {
|
||||||
|
glog.V(logger.Debug).Infof("%v: remote requested disconnect: %v\n", p, r)
|
||||||
|
requested = true
|
||||||
reason = r
|
reason = r
|
||||||
} else {
|
} else {
|
||||||
// Note: We rely on protocols to abort if there is a write
|
glog.V(logger.Detail).Infof("%v: read error: %v\n", p, err)
|
||||||
// error. It might be more robust to handle them here as well.
|
|
||||||
glog.V(logger.Detail).Infof("%v: Read error: %v\n", p, err)
|
|
||||||
reason = DiscNetworkError
|
reason = DiscNetworkError
|
||||||
}
|
}
|
||||||
|
break loop
|
||||||
case err := <-p.protoErr:
|
case err := <-p.protoErr:
|
||||||
reason = discReasonForError(err)
|
reason = discReasonForError(err)
|
||||||
|
glog.V(logger.Debug).Infof("%v: protocol error: %v (%v)\n", p, err, reason)
|
||||||
|
break loop
|
||||||
case reason = <-p.disc:
|
case reason = <-p.disc:
|
||||||
requested = true
|
glog.V(logger.Debug).Infof("%v: locally requested disconnect: %v\n", p, reason)
|
||||||
|
break loop
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
close(p.closed)
|
close(p.closed)
|
||||||
p.rw.close(reason)
|
p.rw.close(reason)
|
||||||
p.wg.Wait()
|
p.wg.Wait()
|
||||||
|
|
||||||
if requested {
|
if requested {
|
||||||
reason = DiscRequested
|
reason = DiscRequested
|
||||||
}
|
}
|
||||||
glog.V(logger.Debug).Infof("%v: Disconnected: %v\n", p, reason)
|
|
||||||
return reason
|
return reason
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -196,7 +215,6 @@ func (p *Peer) handle(msg Msg) error {
|
|||||||
// This is the last message. We don't need to discard or
|
// This is the last message. We don't need to discard or
|
||||||
// check errors because, the connection will be closed after it.
|
// check errors because, the connection will be closed after it.
|
||||||
rlp.Decode(msg.Payload, &reason)
|
rlp.Decode(msg.Payload, &reason)
|
||||||
glog.V(logger.Debug).Infof("%v: Disconnect Requested: %v\n", p, reason[0])
|
|
||||||
return reason[0]
|
return reason[0]
|
||||||
case msg.Code < baseProtocolLength:
|
case msg.Code < baseProtocolLength:
|
||||||
// ignore other base protocol messages
|
// ignore other base protocol messages
|
||||||
@ -247,11 +265,13 @@ outer:
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Peer) startProtocols() {
|
func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error) {
|
||||||
p.wg.Add(len(p.running))
|
p.wg.Add(len(p.running))
|
||||||
for _, proto := range p.running {
|
for _, proto := range p.running {
|
||||||
proto := proto
|
proto := proto
|
||||||
proto.closed = p.closed
|
proto.closed = p.closed
|
||||||
|
proto.wstart = writeStart
|
||||||
|
proto.werr = writeErr
|
||||||
glog.V(logger.Detail).Infof("%v: Starting protocol %s/%d\n", p, proto.Name, proto.Version)
|
glog.V(logger.Detail).Infof("%v: Starting protocol %s/%d\n", p, proto.Name, proto.Version)
|
||||||
go func() {
|
go func() {
|
||||||
err := proto.Run(p, proto)
|
err := proto.Run(p, proto)
|
||||||
@ -280,18 +300,31 @@ func (p *Peer) getProto(code uint64) (*protoRW, error) {
|
|||||||
|
|
||||||
type protoRW struct {
|
type protoRW struct {
|
||||||
Protocol
|
Protocol
|
||||||
in chan Msg
|
in chan Msg // receices read messages
|
||||||
closed <-chan struct{}
|
closed <-chan struct{} // receives when peer is shutting down
|
||||||
|
wstart <-chan struct{} // receives when write may start
|
||||||
|
werr chan<- error // for write results
|
||||||
offset uint64
|
offset uint64
|
||||||
w MsgWriter
|
w MsgWriter
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rw *protoRW) WriteMsg(msg Msg) error {
|
func (rw *protoRW) WriteMsg(msg Msg) (err error) {
|
||||||
if msg.Code >= rw.Length {
|
if msg.Code >= rw.Length {
|
||||||
return newPeerError(errInvalidMsgCode, "not handled")
|
return newPeerError(errInvalidMsgCode, "not handled")
|
||||||
}
|
}
|
||||||
msg.Code += rw.offset
|
msg.Code += rw.offset
|
||||||
return rw.w.WriteMsg(msg)
|
select {
|
||||||
|
case <-rw.wstart:
|
||||||
|
err = rw.w.WriteMsg(msg)
|
||||||
|
// Report write status back to Peer.run. It will initiate
|
||||||
|
// shutdown if the error is non-nil and unblock the next write
|
||||||
|
// otherwise. The calling protocol code should exit for errors
|
||||||
|
// as well but we don't want to rely on that.
|
||||||
|
rw.werr <- err
|
||||||
|
case <-rw.closed:
|
||||||
|
err = fmt.Errorf("shutting down")
|
||||||
|
}
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rw *protoRW) ReadMsg() (Msg, error) {
|
func (rw *protoRW) ReadMsg() (Msg, error) {
|
||||||
|
@ -121,7 +121,7 @@ func TestPeerDisconnect(t *testing.T) {
|
|||||||
}
|
}
|
||||||
select {
|
select {
|
||||||
case reason := <-disc:
|
case reason := <-disc:
|
||||||
if reason != DiscQuitting {
|
if reason != DiscRequested {
|
||||||
t.Errorf("run returned wrong reason: got %v, want %v", reason, DiscRequested)
|
t.Errorf("run returned wrong reason: got %v, want %v", reason, DiscRequested)
|
||||||
}
|
}
|
||||||
case <-time.After(500 * time.Millisecond):
|
case <-time.After(500 * time.Millisecond):
|
||||||
|
@ -117,7 +117,6 @@ func TestServerDial(t *testing.T) {
|
|||||||
t.Error("accept error:", err)
|
t.Error("accept error:", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
conn.Close()
|
|
||||||
accepted <- conn
|
accepted <- conn
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@ -134,6 +133,8 @@ func TestServerDial(t *testing.T) {
|
|||||||
|
|
||||||
select {
|
select {
|
||||||
case conn := <-accepted:
|
case conn := <-accepted:
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case peer := <-connected:
|
case peer := <-connected:
|
||||||
if peer.ID() != remid {
|
if peer.ID() != remid {
|
||||||
|
@ -59,7 +59,7 @@ func (api *EthereumApi) GetRequestReply(req *RpcRequest, reply *interface{}) err
|
|||||||
case "eth_mining":
|
case "eth_mining":
|
||||||
*reply = api.xeth().IsMining()
|
*reply = api.xeth().IsMining()
|
||||||
case "eth_gasPrice":
|
case "eth_gasPrice":
|
||||||
v := xeth.DefaultGasPrice()
|
v := api.xeth().DefaultGasPrice()
|
||||||
*reply = newHexNum(v.Bytes())
|
*reply = newHexNum(v.Bytes())
|
||||||
case "eth_accounts":
|
case "eth_accounts":
|
||||||
*reply = api.xeth().Accounts()
|
*reply = api.xeth().Accounts()
|
||||||
|
243
rpc/api/admin.go
Normal file
243
rpc/api/admin.go
Normal file
@ -0,0 +1,243 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
AdminApiversion = "1.0"
|
||||||
|
importBatchSize = 2500
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// mapping between methods and handlers
|
||||||
|
AdminMapping = map[string]adminhandler{
|
||||||
|
// "admin_startRPC": (*adminApi).StartRPC,
|
||||||
|
// "admin_stopRPC": (*adminApi).StopRPC,
|
||||||
|
"admin_addPeer": (*adminApi).AddPeer,
|
||||||
|
"admin_peers": (*adminApi).Peers,
|
||||||
|
"admin_nodeInfo": (*adminApi).NodeInfo,
|
||||||
|
"admin_exportChain": (*adminApi).ExportChain,
|
||||||
|
"admin_importChain": (*adminApi).ImportChain,
|
||||||
|
"admin_verbosity": (*adminApi).Verbosity,
|
||||||
|
"admin_chainSyncStatus": (*adminApi).ChainSyncStatus,
|
||||||
|
"admin_setSolc": (*adminApi).SetSolc,
|
||||||
|
"admin_datadir": (*adminApi).DataDir,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// admin callback handler
|
||||||
|
type adminhandler func(*adminApi, *shared.Request) (interface{}, error)
|
||||||
|
|
||||||
|
// admin api provider
|
||||||
|
type adminApi struct {
|
||||||
|
xeth *xeth.XEth
|
||||||
|
ethereum *eth.Ethereum
|
||||||
|
methods map[string]adminhandler
|
||||||
|
codec codec.ApiCoder
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a new admin api instance
|
||||||
|
func NewAdminApi(xeth *xeth.XEth, ethereum *eth.Ethereum, coder codec.Codec) *adminApi {
|
||||||
|
return &adminApi{
|
||||||
|
xeth: xeth,
|
||||||
|
ethereum: ethereum,
|
||||||
|
methods: AdminMapping,
|
||||||
|
codec: coder.New(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// collection with supported methods
|
||||||
|
func (self *adminApi) Methods() []string {
|
||||||
|
methods := make([]string, len(self.methods))
|
||||||
|
i := 0
|
||||||
|
for k := range self.methods {
|
||||||
|
methods[i] = k
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return methods
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute given request
|
||||||
|
func (self *adminApi) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if callback, ok := self.methods[req.Method]; ok {
|
||||||
|
return callback(self, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, &shared.NotImplementedError{req.Method}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) Name() string {
|
||||||
|
return AdminApiName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) ApiVersion() string {
|
||||||
|
return AdminApiversion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) AddPeer(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(AddPeerArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
err := self.ethereum.AddPeer(args.Url)
|
||||||
|
if err == nil {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) Peers(req *shared.Request) (interface{}, error) {
|
||||||
|
return self.ethereum.PeersInfo(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) StartRPC(req *shared.Request) (interface{}, error) {
|
||||||
|
return false, nil
|
||||||
|
// Enable when http rpc interface is refactored to prevent import cycles
|
||||||
|
// args := new(StartRpcArgs)
|
||||||
|
// if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
// return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// cfg := rpc.RpcConfig{
|
||||||
|
// ListenAddress: args.Address,
|
||||||
|
// ListenPort: args.Port,
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// err := rpc.Start(self.xeth, cfg)
|
||||||
|
// if err == nil {
|
||||||
|
// return true, nil
|
||||||
|
// }
|
||||||
|
// return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) StopRPC(req *shared.Request) (interface{}, error) {
|
||||||
|
return false, nil
|
||||||
|
// Enable when http rpc interface is refactored to prevent import cycles
|
||||||
|
// rpc.Stop()
|
||||||
|
// return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) NodeInfo(req *shared.Request) (interface{}, error) {
|
||||||
|
return self.ethereum.NodeInfo(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) DataDir(req *shared.Request) (interface{}, error) {
|
||||||
|
return self.ethereum.DataDir, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasAllBlocks(chain *core.ChainManager, bs []*types.Block) bool {
|
||||||
|
for _, b := range bs {
|
||||||
|
if !chain.HasBlock(b.Hash()) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) ImportChain(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(ImportExportChainArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
fh, err := os.Open(args.Filename)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
stream := rlp.NewStream(fh, 0)
|
||||||
|
|
||||||
|
// Run actual the import.
|
||||||
|
blocks := make(types.Blocks, importBatchSize)
|
||||||
|
n := 0
|
||||||
|
for batch := 0; ; batch++ {
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for ; i < importBatchSize; i++ {
|
||||||
|
var b types.Block
|
||||||
|
if err := stream.Decode(&b); err == io.EOF {
|
||||||
|
break
|
||||||
|
} else if err != nil {
|
||||||
|
return false, fmt.Errorf("at block %d: %v", n, err)
|
||||||
|
}
|
||||||
|
blocks[i] = &b
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
if i == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Import the batch.
|
||||||
|
if hasAllBlocks(self.ethereum.ChainManager(), blocks[:i]) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, err := self.ethereum.ChainManager().InsertChain(blocks[:i]); err != nil {
|
||||||
|
return false, fmt.Errorf("invalid block %d: %v", n, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) ExportChain(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(ImportExportChainArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
fh, err := os.OpenFile(args.Filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
if err := self.ethereum.ChainManager().Export(fh); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) Verbosity(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(VerbosityArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
glog.SetV(args.Level)
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) ChainSyncStatus(req *shared.Request) (interface{}, error) {
|
||||||
|
pending, cached, importing, estimate := self.ethereum.Downloader().Stats()
|
||||||
|
|
||||||
|
return map[string]interface{}{
|
||||||
|
"blocksAvailable": pending,
|
||||||
|
"blocksWaitingForImport": cached,
|
||||||
|
"importing": importing,
|
||||||
|
"estimate": estimate.String(),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *adminApi) SetSolc(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(SetSolcArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
solc, err := self.xeth.SetSolc(args.Path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return solc.Info(), nil
|
||||||
|
}
|
97
rpc/api/admin_args.go
Normal file
97
rpc/api/admin_args.go
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
type AddPeerArgs struct {
|
||||||
|
Url string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *AddPeerArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) != 1 {
|
||||||
|
return shared.NewDecodeParamError("Expected enode as argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
urlstr, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("url", "not a string")
|
||||||
|
}
|
||||||
|
args.Url = urlstr
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type ImportExportChainArgs struct {
|
||||||
|
Filename string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *ImportExportChainArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) != 1 {
|
||||||
|
return shared.NewDecodeParamError("Expected filename as argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
filename, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("filename", "not a string")
|
||||||
|
}
|
||||||
|
args.Filename = filename
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type VerbosityArgs struct {
|
||||||
|
Level int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *VerbosityArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) != 1 {
|
||||||
|
return shared.NewDecodeParamError("Expected enode as argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
level, err := numString(obj[0])
|
||||||
|
if err == nil {
|
||||||
|
args.Level = int(level.Int64())
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type SetSolcArgs struct {
|
||||||
|
Path string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *SetSolcArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) != 1 {
|
||||||
|
return shared.NewDecodeParamError("Expected path as argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
if pathstr, ok := obj[0].(string); ok {
|
||||||
|
args.Path = pathstr
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return shared.NewInvalidTypeError("path", "not a string")
|
||||||
|
}
|
68
rpc/api/admin_js.go
Normal file
68
rpc/api/admin_js.go
Normal file
@ -0,0 +1,68 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
const Admin_JS = `
|
||||||
|
web3._extend({
|
||||||
|
property: 'admin',
|
||||||
|
methods:
|
||||||
|
[
|
||||||
|
new web3._extend.Method({
|
||||||
|
name: 'addPeer',
|
||||||
|
call: 'admin_addPeer',
|
||||||
|
params: 1,
|
||||||
|
inputFormatter: [web3._extend.utils.formatInputString],
|
||||||
|
outputFormatter: web3._extend.formatters.formatOutputBool
|
||||||
|
}),
|
||||||
|
new web3._extend.Method({
|
||||||
|
name: 'exportChain',
|
||||||
|
call: 'admin_exportChain',
|
||||||
|
params: 1,
|
||||||
|
inputFormatter: [web3._extend.utils.formatInputString],
|
||||||
|
outputFormatter: function(obj) { return obj; }
|
||||||
|
}),
|
||||||
|
new web3._extend.Method({
|
||||||
|
name: 'importChain',
|
||||||
|
call: 'admin_importChain',
|
||||||
|
params: 1,
|
||||||
|
inputFormatter: [web3._extend.utils.formatInputString],
|
||||||
|
outputFormatter: function(obj) { return obj; }
|
||||||
|
}),
|
||||||
|
new web3._extend.Method({
|
||||||
|
name: 'verbosity',
|
||||||
|
call: 'admin_verbosity',
|
||||||
|
params: 1,
|
||||||
|
inputFormatter: [web3._extend.utils.formatInputInt],
|
||||||
|
outputFormatter: web3._extend.formatters.formatOutputBool
|
||||||
|
}),
|
||||||
|
new web3._extend.Method({
|
||||||
|
name: 'setSolc',
|
||||||
|
call: 'admin_setSolc',
|
||||||
|
params: 1,
|
||||||
|
inputFormatter: [web3._extend.utils.formatInputString],
|
||||||
|
outputFormatter: web3._extend.formatters.formatOutputString
|
||||||
|
})
|
||||||
|
],
|
||||||
|
properties:
|
||||||
|
[
|
||||||
|
new web3._extend.Property({
|
||||||
|
name: 'nodeInfo',
|
||||||
|
getter: 'admin_nodeInfo',
|
||||||
|
outputFormatter: web3._extend.formatters.formatOutputString
|
||||||
|
}),
|
||||||
|
new web3._extend.Property({
|
||||||
|
name: 'peers',
|
||||||
|
getter: 'admin_peers',
|
||||||
|
outputFormatter: function(obj) { return obj; }
|
||||||
|
}),
|
||||||
|
new web3._extend.Property({
|
||||||
|
name: 'datadir',
|
||||||
|
getter: 'admin_datadir',
|
||||||
|
outputFormatter: web3._extend.formatters.formatOutputString
|
||||||
|
}),
|
||||||
|
new web3._extend.Property({
|
||||||
|
name: 'chainSyncStatus',
|
||||||
|
getter: 'admin_chainSyncStatus',
|
||||||
|
outputFormatter: function(obj) { return obj; }
|
||||||
|
})
|
||||||
|
]
|
||||||
|
});
|
||||||
|
`
|
48
rpc/api/api.go
Normal file
48
rpc/api/api.go
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
AdminApiName = "admin"
|
||||||
|
EthApiName = "eth"
|
||||||
|
DebugApiName = "debug"
|
||||||
|
MergedApiName = "merged"
|
||||||
|
MinerApiName = "miner"
|
||||||
|
NetApiName = "net"
|
||||||
|
ShhApiName = "shh"
|
||||||
|
TxPoolApiName = "txpool"
|
||||||
|
PersonalApiName = "personal"
|
||||||
|
Web3ApiName = "web3"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// List with all API's which are offered over the IPC interface by default
|
||||||
|
DefaultIpcApis = strings.Join([]string{
|
||||||
|
AdminApiName, EthApiName, DebugApiName, MinerApiName, NetApiName,
|
||||||
|
ShhApiName, TxPoolApiName, PersonalApiName, Web3ApiName,
|
||||||
|
}, ",")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Ethereum RPC API interface
|
||||||
|
type EthereumApi interface {
|
||||||
|
// API identifier
|
||||||
|
Name() string
|
||||||
|
|
||||||
|
// API version
|
||||||
|
ApiVersion() string
|
||||||
|
|
||||||
|
// Execute the given request and returns the response or an error
|
||||||
|
Execute(*shared.Request) (interface{}, error)
|
||||||
|
|
||||||
|
// List of supported RCP methods this API provides
|
||||||
|
Methods() []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge multiple API's to a single API instance
|
||||||
|
func Merge(apis ...EthereumApi) EthereumApi {
|
||||||
|
return newMergedApi(apis...)
|
||||||
|
}
|
42
rpc/api/api_test.go
Normal file
42
rpc/api/api_test.go
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestParseApiString(t *testing.T) {
|
||||||
|
apis, err := ParseApiString("", codec.JSON, nil, nil)
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("Expected an err from parsing empty API string but got nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(apis) != 0 {
|
||||||
|
t.Errorf("Expected 0 apis from empty API string")
|
||||||
|
}
|
||||||
|
|
||||||
|
apis, err = ParseApiString("eth", codec.JSON, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Expected nil err from parsing empty API string but got %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(apis) != 1 {
|
||||||
|
t.Errorf("Expected 1 apis but got %d - %v", apis, apis)
|
||||||
|
}
|
||||||
|
|
||||||
|
apis, err = ParseApiString("eth,eth", codec.JSON, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Expected nil err from parsing empty API string but got \"%v\"", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(apis) != 2 {
|
||||||
|
t.Errorf("Expected 2 apis but got %d - %v", apis, apis)
|
||||||
|
}
|
||||||
|
|
||||||
|
apis, err = ParseApiString("eth,invalid", codec.JSON, nil, nil)
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("Expected an err but got no err")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
173
rpc/api/debug.go
Normal file
173
rpc/api/debug.go
Normal file
@ -0,0 +1,173 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/ethash"
|
||||||
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
DebugApiVersion = "1.0"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// mapping between methods and handlers
|
||||||
|
DebugMapping = map[string]debughandler{
|
||||||
|
"debug_dumpBlock": (*debugApi).DumpBlock,
|
||||||
|
"debug_getBlockRlp": (*debugApi).GetBlockRlp,
|
||||||
|
"debug_printBlock": (*debugApi).PrintBlock,
|
||||||
|
"debug_processBlock": (*debugApi).ProcessBlock,
|
||||||
|
"debug_seedHash": (*debugApi).SeedHash,
|
||||||
|
"debug_setHead": (*debugApi).SetHead,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// debug callback handler
|
||||||
|
type debughandler func(*debugApi, *shared.Request) (interface{}, error)
|
||||||
|
|
||||||
|
// admin api provider
|
||||||
|
type debugApi struct {
|
||||||
|
xeth *xeth.XEth
|
||||||
|
ethereum *eth.Ethereum
|
||||||
|
methods map[string]debughandler
|
||||||
|
codec codec.ApiCoder
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a new debug api instance
|
||||||
|
func NewDebugApi(xeth *xeth.XEth, ethereum *eth.Ethereum, coder codec.Codec) *debugApi {
|
||||||
|
return &debugApi{
|
||||||
|
xeth: xeth,
|
||||||
|
ethereum: ethereum,
|
||||||
|
methods: DebugMapping,
|
||||||
|
codec: coder.New(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// collection with supported methods
|
||||||
|
func (self *debugApi) Methods() []string {
|
||||||
|
methods := make([]string, len(self.methods))
|
||||||
|
i := 0
|
||||||
|
for k := range self.methods {
|
||||||
|
methods[i] = k
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return methods
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute given request
|
||||||
|
func (self *debugApi) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if callback, ok := self.methods[req.Method]; ok {
|
||||||
|
return callback(self, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, &shared.NotImplementedError{req.Method}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *debugApi) Name() string {
|
||||||
|
return DebugApiName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *debugApi) ApiVersion() string {
|
||||||
|
return DebugApiVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *debugApi) PrintBlock(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumArg)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||||
|
return fmt.Sprintf("%s", block), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *debugApi) DumpBlock(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumArg)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||||
|
if block == nil {
|
||||||
|
return nil, fmt.Errorf("block #%d not found", args.BlockNumber)
|
||||||
|
}
|
||||||
|
|
||||||
|
stateDb := state.New(block.Root(), self.ethereum.StateDb())
|
||||||
|
if stateDb == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return stateDb.RawDump(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *debugApi) GetBlockRlp(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumArg)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||||
|
if block == nil {
|
||||||
|
return nil, fmt.Errorf("block #%d not found", args.BlockNumber)
|
||||||
|
}
|
||||||
|
encoded, err := rlp.EncodeToBytes(block)
|
||||||
|
return fmt.Sprintf("%x", encoded), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *debugApi) SetHead(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumArg)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||||
|
if block == nil {
|
||||||
|
return nil, fmt.Errorf("block #%d not found", args.BlockNumber)
|
||||||
|
}
|
||||||
|
|
||||||
|
self.ethereum.ChainManager().SetHead(block)
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *debugApi) ProcessBlock(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumArg)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||||
|
if block == nil {
|
||||||
|
return nil, fmt.Errorf("block #%d not found", args.BlockNumber)
|
||||||
|
}
|
||||||
|
|
||||||
|
old := vm.Debug
|
||||||
|
defer func() { vm.Debug = old }()
|
||||||
|
vm.Debug = true
|
||||||
|
|
||||||
|
_, err := self.ethereum.BlockProcessor().RetryProcess(block)
|
||||||
|
if err == nil {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *debugApi) SeedHash(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumArg)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if hash, err := ethash.GetSeedHash(uint64(args.BlockNumber)); err == nil {
|
||||||
|
return fmt.Sprintf("0x%x", hash), nil
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
47
rpc/api/debug_args.go
Normal file
47
rpc/api/debug_args.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
type WaitForBlockArgs struct {
|
||||||
|
MinHeight int
|
||||||
|
Timeout int // in seconds
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *WaitForBlockArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) > 2 {
|
||||||
|
return fmt.Errorf("waitForArgs needs 0, 1, 2 arguments")
|
||||||
|
}
|
||||||
|
|
||||||
|
// default values when not provided
|
||||||
|
args.MinHeight = -1
|
||||||
|
args.Timeout = -1
|
||||||
|
|
||||||
|
if len(obj) >= 1 {
|
||||||
|
var minHeight *big.Int
|
||||||
|
if minHeight, err = numString(obj[0]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
args.MinHeight = int(minHeight.Int64())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) >= 2 {
|
||||||
|
timeout, err := numString(obj[1])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
args.Timeout = int(timeout.Int64())
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
55
rpc/api/debug_js.go
Normal file
55
rpc/api/debug_js.go
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
// Debug_JS is the web3 extension script that exposes the debug RPC
// methods on the console's `debug` namespace.
const Debug_JS = `
web3._extend({
	property: 'debug',
	methods:
	[
		new web3._extend.Method({
			name: 'printBlock',
			call: 'debug_printBlock',
			params: 1,
			inputFormatter: [web3._extend.formatters.formatInputInt],
			outputFormatter: web3._extend.formatters.formatOutputString
		}),
		new web3._extend.Method({
			name: 'getBlockRlp',
			call: 'debug_getBlockRlp',
			params: 1,
			inputFormatter: [web3._extend.formatters.formatInputInt],
			outputFormatter: web3._extend.formatters.formatOutputString
		}),
		new web3._extend.Method({
			name: 'setHead',
			call: 'debug_setHead',
			params: 1,
			inputFormatter: [web3._extend.formatters.formatInputInt],
			outputFormatter: web3._extend.formatters.formatOutputBool
		}),
		new web3._extend.Method({
			name: 'processBlock',
			call: 'debug_processBlock',
			params: 1,
			inputFormatter: [web3._extend.formatters.formatInputInt],
			outputFormatter: function(obj) { return obj; }
		}),
		new web3._extend.Method({
			name: 'seedHash',
			call: 'debug_seedHash',
			params: 1,
			inputFormatter: [web3._extend.formatters.formatInputInt],
			outputFormatter: web3._extend.formatters.formatOutputString
		}) ,
		new web3._extend.Method({
			name: 'dumpBlock',
			call: 'debug_dumpBlock',
			params: 1,
			inputFormatter: [web3._extend.formatters.formatInputInt],
			outputFormatter: function(obj) { return obj; }
		})
	],
	properties:
	[
	]
});
`
|
542
rpc/api/eth.go
Normal file
542
rpc/api/eth.go
Normal file
@ -0,0 +1,542 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
EthApiVersion = "1.0"
|
||||||
|
)
|
||||||
|
|
||||||
|
// eth api provider
|
||||||
|
// See https://github.com/ethereum/wiki/wiki/JSON-RPC
|
||||||
|
type ethApi struct {
|
||||||
|
xeth *xeth.XEth
|
||||||
|
methods map[string]ethhandler
|
||||||
|
codec codec.ApiCoder
|
||||||
|
}
|
||||||
|
|
||||||
|
// eth callback handler
|
||||||
|
type ethhandler func(*ethApi, *shared.Request) (interface{}, error)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ethMapping = map[string]ethhandler{
|
||||||
|
"eth_accounts": (*ethApi).Accounts,
|
||||||
|
"eth_blockNumber": (*ethApi).BlockNumber,
|
||||||
|
"eth_getBalance": (*ethApi).GetBalance,
|
||||||
|
"eth_protocolVersion": (*ethApi).ProtocolVersion,
|
||||||
|
"eth_coinbase": (*ethApi).Coinbase,
|
||||||
|
"eth_mining": (*ethApi).IsMining,
|
||||||
|
"eth_gasPrice": (*ethApi).GasPrice,
|
||||||
|
"eth_getStorage": (*ethApi).GetStorage,
|
||||||
|
"eth_storageAt": (*ethApi).GetStorage,
|
||||||
|
"eth_getStorageAt": (*ethApi).GetStorageAt,
|
||||||
|
"eth_getTransactionCount": (*ethApi).GetTransactionCount,
|
||||||
|
"eth_getBlockTransactionCountByHash": (*ethApi).GetBlockTransactionCountByHash,
|
||||||
|
"eth_getBlockTransactionCountByNumber": (*ethApi).GetBlockTransactionCountByNumber,
|
||||||
|
"eth_getUncleCountByBlockHash": (*ethApi).GetUncleCountByBlockHash,
|
||||||
|
"eth_getUncleCountByBlockNumber": (*ethApi).GetUncleCountByBlockNumber,
|
||||||
|
"eth_getData": (*ethApi).GetData,
|
||||||
|
"eth_getCode": (*ethApi).GetData,
|
||||||
|
"eth_sign": (*ethApi).Sign,
|
||||||
|
"eth_sendTransaction": (*ethApi).SendTransaction,
|
||||||
|
"eth_transact": (*ethApi).SendTransaction,
|
||||||
|
"eth_estimateGas": (*ethApi).EstimateGas,
|
||||||
|
"eth_call": (*ethApi).Call,
|
||||||
|
"eth_flush": (*ethApi).Flush,
|
||||||
|
"eth_getBlockByHash": (*ethApi).GetBlockByHash,
|
||||||
|
"eth_getBlockByNumber": (*ethApi).GetBlockByNumber,
|
||||||
|
"eth_getTransactionByHash": (*ethApi).GetTransactionByHash,
|
||||||
|
"eth_getTransactionByBlockHashAndIndex": (*ethApi).GetTransactionByBlockHashAndIndex,
|
||||||
|
"eth_getUncleByBlockHashAndIndex": (*ethApi).GetUncleByBlockHashAndIndex,
|
||||||
|
"eth_getUncleByBlockNumberAndIndex": (*ethApi).GetUncleByBlockNumberAndIndex,
|
||||||
|
"eth_getCompilers": (*ethApi).GetCompilers,
|
||||||
|
"eth_compileSolidity": (*ethApi).CompileSolidity,
|
||||||
|
"eth_newFilter": (*ethApi).NewFilter,
|
||||||
|
"eth_newBlockFilter": (*ethApi).NewBlockFilter,
|
||||||
|
"eth_newPendingTransactionFilter": (*ethApi).NewPendingTransactionFilter,
|
||||||
|
"eth_uninstallFilter": (*ethApi).UninstallFilter,
|
||||||
|
"eth_getFilterChanges": (*ethApi).GetFilterChanges,
|
||||||
|
"eth_getFilterLogs": (*ethApi).GetFilterLogs,
|
||||||
|
"eth_getLogs": (*ethApi).GetLogs,
|
||||||
|
"eth_hashrate": (*ethApi).Hashrate,
|
||||||
|
"eth_getWork": (*ethApi).GetWork,
|
||||||
|
"eth_submitWork": (*ethApi).SubmitWork,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// create new ethApi instance
|
||||||
|
func NewEthApi(xeth *xeth.XEth, codec codec.Codec) *ethApi {
|
||||||
|
return ðApi{xeth, ethMapping, codec.New(nil)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// collection with supported methods
|
||||||
|
func (self *ethApi) Methods() []string {
|
||||||
|
methods := make([]string, len(self.methods))
|
||||||
|
i := 0
|
||||||
|
for k := range self.methods {
|
||||||
|
methods[i] = k
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return methods
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute given request
|
||||||
|
func (self *ethApi) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if callback, ok := self.methods[req.Method]; ok {
|
||||||
|
return callback(self, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, shared.NewNotImplementedError(req.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) Name() string {
|
||||||
|
return EthApiName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) ApiVersion() string {
|
||||||
|
return EthApiVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) Accounts(req *shared.Request) (interface{}, error) {
|
||||||
|
return self.xeth.Accounts(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) Hashrate(req *shared.Request) (interface{}, error) {
|
||||||
|
return newHexNum(self.xeth.HashRate()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) BlockNumber(req *shared.Request) (interface{}, error) {
|
||||||
|
return self.xeth.CurrentBlock().Number(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetBalance(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(GetBalanceArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return self.xeth.AtStateNum(args.BlockNumber).BalanceAt(args.Address), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) ProtocolVersion(req *shared.Request) (interface{}, error) {
|
||||||
|
return self.xeth.EthVersion(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) Coinbase(req *shared.Request) (interface{}, error) {
|
||||||
|
return newHexData(self.xeth.Coinbase()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) IsMining(req *shared.Request) (interface{}, error) {
|
||||||
|
return self.xeth.IsMining(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GasPrice(req *shared.Request) (interface{}, error) {
|
||||||
|
return newHexNum(self.xeth.DefaultGasPrice().Bytes()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetStorage(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(GetStorageArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return self.xeth.AtStateNum(args.BlockNumber).State().SafeGet(args.Address).Storage(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetStorageAt(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(GetStorageAtArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return self.xeth.AtStateNum(args.BlockNumber).StorageAt(args.Address, args.Key), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetTransactionCount(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(GetTxCountArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
count := self.xeth.AtStateNum(args.BlockNumber).TxCountAt(args.Address)
|
||||||
|
return newHexNum(big.NewInt(int64(count)).Bytes()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetBlockTransactionCountByHash(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(HashArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := NewBlockRes(self.xeth.EthBlockByHash(args.Hash), false)
|
||||||
|
if block == nil {
|
||||||
|
return nil, nil
|
||||||
|
} else {
|
||||||
|
return newHexNum(big.NewInt(int64(len(block.Transactions))).Bytes()), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetBlockTransactionCountByNumber(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumArg)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := NewBlockRes(self.xeth.EthBlockByNumber(args.BlockNumber), false)
|
||||||
|
if block == nil {
|
||||||
|
return nil, nil
|
||||||
|
} else {
|
||||||
|
return newHexNum(big.NewInt(int64(len(block.Transactions))).Bytes()), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetUncleCountByBlockHash(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(HashArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByHash(args.Hash)
|
||||||
|
br := NewBlockRes(block, false)
|
||||||
|
if br == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetUncleCountByBlockNumber(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumArg)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||||
|
br := NewBlockRes(block, false)
|
||||||
|
if br == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetData(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(GetDataArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
v := self.xeth.AtStateNum(args.BlockNumber).CodeAtBytes(args.Address)
|
||||||
|
return newHexData(v), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) Sign(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(NewSigArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
v, err := self.xeth.Sign(args.From, args.Data, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) SendTransaction(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(NewTxArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// nonce may be nil ("guess" mode)
|
||||||
|
var nonce string
|
||||||
|
if args.Nonce != nil {
|
||||||
|
nonce = args.Nonce.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
var gas, price string
|
||||||
|
if args.Gas != nil {
|
||||||
|
gas = args.Gas.String()
|
||||||
|
}
|
||||||
|
if args.GasPrice != nil {
|
||||||
|
price = args.GasPrice.String()
|
||||||
|
}
|
||||||
|
v, err := self.xeth.Transact(args.From, args.To, nonce, args.Value.String(), gas, price, args.Data)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) EstimateGas(req *shared.Request) (interface{}, error) {
|
||||||
|
_, gas, err := self.doCall(req.Params)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO unwrap the parent method's ToHex call
|
||||||
|
if len(gas) == 0 {
|
||||||
|
return newHexNum(0), nil
|
||||||
|
} else {
|
||||||
|
return newHexNum(gas), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) Call(req *shared.Request) (interface{}, error) {
|
||||||
|
v, _, err := self.doCall(req.Params)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO unwrap the parent method's ToHex call
|
||||||
|
if v == "0x0" {
|
||||||
|
return newHexData([]byte{}), nil
|
||||||
|
} else {
|
||||||
|
return newHexData(common.FromHex(v)), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) Flush(req *shared.Request) (interface{}, error) {
|
||||||
|
return nil, shared.NewNotImplementedError(req.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) doCall(params json.RawMessage) (string, string, error) {
|
||||||
|
args := new(CallArgs)
|
||||||
|
if err := self.codec.Decode(params, &args); err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return self.xeth.AtStateNum(args.BlockNumber).Call(args.From, args.To, args.Value.String(), args.Gas.String(), args.GasPrice.String(), args.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetBlockByHash(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(GetBlockByHashArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByHash(args.BlockHash)
|
||||||
|
return NewBlockRes(block, args.IncludeTxs), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetBlockByNumber(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(GetBlockByNumberArgs)
|
||||||
|
if err := json.Unmarshal(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||||
|
br := NewBlockRes(block, args.IncludeTxs)
|
||||||
|
// If request was for "pending", nil nonsensical fields
|
||||||
|
if args.BlockNumber == -2 {
|
||||||
|
br.BlockHash = nil
|
||||||
|
br.BlockNumber = nil
|
||||||
|
br.Miner = nil
|
||||||
|
br.Nonce = nil
|
||||||
|
br.LogsBloom = nil
|
||||||
|
}
|
||||||
|
return br, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetTransactionByHash(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(HashArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
tx, bhash, bnum, txi := self.xeth.EthTransactionByHash(args.Hash)
|
||||||
|
if tx != nil {
|
||||||
|
v := NewTransactionRes(tx)
|
||||||
|
// if the blockhash is 0, assume this is a pending transaction
|
||||||
|
if bytes.Compare(bhash.Bytes(), bytes.Repeat([]byte{0}, 32)) != 0 {
|
||||||
|
v.BlockHash = newHexData(bhash)
|
||||||
|
v.BlockNumber = newHexNum(bnum)
|
||||||
|
v.TxIndex = newHexNum(txi)
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetTransactionByBlockHashAndIndex(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(HashIndexArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByHash(args.Hash)
|
||||||
|
br := NewBlockRes(block, true)
|
||||||
|
if br == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.Index >= int64(len(br.Transactions)) || args.Index < 0 {
|
||||||
|
return nil, nil
|
||||||
|
} else {
|
||||||
|
return br.Transactions[args.Index], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetTransactionByBlockNumberAndIndex(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumIndexArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||||
|
v := NewBlockRes(block, true)
|
||||||
|
if v == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.Index >= int64(len(v.Transactions)) || args.Index < 0 {
|
||||||
|
// return NewValidationError("Index", "does not exist")
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return v.Transactions[args.Index], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetUncleByBlockHashAndIndex(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(HashIndexArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
br := NewBlockRes(self.xeth.EthBlockByHash(args.Hash), false)
|
||||||
|
if br == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.Index >= int64(len(br.Uncles)) || args.Index < 0 {
|
||||||
|
// return NewValidationError("Index", "does not exist")
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return br.Uncles[args.Index], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetUncleByBlockNumberAndIndex(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockNumIndexArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
block := self.xeth.EthBlockByNumber(args.BlockNumber)
|
||||||
|
v := NewBlockRes(block, true)
|
||||||
|
|
||||||
|
if v == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.Index >= int64(len(v.Uncles)) || args.Index < 0 {
|
||||||
|
return nil, nil
|
||||||
|
} else {
|
||||||
|
return v.Uncles[args.Index], nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetCompilers(req *shared.Request) (interface{}, error) {
|
||||||
|
var lang string
|
||||||
|
if solc, _ := self.xeth.Solc(); solc != nil {
|
||||||
|
lang = "Solidity"
|
||||||
|
}
|
||||||
|
c := []string{lang}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) CompileSolidity(req *shared.Request) (interface{}, error) {
|
||||||
|
solc, _ := self.xeth.Solc()
|
||||||
|
if solc == nil {
|
||||||
|
return nil, shared.NewNotAvailableError(req.Method, "solc (solidity compiler) not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
args := new(SourceArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
contracts, err := solc.Compile(args.Source)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return contracts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) NewFilter(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockFilterArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
id := self.xeth.NewLogFilter(args.Earliest, args.Latest, args.Skip, args.Max, args.Address, args.Topics)
|
||||||
|
return newHexNum(big.NewInt(int64(id)).Bytes()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) NewBlockFilter(req *shared.Request) (interface{}, error) {
|
||||||
|
return newHexNum(self.xeth.NewBlockFilter()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) NewPendingTransactionFilter(req *shared.Request) (interface{}, error) {
|
||||||
|
return newHexNum(self.xeth.NewTransactionFilter()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) UninstallFilter(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(FilterIdArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
return self.xeth.UninstallFilter(args.Id), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetFilterChanges(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(FilterIdArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
switch self.xeth.GetFilterType(args.Id) {
|
||||||
|
case xeth.BlockFilterTy:
|
||||||
|
return NewHashesRes(self.xeth.BlockFilterChanged(args.Id)), nil
|
||||||
|
case xeth.TransactionFilterTy:
|
||||||
|
return NewHashesRes(self.xeth.TransactionFilterChanged(args.Id)), nil
|
||||||
|
case xeth.LogFilterTy:
|
||||||
|
return NewLogsRes(self.xeth.LogFilterChanged(args.Id)), nil
|
||||||
|
default:
|
||||||
|
return []string{}, nil // reply empty string slice
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetFilterLogs(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(FilterIdArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewLogsRes(self.xeth.Logs(args.Id)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetLogs(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(BlockFilterArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
return NewLogsRes(self.xeth.AllLogs(args.Earliest, args.Latest, args.Skip, args.Max, args.Address, args.Topics)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) GetWork(req *shared.Request) (interface{}, error) {
|
||||||
|
self.xeth.SetMining(true, 0)
|
||||||
|
return self.xeth.RemoteMining().GetWork(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *ethApi) SubmitWork(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(SubmitWorkArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
return self.xeth.RemoteMining().SubmitWork(args.Nonce, common.HexToHash(args.Digest), common.HexToHash(args.Header)), nil
|
||||||
|
}
|
831
rpc/api/eth_args.go
Normal file
831
rpc/api/eth_args.go
Normal file
@ -0,0 +1,831 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Defaults applied to log filter queries when the caller does not
	// supply explicit limit/offset values.
	defaultLogLimit  = 100
	defaultLogOffset = 0
)
|
||||||
|
|
||||||
|
type GetBalanceArgs struct {
|
||||||
|
Address string
|
||||||
|
BlockNumber int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *GetBalanceArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
addstr, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("address", "not a string")
|
||||||
|
}
|
||||||
|
args.Address = addstr
|
||||||
|
|
||||||
|
if len(obj) > 1 {
|
||||||
|
if err := blockHeight(obj[1], &args.BlockNumber); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
args.BlockNumber = -1
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type GetStorageArgs struct {
|
||||||
|
Address string
|
||||||
|
BlockNumber int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *GetStorageArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
addstr, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("address", "not a string")
|
||||||
|
}
|
||||||
|
args.Address = addstr
|
||||||
|
|
||||||
|
if len(obj) > 1 {
|
||||||
|
if err := blockHeight(obj[1], &args.BlockNumber); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
args.BlockNumber = -1
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type GetStorageAtArgs struct {
|
||||||
|
Address string
|
||||||
|
BlockNumber int64
|
||||||
|
Key string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *GetStorageAtArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 2 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
addstr, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("address", "not a string")
|
||||||
|
}
|
||||||
|
args.Address = addstr
|
||||||
|
|
||||||
|
keystr, ok := obj[1].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("key", "not a string")
|
||||||
|
}
|
||||||
|
args.Key = keystr
|
||||||
|
|
||||||
|
if len(obj) > 2 {
|
||||||
|
if err := blockHeight(obj[2], &args.BlockNumber); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
args.BlockNumber = -1
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type GetTxCountArgs struct {
|
||||||
|
Address string
|
||||||
|
BlockNumber int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *GetTxCountArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
addstr, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("address", "not a string")
|
||||||
|
}
|
||||||
|
args.Address = addstr
|
||||||
|
|
||||||
|
if len(obj) > 1 {
|
||||||
|
if err := blockHeight(obj[1], &args.BlockNumber); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
args.BlockNumber = -1
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type HashArgs struct {
|
||||||
|
Hash string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *HashArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
arg0, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("hash", "not a string")
|
||||||
|
}
|
||||||
|
args.Hash = arg0
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type BlockNumArg struct {
|
||||||
|
BlockNumber int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *BlockNumArg) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := blockHeight(obj[0], &args.BlockNumber); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type GetDataArgs struct {
|
||||||
|
Address string
|
||||||
|
BlockNumber int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *GetDataArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
addstr, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("address", "not a string")
|
||||||
|
}
|
||||||
|
args.Address = addstr
|
||||||
|
|
||||||
|
if len(obj) > 1 {
|
||||||
|
if err := blockHeight(obj[1], &args.BlockNumber); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
args.BlockNumber = -1
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type NewSigArgs struct {
|
||||||
|
From string
|
||||||
|
Data string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *NewSigArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for sufficient params
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
from, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("from", "not a string")
|
||||||
|
}
|
||||||
|
args.From = from
|
||||||
|
|
||||||
|
if len(args.From) == 0 {
|
||||||
|
return shared.NewValidationError("from", "is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
data, ok := obj[1].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("data", "not a string")
|
||||||
|
}
|
||||||
|
args.Data = data
|
||||||
|
|
||||||
|
if len(args.Data) == 0 {
|
||||||
|
return shared.NewValidationError("data", "is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTxArgs holds the parameters for eth_sendTransaction /
// eth_signTransaction: the transaction fields plus an optional block
// number (-1 means "latest").
type NewTxArgs struct {
	From     string
	To       string
	Nonce    *big.Int
	Value    *big.Int
	Gas      *big.Int
	GasPrice *big.Int
	Data     string

	BlockNumber int64
}

// UnmarshalJSON decodes a JSON-RPC parameter array whose first element is
// a transaction object and whose optional second element is a block
// number. Only "from" is mandatory; nil Nonce/Gas/GasPrice mean "let the
// backend choose", while a missing Value defaults to zero.
func (args *NewTxArgs) UnmarshalJSON(b []byte) (err error) {
	var obj []json.RawMessage
	var ext struct {
		From     string
		To       string
		Nonce    interface{}
		Value    interface{}
		Gas      interface{}
		GasPrice interface{}
		Data     string
	}

	// Decode byte slice to array of RawMessages
	if err := json.Unmarshal(b, &obj); err != nil {
		return shared.NewDecodeParamError(err.Error())
	}

	// Check for sufficient params
	if len(obj) < 1 {
		return shared.NewInsufficientParamsError(len(obj), 1)
	}

	// Decode 0th RawMessage to temporary struct
	if err := json.Unmarshal(obj[0], &ext); err != nil {
		return shared.NewDecodeParamError(err.Error())
	}

	if len(ext.From) == 0 {
		return shared.NewValidationError("from", "is required")
	}

	args.From = ext.From
	args.To = ext.To
	args.Data = ext.Data

	// Nonce is optional; nil means the backend picks the next nonce.
	var num *big.Int
	if ext.Nonce != nil {
		num, err = numString(ext.Nonce)
		if err != nil {
			return err
		}
	}
	args.Nonce = num

	// Value defaults to zero when omitted.
	if ext.Value == nil {
		num = big.NewInt(0)
	} else {
		num, err = numString(ext.Value)
		if err != nil {
			return err
		}
	}
	args.Value = num

	// Gas is optional; nil lets the backend estimate.
	num = nil
	if ext.Gas != nil {
		if num, err = numString(ext.Gas); err != nil {
			return err
		}
	}
	args.Gas = num

	// GasPrice is optional; nil lets the backend use its default.
	num = nil
	if ext.GasPrice != nil {
		if num, err = numString(ext.GasPrice); err != nil {
			return err
		}
	}
	args.GasPrice = num

	// Check for optional BlockNumber param
	if len(obj) > 1 {
		if err := blockHeightFromJson(obj[1], &args.BlockNumber); err != nil {
			return err
		}
	} else {
		args.BlockNumber = -1
	}

	return nil
}
|
||||||
|
|
||||||
|
type SourceArgs struct {
|
||||||
|
Source string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *SourceArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
arg0, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("source code", "not a string")
|
||||||
|
}
|
||||||
|
args.Source = arg0
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CallArgs holds the parameters for eth_call / eth_estimateGas: the call
// message fields plus an optional block number (-1 means "latest").
type CallArgs struct {
	From     string
	To       string
	Value    *big.Int
	Gas      *big.Int
	GasPrice *big.Int
	Data     string

	BlockNumber int64
}

// UnmarshalJSON decodes a JSON-RPC parameter array whose first element is
// a call object and whose optional second element is a block number.
// "to" is mandatory for calls; Value/Gas/GasPrice default to zero.
func (args *CallArgs) UnmarshalJSON(b []byte) (err error) {
	var obj []json.RawMessage
	var ext struct {
		From     string
		To       string
		Value    interface{}
		Gas      interface{}
		GasPrice interface{}
		Data     string
	}

	// Decode byte slice to array of RawMessages
	if err := json.Unmarshal(b, &obj); err != nil {
		return shared.NewDecodeParamError(err.Error())
	}

	// Check for sufficient params
	if len(obj) < 1 {
		return shared.NewInsufficientParamsError(len(obj), 1)
	}

	// Decode 0th RawMessage to temporary struct
	if err := json.Unmarshal(obj[0], &ext); err != nil {
		return shared.NewDecodeParamError(err.Error())
	}

	args.From = ext.From

	// Unlike a transaction, a call must always have a target.
	if len(ext.To) == 0 {
		return shared.NewValidationError("to", "is required")
	}
	args.To = ext.To

	// Value defaults to zero when omitted.
	var num *big.Int
	if ext.Value == nil {
		num = big.NewInt(0)
	} else {
		if num, err = numString(ext.Value); err != nil {
			return err
		}
	}
	args.Value = num

	// Gas defaults to zero when omitted.
	if ext.Gas == nil {
		num = big.NewInt(0)
	} else {
		if num, err = numString(ext.Gas); err != nil {
			return err
		}
	}
	args.Gas = num

	// GasPrice defaults to zero when omitted.
	if ext.GasPrice == nil {
		num = big.NewInt(0)
	} else {
		if num, err = numString(ext.GasPrice); err != nil {
			return err
		}
	}
	args.GasPrice = num

	args.Data = ext.Data

	// Check for optional BlockNumber param
	if len(obj) > 1 {
		if err := blockHeightFromJson(obj[1], &args.BlockNumber); err != nil {
			return err
		}
	} else {
		args.BlockNumber = -1
	}

	return nil
}
|
||||||
|
|
||||||
|
type HashIndexArgs struct {
|
||||||
|
Hash string
|
||||||
|
Index int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *HashIndexArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 2 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
arg0, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("hash", "not a string")
|
||||||
|
}
|
||||||
|
args.Hash = arg0
|
||||||
|
|
||||||
|
arg1, ok := obj[1].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("index", "not a string")
|
||||||
|
}
|
||||||
|
args.Index = common.Big(arg1).Int64()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type BlockNumIndexArgs struct {
|
||||||
|
BlockNumber int64
|
||||||
|
Index int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *BlockNumIndexArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 2 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := blockHeight(obj[0], &args.BlockNumber); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var arg1 *big.Int
|
||||||
|
if arg1, err = numString(obj[1]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
args.Index = arg1.Int64()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type GetBlockByHashArgs struct {
|
||||||
|
BlockHash string
|
||||||
|
IncludeTxs bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *GetBlockByHashArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 2 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
argstr, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("blockHash", "not a string")
|
||||||
|
}
|
||||||
|
args.BlockHash = argstr
|
||||||
|
|
||||||
|
args.IncludeTxs = obj[1].(bool)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type GetBlockByNumberArgs struct {
|
||||||
|
BlockNumber int64
|
||||||
|
IncludeTxs bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *GetBlockByNumberArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 2 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := blockHeight(obj[0], &args.BlockNumber); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
args.IncludeTxs = obj[1].(bool)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockFilterArgs holds the parameters for log-filter requests
// (eth_newFilter / eth_getLogs): a block range, optional address and
// topic filters, and paging limits.
type BlockFilterArgs struct {
	Earliest int64      // first block to search; -1 means the latest block
	Latest   int64      // last block to search; -1 means the latest block
	Address  []string   // contract addresses to match (any of)
	Topics   [][]string // per-position topic alternatives; "" is a wildcard
	Skip     int        // number of matches to skip (offset)
	Max      int        // maximum number of matches to return (limit)
}

// UnmarshalJSON decodes a JSON-RPC parameter array whose first element is
// a filter-options object ({fromBlock, toBlock, limit, offset, address,
// topics}). Missing limit/offset fall back to defaultLogLimit /
// defaultLogOffset; address and topics accept either a single string or
// an array.
func (args *BlockFilterArgs) UnmarshalJSON(b []byte) (err error) {
	var obj []struct {
		FromBlock interface{} `json:"fromBlock"`
		ToBlock   interface{} `json:"toBlock"`
		Limit     interface{} `json:"limit"`
		Offset    interface{} `json:"offset"`
		Address   interface{} `json:"address"`
		Topics    interface{} `json:"topics"`
	}

	if err = json.Unmarshal(b, &obj); err != nil {
		return shared.NewDecodeParamError(err.Error())
	}

	if len(obj) < 1 {
		return shared.NewInsufficientParamsError(len(obj), 1)
	}

	// args.Earliest, err = toNumber(obj[0].ToBlock)
	// if err != nil {
	// return shared.NewDecodeParamError(fmt.Sprintf("FromBlock %v", err))
	// }
	// args.Latest, err = toNumber(obj[0].FromBlock)
	// if err != nil {
	// return shared.NewDecodeParamError(fmt.Sprintf("ToBlock %v", err))

	var num int64
	var numBig *big.Int

	// if blank then latest
	if obj[0].FromBlock == nil {
		num = -1
	} else {
		if err := blockHeight(obj[0].FromBlock, &num); err != nil {
			return err
		}
	}
	// if -2 or other "silly" number, use latest
	if num < 0 {
		args.Earliest = -1 //latest block
	} else {
		args.Earliest = num
	}

	// if blank than latest
	if obj[0].ToBlock == nil {
		num = -1
	} else {
		if err := blockHeight(obj[0].ToBlock, &num); err != nil {
			return err
		}
	}
	args.Latest = num

	// Limit: number of matches to return, defaulting when unspecified.
	if obj[0].Limit == nil {
		numBig = big.NewInt(defaultLogLimit)
	} else {
		if numBig, err = numString(obj[0].Limit); err != nil {
			return err
		}
	}
	args.Max = int(numBig.Int64())

	// Offset: number of matches to skip, defaulting when unspecified.
	if obj[0].Offset == nil {
		numBig = big.NewInt(defaultLogOffset)
	} else {
		if numBig, err = numString(obj[0].Offset); err != nil {
			return err
		}
	}
	args.Skip = int(numBig.Int64())

	// Address may be a single string or an array of strings.
	if obj[0].Address != nil {
		marg, ok := obj[0].Address.([]interface{})
		if ok {
			v := make([]string, len(marg))
			for i, arg := range marg {
				argstr, ok := arg.(string)
				if !ok {
					return shared.NewInvalidTypeError(fmt.Sprintf("address[%d]", i), "is not a string")
				}
				v[i] = argstr
			}
			args.Address = v
		} else {
			argstr, ok := obj[0].Address.(string)
			if ok {
				v := make([]string, 1)
				v[0] = argstr
				args.Address = v
			} else {
				return shared.NewInvalidTypeError("address", "is not a string or array")
			}
		}
	}

	// Topics: each entry may be a string, an array of strings, or null
	// (wildcard, stored as the empty string).
	if obj[0].Topics != nil {
		other, ok := obj[0].Topics.([]interface{})
		if ok {
			topicdbl := make([][]string, len(other))
			for i, iv := range other {
				if argstr, ok := iv.(string); ok {
					// Found a string, push into first element of array
					topicsgl := make([]string, 1)
					topicsgl[0] = argstr
					topicdbl[i] = topicsgl
				} else if argarray, ok := iv.([]interface{}); ok {
					// Found an array of other
					topicdbl[i] = make([]string, len(argarray))
					for j, jv := range argarray {
						if v, ok := jv.(string); ok {
							topicdbl[i][j] = v
						} else if jv == nil {
							topicdbl[i][j] = ""
						} else {
							return shared.NewInvalidTypeError(fmt.Sprintf("topic[%d][%d]", i, j), "is not a string")
						}
					}
				} else if iv == nil {
					topicdbl[i] = []string{""}
				} else {
					return shared.NewInvalidTypeError(fmt.Sprintf("topic[%d]", i), "not a string or array")
				}
			}
			args.Topics = topicdbl
			return nil
		} else {
			return shared.NewInvalidTypeError("topic", "is not a string or array")
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
type FilterIdArgs struct {
|
||||||
|
Id int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *FilterIdArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
var num *big.Int
|
||||||
|
if num, err = numString(obj[0]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
args.Id = int(num.Int64())
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogRes is the JSON-RPC wire representation of a single log entry.
type LogRes struct {
	Address          *hexdata   `json:"address"`
	Topics           []*hexdata `json:"topics"`
	Data             *hexdata   `json:"data"`
	BlockNumber      *hexnum    `json:"blockNumber"`
	LogIndex         *hexnum    `json:"logIndex"`
	BlockHash        *hexdata   `json:"blockHash"`
	TransactionHash  *hexdata   `json:"transactionHash"`
	TransactionIndex *hexnum    `json:"transactionIndex"`
}
|
||||||
|
|
||||||
|
func NewLogRes(log *state.Log) LogRes {
|
||||||
|
var l LogRes
|
||||||
|
l.Topics = make([]*hexdata, len(log.Topics))
|
||||||
|
for j, topic := range log.Topics {
|
||||||
|
l.Topics[j] = newHexData(topic)
|
||||||
|
}
|
||||||
|
l.Address = newHexData(log.Address)
|
||||||
|
l.Data = newHexData(log.Data)
|
||||||
|
l.BlockNumber = newHexNum(log.Number)
|
||||||
|
l.LogIndex = newHexNum(log.Index)
|
||||||
|
l.TransactionHash = newHexData(log.TxHash)
|
||||||
|
l.TransactionIndex = newHexNum(log.TxIndex)
|
||||||
|
l.BlockHash = newHexData(log.BlockHash)
|
||||||
|
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewLogsRes(logs state.Logs) (ls []LogRes) {
|
||||||
|
ls = make([]LogRes, len(logs))
|
||||||
|
|
||||||
|
for i, log := range logs {
|
||||||
|
ls[i] = NewLogRes(log)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHashesRes(hs []common.Hash) []string {
|
||||||
|
hashes := make([]string, len(hs))
|
||||||
|
|
||||||
|
for i, hash := range hs {
|
||||||
|
hashes[i] = hash.Hex()
|
||||||
|
}
|
||||||
|
|
||||||
|
return hashes
|
||||||
|
}
|
||||||
|
|
||||||
|
type SubmitWorkArgs struct {
|
||||||
|
Nonce uint64
|
||||||
|
Header string
|
||||||
|
Digest string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *SubmitWorkArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err = json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 3 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 3)
|
||||||
|
}
|
||||||
|
|
||||||
|
var objstr string
|
||||||
|
var ok bool
|
||||||
|
if objstr, ok = obj[0].(string); !ok {
|
||||||
|
return shared.NewInvalidTypeError("nonce", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
args.Nonce = common.String2Big(objstr).Uint64()
|
||||||
|
if objstr, ok = obj[1].(string); !ok {
|
||||||
|
return shared.NewInvalidTypeError("header", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
args.Header = objstr
|
||||||
|
|
||||||
|
if objstr, ok = obj[2].(string); !ok {
|
||||||
|
return shared.NewInvalidTypeError("digest", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
args.Digest = objstr
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
3
rpc/api/eth_js.go
Normal file
3
rpc/api/eth_js.go
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
// JS api provided by web3.js
|
66
rpc/api/mergedapi.go
Normal file
66
rpc/api/mergedapi.go
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// MergedApiVersion is the version reported for the merged API module.
	MergedApiVersion = "1.0"
)

// MergedApi combines multiple API implementations behind one dispatcher.
type MergedApi struct {
	apis    map[string]string      // module name -> version, reported by "modules"
	methods map[string]EthereumApi // method name -> API that implements it
}
|
||||||
|
|
||||||
|
// create new merged api instance
|
||||||
|
func newMergedApi(apis ...EthereumApi) *MergedApi {
|
||||||
|
mergedApi := new(MergedApi)
|
||||||
|
mergedApi.apis = make(map[string]string, len(apis))
|
||||||
|
mergedApi.methods = make(map[string]EthereumApi)
|
||||||
|
|
||||||
|
for _, api := range apis {
|
||||||
|
mergedApi.apis[api.Name()] = api.ApiVersion()
|
||||||
|
for _, method := range api.Methods() {
|
||||||
|
mergedApi.methods[method] = api
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return mergedApi
|
||||||
|
}
|
||||||
|
|
||||||
|
// Supported RPC methods
|
||||||
|
func (self *MergedApi) Methods() []string {
|
||||||
|
all := make([]string, len(self.methods))
|
||||||
|
for method, _ := range self.methods {
|
||||||
|
all = append(all, method)
|
||||||
|
}
|
||||||
|
return all
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call the correct API's Execute method for the given request
|
||||||
|
func (self *MergedApi) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if res, _ := self.handle(req); res != nil {
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
if api, found := self.methods[req.Method]; found {
|
||||||
|
return api.Execute(req)
|
||||||
|
}
|
||||||
|
return nil, shared.NewNotImplementedError(req.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the module name under which this API is registered.
func (self *MergedApi) Name() string {
	return MergedApiName
}

// ApiVersion returns the version string of the merged API module.
func (self *MergedApi) ApiVersion() string {
	return MergedApiVersion
}
|
||||||
|
|
||||||
|
func (self *MergedApi) handle(req *shared.Request) (interface{}, error) {
|
||||||
|
if req.Method == "modules" { // provided API's
|
||||||
|
return self.apis, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
1
rpc/api/mergedapi_js.go
Normal file
1
rpc/api/mergedapi_js.go
Normal file
@ -0,0 +1 @@
|
|||||||
|
package api
|
147
rpc/api/miner.go
Normal file
147
rpc/api/miner.go
Normal file
@ -0,0 +1,147 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/ethereum/ethash"
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// MinerApiVersion is the version reported for the miner API module.
	MinerApiVersion = "1.0"
)

var (
	// MinerMapping maps RPC method names to their handlers.
	MinerMapping = map[string]minerhandler{
		"miner_hashrate":     (*minerApi).Hashrate,
		"miner_makeDAG":      (*minerApi).MakeDAG,
		"miner_setExtra":     (*minerApi).SetExtra,
		"miner_setGasPrice":  (*minerApi).SetGasPrice,
		"miner_startAutoDAG": (*minerApi).StartAutoDAG,
		"miner_start":        (*minerApi).StartMiner,
		"miner_stopAutoDAG":  (*minerApi).StopAutoDAG,
		"miner_stop":         (*minerApi).StopMiner,
	}
)

// minerhandler is the callback signature for miner API methods.
type minerhandler func(*minerApi, *shared.Request) (interface{}, error)

// minerApi exposes miner control methods over RPC.
type minerApi struct {
	ethereum *eth.Ethereum           // backing node whose miner is controlled
	methods  map[string]minerhandler // supported method name -> handler
	codec    codec.ApiCoder          // request parameter decoder
}
|
||||||
|
|
||||||
|
// create a new miner api instance
|
||||||
|
func NewMinerApi(ethereum *eth.Ethereum, coder codec.Codec) *minerApi {
|
||||||
|
return &minerApi{
|
||||||
|
ethereum: ethereum,
|
||||||
|
methods: MinerMapping,
|
||||||
|
codec: coder.New(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute given request
|
||||||
|
func (self *minerApi) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if callback, ok := self.methods[req.Method]; ok {
|
||||||
|
return callback(self, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, &shared.NotImplementedError{req.Method}
|
||||||
|
}
|
||||||
|
|
||||||
|
// collection with supported methods
|
||||||
|
func (self *minerApi) Methods() []string {
|
||||||
|
methods := make([]string, len(self.methods))
|
||||||
|
i := 0
|
||||||
|
for k := range self.methods {
|
||||||
|
methods[i] = k
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return methods
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the module name under which this API is registered.
func (self *minerApi) Name() string {
	return MinerApiName
}

// ApiVersion returns the version string of the miner API module.
func (self *minerApi) ApiVersion() string {
	return MinerApiVersion
}
|
||||||
|
|
||||||
|
func (self *minerApi) StartMiner(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(StartMinerArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if args.Threads == -1 { // (not specified by user, use default)
|
||||||
|
args.Threads = self.ethereum.MinerThreads
|
||||||
|
}
|
||||||
|
|
||||||
|
self.ethereum.StartAutoDAG()
|
||||||
|
err := self.ethereum.StartMining(args.Threads)
|
||||||
|
if err == nil {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// StopMiner implements miner_stop: it halts mining on this node.
func (self *minerApi) StopMiner(req *shared.Request) (interface{}, error) {
	self.ethereum.StopMining()
	return true, nil
}

// Hashrate implements miner_hashrate: it reports the miner's current
// hash rate.
func (self *minerApi) Hashrate(req *shared.Request) (interface{}, error) {
	return self.ethereum.Miner().HashRate(), nil
}
|
||||||
|
|
||||||
|
// SetExtra implements miner_setExtra: it sets the extra data embedded in
// blocks mined by this node.
func (self *minerApi) SetExtra(req *shared.Request) (interface{}, error) {
	args := new(SetExtraArgs)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return nil, err
	}
	self.ethereum.Miner().SetExtra([]byte(args.Data))
	return true, nil
}
|
||||||
|
|
||||||
|
// SetGasPrice implements miner_setGasPrice: it sets the minimum gas price
// (given as a numeric string) for transactions this miner will include.
func (self *minerApi) SetGasPrice(req *shared.Request) (interface{}, error) {
	args := new(GasPriceArgs)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return false, err
	}

	self.ethereum.Miner().SetGasPrice(common.String2Big(args.Price))
	return true, nil
}
|
||||||
|
|
||||||
|
// StartAutoDAG implements miner_startAutoDAG: it enables automatic
// pre-generation of ethash DAGs for upcoming epochs.
func (self *minerApi) StartAutoDAG(req *shared.Request) (interface{}, error) {
	self.ethereum.StartAutoDAG()
	return true, nil
}

// StopAutoDAG implements miner_stopAutoDAG: it disables automatic DAG
// pre-generation.
func (self *minerApi) StopAutoDAG(req *shared.Request) (interface{}, error) {
	self.ethereum.StopAutoDAG()
	return true, nil
}
|
||||||
|
|
||||||
|
func (self *minerApi) MakeDAG(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(MakeDAGArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.BlockNumber < 0 {
|
||||||
|
return false, shared.NewValidationError("BlockNumber", "BlockNumber must be positive")
|
||||||
|
}
|
||||||
|
|
||||||
|
err := ethash.MakeDAG(uint64(args.BlockNumber), "")
|
||||||
|
if err == nil {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, err
|
||||||
|
}
|
100
rpc/api/miner_args.go
Normal file
100
rpc/api/miner_args.go
Normal file
@ -0,0 +1,100 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
type StartMinerArgs struct {
|
||||||
|
Threads int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *StartMinerArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) == 0 || obj[0] == nil {
|
||||||
|
args.Threads = -1
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var num *big.Int
|
||||||
|
if num, err = numString(obj[0]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
args.Threads = int(num.Int64())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type SetExtraArgs struct {
|
||||||
|
Data string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *SetExtraArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
extrastr, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("Price", "not a string")
|
||||||
|
}
|
||||||
|
args.Data = extrastr
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type GasPriceArgs struct {
|
||||||
|
Price string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *GasPriceArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if pricestr, ok := obj[0].(string); ok {
|
||||||
|
args.Price = pricestr
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return shared.NewInvalidTypeError("Price", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
type MakeDAGArgs struct {
|
||||||
|
BlockNumber int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *MakeDAGArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
args.BlockNumber = -1
|
||||||
|
var obj []interface{}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := blockHeight(obj[0], &args.BlockNumber); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
67
rpc/api/miner_js.go
Normal file
67
rpc/api/miner_js.go
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
// Miner_JS is the web3 extension loaded into the console; it exposes the
// miner_* RPC methods (start, stop, setExtra, setGasPrice, startAutoDAG,
// stopAutoDAG, makeDAG) and the miner.hashrate property.
const Miner_JS = `
web3._extend({
	property: 'miner',
	methods:
	[
		new web3._extend.Method({
			name: 'start',
			call: 'miner_start',
			params: 1,
			inputFormatter: [web3._extend.formatters.formatInputInt],
			outputFormatter: web3._extend.formatters.formatOutputBool
		}),
		new web3._extend.Method({
			name: 'stop',
			call: 'miner_stop',
			params: 1,
			inputFormatter: [web3._extend.formatters.formatInputInt],
			outputFormatter: web3._extend.formatters.formatOutputBool
		}),
		new web3._extend.Method({
			name: 'setExtra',
			call: 'miner_setExtra',
			params: 1,
			inputFormatter: [web3._extend.utils.formatInputString],
			outputFormatter: web3._extend.formatters.formatOutputBool
		}),
		new web3._extend.Method({
			name: 'setGasPrice',
			call: 'miner_setGasPrice',
			params: 1,
			inputFormatter: [web3._extend.utils.formatInputString],
			outputFormatter: web3._extend.formatters.formatOutputBool
		}),
		new web3._extend.Method({
			name: 'startAutoDAG',
			call: 'miner_startAutoDAG',
			params: 0,
			inputFormatter: [],
			outputFormatter: web3._extend.formatters.formatOutputBool
		}),
		new web3._extend.Method({
			name: 'stopAutoDAG',
			call: 'miner_stopAutoDAG',
			params: 0,
			inputFormatter: [],
			outputFormatter: web3._extend.formatters.formatOutputBool
		}),
		new web3._extend.Method({
			name: 'makeDAG',
			call: 'miner_makeDAG',
			params: 1,
			inputFormatter: [web3._extend.formatters.inputDefaultBlockNumberFormatter],
			outputFormatter: web3._extend.formatters.formatOutputBool
		})
	],
	properties:
	[
		new web3._extend.Property({
			name: 'hashrate',
			getter: 'miner_hashrate',
			outputFormatter: web3._extend.utils.toDecimal
		})
	]
});
`
|
89
rpc/api/net.go
Normal file
89
rpc/api/net.go
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// NetApiVersion is the version reported by the net API implementation.
	NetApiVersion = "1.0"
)

var (
	// netMapping maps net_* RPC method names to their handler functions.
	netMapping = map[string]nethandler{
		"net_version":   (*netApi).Version,
		"net_peerCount": (*netApi).PeerCount,
		"net_listening": (*netApi).IsListening,
		"net_peers":     (*netApi).Peers,
	}
)

// nethandler is the signature shared by all net_* request handlers.
type nethandler func(*netApi, *shared.Request) (interface{}, error)

// netApi implements the net_* RPC API on top of the xeth frontend and
// the node handle.
type netApi struct {
	xeth     *xeth.XEth            // high-level API backend
	ethereum *eth.Ethereum         // node handle, used for peer information
	methods  map[string]nethandler // method name -> handler
	codec    codec.ApiCoder        // parameter/result codec
}
|
||||||
|
|
||||||
|
// create a new net api instance
|
||||||
|
func NewNetApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *netApi {
|
||||||
|
return &netApi{
|
||||||
|
xeth: xeth,
|
||||||
|
ethereum: eth,
|
||||||
|
methods: netMapping,
|
||||||
|
codec: coder.New(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// collection with supported methods
|
||||||
|
func (self *netApi) Methods() []string {
|
||||||
|
methods := make([]string, len(self.methods))
|
||||||
|
i := 0
|
||||||
|
for k := range self.methods {
|
||||||
|
methods[i] = k
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return methods
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute given request
|
||||||
|
func (self *netApi) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if callback, ok := self.methods[req.Method]; ok {
|
||||||
|
return callback(self, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, shared.NewNotImplementedError(req.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the identifier under which this API is registered.
func (self *netApi) Name() string {
	return NetApiName
}

// ApiVersion returns the version of the net API implementation.
func (self *netApi) ApiVersion() string {
	return NetApiVersion
}
|
||||||
|
|
||||||
|
// Version returns the network version (net_version).
func (self *netApi) Version(req *shared.Request) (interface{}, error) {
	return self.xeth.NetworkVersion(), nil
}

// PeerCount returns the number of connected peers (net_peerCount).
func (self *netApi) PeerCount(req *shared.Request) (interface{}, error) {
	return self.xeth.PeerCount(), nil
}

// IsListening reports whether the node accepts inbound connections
// (net_listening).
func (self *netApi) IsListening(req *shared.Request) (interface{}, error) {
	return self.xeth.IsListening(), nil
}

// Peers returns detailed information about the connected peers
// (net_peers).
func (self *netApi) Peers(req *shared.Request) (interface{}, error) {
	return self.ethereum.PeersInfo(), nil
}
|
47
rpc/api/net_js.go
Normal file
47
rpc/api/net_js.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
// Net_JS is the web3 extension loaded into the console; it exposes the
// net_* RPC methods under the 'network' property, including the
// listening/peerCount/peers/version properties.
const Net_JS = `
web3._extend({
	property: 'network',
	methods:
	[
		new web3._extend.Method({
			name: 'addPeer',
			call: 'net_addPeer',
			params: 1,
			inputFormatter: [web3._extend.utils.formatInputString],
			outputFormatter: web3._extend.formatters.formatOutputBool
		}),
		new web3._extend.Method({
			name: 'getPeerCount',
			call: 'net_peerCount',
			params: 0,
			inputFormatter: [],
			outputFormatter: web3._extend.formatters.formatOutputString
		})
	],
	properties:
	[
		new web3._extend.Property({
			name: 'listening',
			getter: 'net_listening',
			outputFormatter: web3._extend.formatters.formatOutputBool
		}),
		new web3._extend.Property({
			name: 'peerCount',
			getter: 'net_peerCount',
			outputFormatter: web3._extend.utils.toDecimal
		}),
		new web3._extend.Property({
			name: 'peers',
			getter: 'net_peers',
			outputFormatter: function(obj) { return obj; }
		}),
		new web3._extend.Property({
			name: 'version',
			getter: 'net_version',
			outputFormatter: web3._extend.formatters.formatOutputString
		})
	]
});
`
|
460
rpc/api/parsing.go
Normal file
460
rpc/api/parsing.go
Normal file
@ -0,0 +1,460 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"math/big"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
// hexdata wraps a byte slice that marshals to JSON as a 0x-prefixed hex
// string, or as null when isNil is set.
type hexdata struct {
	data  []byte
	isNil bool
}

// String returns the 0x-prefixed hex representation of the data.
func (d *hexdata) String() string {
	return "0x" + common.Bytes2Hex(d.data)
}

// MarshalJSON encodes the data as a quoted hex string, or as null when
// the value is marked nil.
func (d *hexdata) MarshalJSON() ([]byte, error) {
	if d.isNil {
		return json.Marshal(nil)
	}
	return json.Marshal(d.String())
}
|
||||||
|
|
||||||
|
// newHexData wraps an arbitrary value as JSON-marshalable hex data.
// Accepted inputs: byte slices, hashes, addresses, blooms (values and
// pointers), *big.Int, all fixed-size integer types, and 0x-prefixed hex
// strings. nil input, nil pointers, undecodable strings and unsupported
// types yield a value that marshals to JSON null.
func newHexData(input interface{}) *hexdata {
	d := new(hexdata)

	if input == nil {
		d.isNil = true
		return d
	}
	switch input := input.(type) {
	case []byte:
		d.data = input
	case common.Hash:
		d.data = input.Bytes()
	case *common.Hash:
		if input == nil {
			d.isNil = true
		} else {
			d.data = input.Bytes()
		}
	case common.Address:
		d.data = input.Bytes()
	case *common.Address:
		if input == nil {
			d.isNil = true
		} else {
			d.data = input.Bytes()
		}
	case types.Bloom:
		d.data = input.Bytes()
	case *types.Bloom:
		if input == nil {
			d.isNil = true
		} else {
			d.data = input.Bytes()
		}
	case *big.Int:
		if input == nil {
			d.isNil = true
		} else {
			d.data = input.Bytes()
		}
	case int64:
		d.data = big.NewInt(input).Bytes()
	case uint64:
		// Fixed-width big-endian encoding (8 bytes), unlike the
		// minimal big.Int encoding used for signed types.
		buff := make([]byte, 8)
		binary.BigEndian.PutUint64(buff, input)
		d.data = buff
	case int:
		d.data = big.NewInt(int64(input)).Bytes()
	case uint:
		d.data = big.NewInt(int64(input)).Bytes()
	case int8:
		d.data = big.NewInt(int64(input)).Bytes()
	case uint8:
		d.data = big.NewInt(int64(input)).Bytes()
	case int16:
		d.data = big.NewInt(int64(input)).Bytes()
	case uint16:
		buff := make([]byte, 2)
		binary.BigEndian.PutUint16(buff, input)
		d.data = buff
	case int32:
		d.data = big.NewInt(int64(input)).Bytes()
	case uint32:
		buff := make([]byte, 4)
		binary.BigEndian.PutUint32(buff, input)
		d.data = buff
	case string: // hexstring
		// aaargh ffs TODO: avoid back-and-forth hex encodings where unneeded
		bytes, err := hex.DecodeString(strings.TrimPrefix(input, "0x"))
		if err != nil {
			d.isNil = true
		} else {
			d.data = bytes
		}
	default:
		d.isNil = true
	}

	return d
}
|
||||||
|
|
||||||
|
// hexnum wraps a big-endian byte representation of a number that
// marshals to JSON as a 0x-prefixed hex quantity without leading zeroes,
// or as null when isNil is set.
type hexnum struct {
	data  []byte
	isNil bool
}

// String renders the number as "0x"-prefixed hex with leading zeroes
// stripped; the zero value is rendered as "0x0".
func (d *hexnum) String() string {
	// Get hex string from bytes
	out := common.Bytes2Hex(d.data)
	// Trim leading 0s
	out = strings.TrimLeft(out, "0")
	// Output "0x0" when value is 0
	if len(out) == 0 {
		out = "0"
	}
	return "0x" + out
}

// MarshalJSON encodes the number as a quoted hex quantity, or as null
// when the value is marked nil.
func (d *hexnum) MarshalJSON() ([]byte, error) {
	if d.isNil {
		return json.Marshal(nil)
	}
	return json.Marshal(d.String())
}

// newHexNum wraps a value (same accepted types as newHexData) as a
// hex-encoded JSON quantity.
// NOTE(review): the isNil flag of the intermediate hexdata is not
// propagated, so nil input renders as "0x0" rather than null — confirm
// this is intended.
func newHexNum(input interface{}) *hexnum {
	d := new(hexnum)

	d.data = newHexData(input).data

	return d
}
|
||||||
|
|
||||||
|
// BlockRes is the RPC representation of a block. fullTx controls how it
// marshals: with full transaction objects, or with transaction hashes
// only (see MarshalJSON).
type BlockRes struct {
	fullTx bool // marshal full transactions (true) or hashes only (false)

	BlockNumber     *hexnum           `json:"number"`
	BlockHash       *hexdata          `json:"hash"`
	ParentHash      *hexdata          `json:"parentHash"`
	Nonce           *hexdata          `json:"nonce"`
	Sha3Uncles      *hexdata          `json:"sha3Uncles"`
	LogsBloom       *hexdata          `json:"logsBloom"`
	TransactionRoot *hexdata          `json:"transactionsRoot"`
	StateRoot       *hexdata          `json:"stateRoot"`
	Miner           *hexdata          `json:"miner"`
	Difficulty      *hexnum           `json:"difficulty"`
	TotalDifficulty *hexnum           `json:"totalDifficulty"`
	Size            *hexnum           `json:"size"`
	ExtraData       *hexdata          `json:"extraData"`
	GasLimit        *hexnum           `json:"gasLimit"`
	GasUsed         *hexnum           `json:"gasUsed"`
	UnixTimestamp   *hexnum           `json:"timestamp"`
	Transactions    []*TransactionRes `json:"transactions"`
	Uncles          []*UncleRes       `json:"uncles"`
}
|
||||||
|
|
||||||
|
func (b *BlockRes) MarshalJSON() ([]byte, error) {
|
||||||
|
if b.fullTx {
|
||||||
|
var ext struct {
|
||||||
|
BlockNumber *hexnum `json:"number"`
|
||||||
|
BlockHash *hexdata `json:"hash"`
|
||||||
|
ParentHash *hexdata `json:"parentHash"`
|
||||||
|
Nonce *hexdata `json:"nonce"`
|
||||||
|
Sha3Uncles *hexdata `json:"sha3Uncles"`
|
||||||
|
LogsBloom *hexdata `json:"logsBloom"`
|
||||||
|
TransactionRoot *hexdata `json:"transactionsRoot"`
|
||||||
|
StateRoot *hexdata `json:"stateRoot"`
|
||||||
|
Miner *hexdata `json:"miner"`
|
||||||
|
Difficulty *hexnum `json:"difficulty"`
|
||||||
|
TotalDifficulty *hexnum `json:"totalDifficulty"`
|
||||||
|
Size *hexnum `json:"size"`
|
||||||
|
ExtraData *hexdata `json:"extraData"`
|
||||||
|
GasLimit *hexnum `json:"gasLimit"`
|
||||||
|
GasUsed *hexnum `json:"gasUsed"`
|
||||||
|
UnixTimestamp *hexnum `json:"timestamp"`
|
||||||
|
Transactions []*TransactionRes `json:"transactions"`
|
||||||
|
Uncles []*hexdata `json:"uncles"`
|
||||||
|
}
|
||||||
|
|
||||||
|
ext.BlockNumber = b.BlockNumber
|
||||||
|
ext.BlockHash = b.BlockHash
|
||||||
|
ext.ParentHash = b.ParentHash
|
||||||
|
ext.Nonce = b.Nonce
|
||||||
|
ext.Sha3Uncles = b.Sha3Uncles
|
||||||
|
ext.LogsBloom = b.LogsBloom
|
||||||
|
ext.TransactionRoot = b.TransactionRoot
|
||||||
|
ext.StateRoot = b.StateRoot
|
||||||
|
ext.Miner = b.Miner
|
||||||
|
ext.Difficulty = b.Difficulty
|
||||||
|
ext.TotalDifficulty = b.TotalDifficulty
|
||||||
|
ext.Size = b.Size
|
||||||
|
ext.ExtraData = b.ExtraData
|
||||||
|
ext.GasLimit = b.GasLimit
|
||||||
|
ext.GasUsed = b.GasUsed
|
||||||
|
ext.UnixTimestamp = b.UnixTimestamp
|
||||||
|
ext.Transactions = b.Transactions
|
||||||
|
ext.Uncles = make([]*hexdata, len(b.Uncles))
|
||||||
|
for i, u := range b.Uncles {
|
||||||
|
ext.Uncles[i] = u.BlockHash
|
||||||
|
}
|
||||||
|
return json.Marshal(ext)
|
||||||
|
} else {
|
||||||
|
var ext struct {
|
||||||
|
BlockNumber *hexnum `json:"number"`
|
||||||
|
BlockHash *hexdata `json:"hash"`
|
||||||
|
ParentHash *hexdata `json:"parentHash"`
|
||||||
|
Nonce *hexdata `json:"nonce"`
|
||||||
|
Sha3Uncles *hexdata `json:"sha3Uncles"`
|
||||||
|
LogsBloom *hexdata `json:"logsBloom"`
|
||||||
|
TransactionRoot *hexdata `json:"transactionsRoot"`
|
||||||
|
StateRoot *hexdata `json:"stateRoot"`
|
||||||
|
Miner *hexdata `json:"miner"`
|
||||||
|
Difficulty *hexnum `json:"difficulty"`
|
||||||
|
TotalDifficulty *hexnum `json:"totalDifficulty"`
|
||||||
|
Size *hexnum `json:"size"`
|
||||||
|
ExtraData *hexdata `json:"extraData"`
|
||||||
|
GasLimit *hexnum `json:"gasLimit"`
|
||||||
|
GasUsed *hexnum `json:"gasUsed"`
|
||||||
|
UnixTimestamp *hexnum `json:"timestamp"`
|
||||||
|
Transactions []*hexdata `json:"transactions"`
|
||||||
|
Uncles []*hexdata `json:"uncles"`
|
||||||
|
}
|
||||||
|
|
||||||
|
ext.BlockNumber = b.BlockNumber
|
||||||
|
ext.BlockHash = b.BlockHash
|
||||||
|
ext.ParentHash = b.ParentHash
|
||||||
|
ext.Nonce = b.Nonce
|
||||||
|
ext.Sha3Uncles = b.Sha3Uncles
|
||||||
|
ext.LogsBloom = b.LogsBloom
|
||||||
|
ext.TransactionRoot = b.TransactionRoot
|
||||||
|
ext.StateRoot = b.StateRoot
|
||||||
|
ext.Miner = b.Miner
|
||||||
|
ext.Difficulty = b.Difficulty
|
||||||
|
ext.TotalDifficulty = b.TotalDifficulty
|
||||||
|
ext.Size = b.Size
|
||||||
|
ext.ExtraData = b.ExtraData
|
||||||
|
ext.GasLimit = b.GasLimit
|
||||||
|
ext.GasUsed = b.GasUsed
|
||||||
|
ext.UnixTimestamp = b.UnixTimestamp
|
||||||
|
ext.Transactions = make([]*hexdata, len(b.Transactions))
|
||||||
|
for i, tx := range b.Transactions {
|
||||||
|
ext.Transactions[i] = tx.Hash
|
||||||
|
}
|
||||||
|
ext.Uncles = make([]*hexdata, len(b.Uncles))
|
||||||
|
for i, u := range b.Uncles {
|
||||||
|
ext.Uncles[i] = u.BlockHash
|
||||||
|
}
|
||||||
|
return json.Marshal(ext)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBlockRes converts a core block into its RPC representation. fullTx
// selects whether transactions are embedded in full or by hash when the
// result is marshalled. Returns nil for a nil block.
func NewBlockRes(block *types.Block, fullTx bool) *BlockRes {
	if block == nil {
		return nil
	}

	res := new(BlockRes)
	res.fullTx = fullTx
	res.BlockNumber = newHexNum(block.Number())
	res.BlockHash = newHexData(block.Hash())
	res.ParentHash = newHexData(block.ParentHash())
	res.Nonce = newHexData(block.Nonce())
	res.Sha3Uncles = newHexData(block.Header().UncleHash)
	res.LogsBloom = newHexData(block.Bloom())
	res.TransactionRoot = newHexData(block.Header().TxHash)
	res.StateRoot = newHexData(block.Root())
	res.Miner = newHexData(block.Header().Coinbase)
	res.Difficulty = newHexNum(block.Difficulty())
	res.TotalDifficulty = newHexNum(block.Td)
	res.Size = newHexNum(block.Size().Int64())
	res.ExtraData = newHexData(block.Header().Extra)
	res.GasLimit = newHexNum(block.GasLimit())
	res.GasUsed = newHexNum(block.GasUsed())
	res.UnixTimestamp = newHexNum(block.Time())

	// Link each transaction back to its containing block and position.
	res.Transactions = make([]*TransactionRes, len(block.Transactions()))
	for i, tx := range block.Transactions() {
		res.Transactions[i] = NewTransactionRes(tx)
		res.Transactions[i].BlockHash = res.BlockHash
		res.Transactions[i].BlockNumber = res.BlockNumber
		res.Transactions[i].TxIndex = newHexNum(i)
	}

	res.Uncles = make([]*UncleRes, len(block.Uncles()))
	for i, uncle := range block.Uncles() {
		res.Uncles[i] = NewUncleRes(uncle)
	}

	return res
}
|
||||||
|
|
||||||
|
// TransactionRes is the RPC representation of a transaction.
type TransactionRes struct {
	Hash        *hexdata `json:"hash"`
	Nonce       *hexnum  `json:"nonce"`
	BlockHash   *hexdata `json:"blockHash"`
	BlockNumber *hexnum  `json:"blockNumber"`
	TxIndex     *hexnum  `json:"transactionIndex"`
	From        *hexdata `json:"from"`
	To          *hexdata `json:"to"`
	Value       *hexnum  `json:"value"`
	Gas         *hexnum  `json:"gas"`
	GasPrice    *hexnum  `json:"gasPrice"`
	Input       *hexdata `json:"input"`
}

// NewTransactionRes converts a core transaction into its RPC
// representation. BlockHash, BlockNumber and TxIndex are left unset
// here; NewBlockRes fills them in when the transaction is part of a
// block. Returns nil for a nil transaction.
func NewTransactionRes(tx *types.Transaction) *TransactionRes {
	if tx == nil {
		return nil
	}

	var v = new(TransactionRes)
	v.Hash = newHexData(tx.Hash())
	v.Nonce = newHexNum(tx.Nonce())
	// v.BlockHash =
	// v.BlockNumber =
	// v.TxIndex =
	// Sender recovery failure is ignored; From then encodes the zero
	// value.
	from, _ := tx.From()
	v.From = newHexData(from)
	v.To = newHexData(tx.To())
	v.Value = newHexNum(tx.Value())
	v.Gas = newHexNum(tx.Gas())
	v.GasPrice = newHexNum(tx.GasPrice())
	v.Input = newHexData(tx.Data())
	return v
}
|
||||||
|
|
||||||
|
// UncleRes is the RPC representation of an uncle (ommer) header.
type UncleRes struct {
	BlockNumber     *hexnum  `json:"number"`
	BlockHash       *hexdata `json:"hash"`
	ParentHash      *hexdata `json:"parentHash"`
	Nonce           *hexdata `json:"nonce"`
	Sha3Uncles      *hexdata `json:"sha3Uncles"`
	ReceiptHash     *hexdata `json:"receiptHash"`
	LogsBloom       *hexdata `json:"logsBloom"`
	TransactionRoot *hexdata `json:"transactionsRoot"`
	StateRoot       *hexdata `json:"stateRoot"`
	Miner           *hexdata `json:"miner"`
	Difficulty      *hexnum  `json:"difficulty"`
	ExtraData       *hexdata `json:"extraData"`
	GasLimit        *hexnum  `json:"gasLimit"`
	GasUsed         *hexnum  `json:"gasUsed"`
	UnixTimestamp   *hexnum  `json:"timestamp"`
}

// NewUncleRes converts a block header into its uncle RPC
// representation. Returns nil for a nil header.
func NewUncleRes(h *types.Header) *UncleRes {
	if h == nil {
		return nil
	}

	var v = new(UncleRes)
	v.BlockNumber = newHexNum(h.Number)
	v.BlockHash = newHexData(h.Hash())
	v.ParentHash = newHexData(h.ParentHash)
	v.Sha3Uncles = newHexData(h.UncleHash)
	v.Nonce = newHexData(h.Nonce[:])
	v.LogsBloom = newHexData(h.Bloom)
	v.TransactionRoot = newHexData(h.TxHash)
	v.StateRoot = newHexData(h.Root)
	v.Miner = newHexData(h.Coinbase)
	v.Difficulty = newHexNum(h.Difficulty)
	v.ExtraData = newHexData(h.Extra)
	v.GasLimit = newHexNum(h.GasLimit)
	v.GasUsed = newHexNum(h.GasUsed)
	v.UnixTimestamp = newHexNum(h.Time)
	v.ReceiptHash = newHexData(h.ReceiptHash)

	return v
}
|
||||||
|
|
||||||
|
// type FilterLogRes struct {
|
||||||
|
// Hash string `json:"hash"`
|
||||||
|
// Address string `json:"address"`
|
||||||
|
// Data string `json:"data"`
|
||||||
|
// BlockNumber string `json:"blockNumber"`
|
||||||
|
// TransactionHash string `json:"transactionHash"`
|
||||||
|
// BlockHash string `json:"blockHash"`
|
||||||
|
// TransactionIndex string `json:"transactionIndex"`
|
||||||
|
// LogIndex string `json:"logIndex"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type FilterWhisperRes struct {
|
||||||
|
// Hash string `json:"hash"`
|
||||||
|
// From string `json:"from"`
|
||||||
|
// To string `json:"to"`
|
||||||
|
// Expiry string `json:"expiry"`
|
||||||
|
// Sent string `json:"sent"`
|
||||||
|
// Ttl string `json:"ttl"`
|
||||||
|
// Topics string `json:"topics"`
|
||||||
|
// Payload string `json:"payload"`
|
||||||
|
// WorkProved string `json:"workProved"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
func numString(raw interface{}) (*big.Int, error) {
|
||||||
|
var number *big.Int
|
||||||
|
// Parse as integer
|
||||||
|
num, ok := raw.(float64)
|
||||||
|
if ok {
|
||||||
|
number = big.NewInt(int64(num))
|
||||||
|
return number, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse as string/hexstring
|
||||||
|
str, ok := raw.(string)
|
||||||
|
if ok {
|
||||||
|
number = common.String2Big(str)
|
||||||
|
return number, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, shared.NewInvalidTypeError("", "not a number or string")
|
||||||
|
}
|
||||||
|
|
||||||
|
func blockHeight(raw interface{}, number *int64) error {
|
||||||
|
// Parse as integer
|
||||||
|
num, ok := raw.(float64)
|
||||||
|
if ok {
|
||||||
|
*number = int64(num)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse as string/hexstring
|
||||||
|
str, ok := raw.(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("", "not a number or string")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch str {
|
||||||
|
case "earliest":
|
||||||
|
*number = 0
|
||||||
|
case "latest":
|
||||||
|
*number = -1
|
||||||
|
case "pending":
|
||||||
|
*number = -2
|
||||||
|
default:
|
||||||
|
if common.HasHexPrefix(str) {
|
||||||
|
*number = common.String2Big(str).Int64()
|
||||||
|
} else {
|
||||||
|
return shared.NewInvalidTypeError("blockNumber", "is not a valid string")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// blockHeightFromJson decodes a raw JSON message and parses it as a
// block-number parameter via blockHeight.
func blockHeightFromJson(msg json.RawMessage, number *int64) error {
	var raw interface{}
	if err := json.Unmarshal(msg, &raw); err != nil {
		return shared.NewDecodeParamError(err.Error())
	}
	return blockHeight(raw, number)
}
|
126
rpc/api/personal.go
Normal file
126
rpc/api/personal.go
Normal file
@ -0,0 +1,126 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// PersonalApiVersion is the version reported by the personal API
	// implementation.
	PersonalApiVersion = "1.0"
)

var (
	// personalMapping maps personal_* RPC method names to their handler
	// functions.
	personalMapping = map[string]personalhandler{
		"personal_listAccounts":  (*personalApi).ListAccounts,
		"personal_newAccount":    (*personalApi).NewAccount,
		"personal_deleteAccount": (*personalApi).DeleteAccount,
		"personal_unlockAccount": (*personalApi).UnlockAccount,
	}
)

// personalhandler is the signature shared by all personal_* request
// handlers.
type personalhandler func(*personalApi, *shared.Request) (interface{}, error)

// personalApi implements the personal_* account-management RPC API.
type personalApi struct {
	xeth     *xeth.XEth                 // high-level API backend
	ethereum *eth.Ethereum              // node handle, provides the account manager
	methods  map[string]personalhandler // method name -> handler
	codec    codec.ApiCoder             // parameter/result codec
}
|
||||||
|
|
||||||
|
// create a new net api instance
|
||||||
|
func NewPersonalApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *personalApi {
|
||||||
|
return &personalApi{
|
||||||
|
xeth: xeth,
|
||||||
|
ethereum: eth,
|
||||||
|
methods: personalMapping,
|
||||||
|
codec: coder.New(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// collection with supported methods
|
||||||
|
func (self *personalApi) Methods() []string {
|
||||||
|
methods := make([]string, len(self.methods))
|
||||||
|
i := 0
|
||||||
|
for k := range self.methods {
|
||||||
|
methods[i] = k
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return methods
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute given request
|
||||||
|
func (self *personalApi) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if callback, ok := self.methods[req.Method]; ok {
|
||||||
|
return callback(self, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, shared.NewNotImplementedError(req.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name returns the identifier under which this API is registered.
func (self *personalApi) Name() string {
	return PersonalApiName
}

// ApiVersion returns the version of the personal API implementation.
func (self *personalApi) ApiVersion() string {
	return PersonalApiVersion
}
|
||||||
|
|
||||||
|
// ListAccounts returns the addresses of all accounts known to the node.
func (self *personalApi) ListAccounts(req *shared.Request) (interface{}, error) {
	return self.xeth.Accounts(), nil
}
|
||||||
|
|
||||||
|
func (self *personalApi) NewAccount(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(NewAccountArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
am := self.ethereum.AccountManager()
|
||||||
|
acc, err := am.NewAccount(args.Passphrase)
|
||||||
|
return acc.Address.Hex(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *personalApi) DeleteAccount(req *shared.Request) (interface{}, error) {
|
||||||
|
args := new(DeleteAccountArgs)
|
||||||
|
if err := self.codec.Decode(req.Params, &args); err != nil {
|
||||||
|
return nil, shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
addr := common.HexToAddress(args.Address)
|
||||||
|
am := self.ethereum.AccountManager()
|
||||||
|
if err := am.DeleteAccount(addr, args.Passphrase); err == nil {
|
||||||
|
return true, nil
|
||||||
|
} else {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnlockAccount unlocks the given account with its passphrase, either
// indefinitely (Duration == -1) or for Duration seconds. Returns true on
// success, false with the error otherwise.
func (self *personalApi) UnlockAccount(req *shared.Request) (interface{}, error) {
	args := new(UnlockAccountArgs)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return nil, shared.NewDecodeParamError(err.Error())
	}

	var err error
	am := self.ethereum.AccountManager()
	addr := common.HexToAddress(args.Address)

	// -1 selects an indefinite unlock; anything else is a timed unlock
	// in seconds.
	if args.Duration == -1 {
		err = am.Unlock(addr, args.Passphrase)
	} else {
		err = am.TimedUnlock(addr, args.Passphrase, time.Duration(args.Duration)*time.Second)
	}

	if err == nil {
		return true, nil
	}
	return false, err
}
|
92
rpc/api/personal_args.go
Normal file
92
rpc/api/personal_args.go
Normal file
@ -0,0 +1,92 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
type NewAccountArgs struct {
|
||||||
|
Passphrase string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *NewAccountArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if passhrase, ok := obj[0].(string); ok {
|
||||||
|
args.Passphrase = passhrase
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return shared.NewInvalidTypeError("passhrase", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeleteAccountArgs struct {
|
||||||
|
Address string
|
||||||
|
Passphrase string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *DeleteAccountArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 2 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
if addr, ok := obj[0].(string); ok {
|
||||||
|
args.Address = addr
|
||||||
|
} else {
|
||||||
|
return shared.NewInvalidTypeError("address", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
if passhrase, ok := obj[1].(string); ok {
|
||||||
|
args.Passphrase = passhrase
|
||||||
|
} else {
|
||||||
|
return shared.NewInvalidTypeError("passhrase", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type UnlockAccountArgs struct {
|
||||||
|
Address string
|
||||||
|
Passphrase string
|
||||||
|
Duration int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (args *UnlockAccountArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
args.Duration = -1
|
||||||
|
|
||||||
|
if len(obj) < 2 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
if addrstr, ok := obj[0].(string); ok {
|
||||||
|
args.Address = addrstr
|
||||||
|
} else {
|
||||||
|
return shared.NewInvalidTypeError("address", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
if passphrasestr, ok := obj[1].(string); ok {
|
||||||
|
args.Passphrase = passphrasestr
|
||||||
|
} else {
|
||||||
|
return shared.NewInvalidTypeError("passphrase", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
32
rpc/api/personal_js.go
Normal file
32
rpc/api/personal_js.go
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
const Personal_JS = `
|
||||||
|
web3._extend({
|
||||||
|
property: 'personal',
|
||||||
|
methods:
|
||||||
|
[
|
||||||
|
new web3._extend.Method({
|
||||||
|
name: 'newAccount',
|
||||||
|
call: 'personal_newAccount',
|
||||||
|
params: 1,
|
||||||
|
inputFormatter: [web3._extend.formatters.formatInputString],
|
||||||
|
outputFormatter: web3._extend.formatters.formatOutputString
|
||||||
|
}),
|
||||||
|
new web3._extend.Method({
|
||||||
|
name: 'unlockAccount',
|
||||||
|
call: 'personal_unlockAccount',
|
||||||
|
params: 3,
|
||||||
|
inputFormatter: [web3._extend.formatters.formatInputString,web3._extend.formatters.formatInputString,web3._extend.formatters.formatInputInt],
|
||||||
|
outputFormatter: web3._extend.formatters.formatOutputBool
|
||||||
|
})
|
||||||
|
],
|
||||||
|
properties:
|
||||||
|
[
|
||||||
|
new web3._extend.Property({
|
||||||
|
name: 'listAccounts',
|
||||||
|
getter: 'personal_listAccounts',
|
||||||
|
outputFormatter: function(obj) { return obj; }
|
||||||
|
})
|
||||||
|
]
|
||||||
|
});
|
||||||
|
`
|
179
rpc/api/shh.go
Normal file
179
rpc/api/shh.go
Normal file
@ -0,0 +1,179 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// ShhApiVersion is the version string returned by shhApi.ApiVersion.
	ShhApiVersion = "1.0"
)
|
||||||
|
|
||||||
|
var (
	// shhMapping maps JSON-RPC method names to their shhApi handlers.
	// NOTE(review): GetMessages is defined on shhApi but has no
	// "shh_getMessages" entry here, so it is unreachable over RPC —
	// confirm whether that is intentional.
	shhMapping = map[string]shhhandler{
		"shh_version":          (*shhApi).Version,
		"shh_post":             (*shhApi).Post,
		"shh_hasIdentity":      (*shhApi).HasIdentity,
		"shh_newIdentity":      (*shhApi).NewIdentity,
		"shh_newFilter":        (*shhApi).NewFilter,
		"shh_uninstallFilter":  (*shhApi).UninstallFilter,
		"shh_getFilterChanges": (*shhApi).GetFilterChanges,
	}
)
|
||||||
|
|
||||||
|
func newWhisperOfflineError(method string) error {
|
||||||
|
return shared.NewNotAvailableError(method, "whisper offline")
|
||||||
|
}
|
||||||
|
|
||||||
|
// shh callback handler: the signature every shh RPC method handler follows.
// (The original "net callback handler" comment was a copy-paste leftover.)
type shhhandler func(*shhApi, *shared.Request) (interface{}, error)
|
||||||
|
|
||||||
|
// shh api provider: serves the whisper (shh) JSON-RPC methods.
type shhApi struct {
	// xeth is the frontend used to reach the whisper subsystem.
	xeth *xeth.XEth
	// ethereum is the backing node instance.
	ethereum *eth.Ethereum
	// methods maps RPC method names to handlers (shhMapping).
	methods map[string]shhhandler
	// codec (de)serializes request parameters.
	codec codec.ApiCoder
}
|
||||||
|
|
||||||
|
// create a new whisper api instance
|
||||||
|
func NewShhApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *shhApi {
|
||||||
|
return &shhApi{
|
||||||
|
xeth: xeth,
|
||||||
|
ethereum: eth,
|
||||||
|
methods: shhMapping,
|
||||||
|
codec: coder.New(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// collection with supported methods
|
||||||
|
func (self *shhApi) Methods() []string {
|
||||||
|
methods := make([]string, len(self.methods))
|
||||||
|
i := 0
|
||||||
|
for k := range self.methods {
|
||||||
|
methods[i] = k
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return methods
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute given request
|
||||||
|
func (self *shhApi) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if callback, ok := self.methods[req.Method]; ok {
|
||||||
|
return callback(self, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, shared.NewNotImplementedError(req.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *shhApi) Name() string {
|
||||||
|
return ShhApiName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *shhApi) ApiVersion() string {
|
||||||
|
return ShhApiVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version returns the whisper protocol version, or an offline error when
// the whisper subsystem is not running.
func (self *shhApi) Version(req *shared.Request) (interface{}, error) {
	w := self.xeth.Whisper()
	if w == nil {
		return nil, newWhisperOfflineError(req.Method)
	}

	return w.Version(), nil
}
|
||||||
|
|
||||||
|
// Post injects a whisper message into the network. It returns true on
// success, false together with the underlying error on failure, and an
// offline error when whisper is not running.
func (self *shhApi) Post(req *shared.Request) (interface{}, error) {
	w := self.xeth.Whisper()
	if w == nil {
		return nil, newWhisperOfflineError(req.Method)
	}

	args := new(WhisperMessageArgs)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return nil, err
	}

	err := w.Post(args.Payload, args.To, args.From, args.Topics, args.Priority, args.Ttl)
	if err != nil {
		return false, err
	}

	return true, nil
}
|
||||||
|
|
||||||
|
// HasIdentity reports whether the node holds the private key for the given
// whisper identity.
func (self *shhApi) HasIdentity(req *shared.Request) (interface{}, error) {
	w := self.xeth.Whisper()
	if w == nil {
		return nil, newWhisperOfflineError(req.Method)
	}

	args := new(WhisperIdentityArgs)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return nil, err
	}

	return w.HasIdentity(args.Identity), nil
}
|
||||||
|
|
||||||
|
func (self *shhApi) NewIdentity(req *shared.Request) (interface{}, error) {
|
||||||
|
w := self.xeth.Whisper()
|
||||||
|
if w == nil {
|
||||||
|
return nil, newWhisperOfflineError(req.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.NewIdentity(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFilter installs a whisper message filter and returns its id as a
// hex-encoded number.
// NOTE(review): unlike the other shh handlers this one does not check that
// whisper is online before calling into xeth — confirm that
// xeth.NewWhisperFilter tolerates an offline whisper.
func (self *shhApi) NewFilter(req *shared.Request) (interface{}, error) {
	args := new(WhisperFilterArgs)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return nil, err
	}

	id := self.xeth.NewWhisperFilter(args.To, args.From, args.Topics)
	return newHexNum(big.NewInt(int64(id)).Bytes()), nil
}
|
||||||
|
|
||||||
|
// UninstallFilter removes a previously installed whisper filter, returning
// whether a filter with the given id existed.
func (self *shhApi) UninstallFilter(req *shared.Request) (interface{}, error) {
	args := new(FilterIdArgs)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return nil, err
	}
	return self.xeth.UninstallWhisperFilter(args.Id), nil
}
|
||||||
|
|
||||||
|
// GetFilterChanges returns the whisper messages that arrived for the given
// filter since the previous poll.
func (self *shhApi) GetFilterChanges(req *shared.Request) (interface{}, error) {
	w := self.xeth.Whisper()
	if w == nil {
		return nil, newWhisperOfflineError(req.Method)
	}

	// Retrieve all the new messages arrived since the last request
	args := new(FilterIdArgs)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return nil, err
	}

	return self.xeth.WhisperMessagesChanged(args.Id), nil
}
|
||||||
|
|
||||||
|
// GetMessages returns all cached whisper messages matching an existing
// filter.
// NOTE(review): this handler is not registered in shhMapping, so it is
// currently unreachable over RPC — confirm whether a "shh_getMessages"
// entry is missing.
func (self *shhApi) GetMessages(req *shared.Request) (interface{}, error) {
	w := self.xeth.Whisper()
	if w == nil {
		return nil, newWhisperOfflineError(req.Method)
	}

	// Retrieve all the cached messages matching a specific, existing filter
	args := new(FilterIdArgs)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return nil, err
	}

	return self.xeth.WhisperMessages(args.Id), nil
}
|
158
rpc/api/shh_args.go
Normal file
158
rpc/api/shh_args.go
Normal file
@ -0,0 +1,158 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WhisperMessageArgs holds the parameters for the shh_post RPC call.
type WhisperMessageArgs struct {
	// Payload is the message body.
	Payload string
	// To and From are the recipient/sender identities.
	To   string
	From string
	// Topics the message is published under.
	Topics []string
	// Priority and Ttl control message propagation; see UnmarshalJSON for
	// how they are parsed.
	Priority uint32
	Ttl      uint32
}
|
||||||
|
|
||||||
|
// UnmarshalJSON decodes the positional JSON parameters of shh_post into
// args: a single object carrying payload, addressing, topics, priority and
// ttl. Priority and Ttl are decoded loosely as interface{} and normalized
// via numString (defined elsewhere in this package — presumably it accepts
// JSON numbers and/or numeric strings; confirm against its definition).
func (args *WhisperMessageArgs) UnmarshalJSON(b []byte) (err error) {
	var obj []struct {
		Payload  string
		To       string
		From     string
		Topics   []string
		Priority interface{}
		Ttl      interface{}
	}

	if err = json.Unmarshal(b, &obj); err != nil {
		return shared.NewDecodeParamError(err.Error())
	}

	if len(obj) < 1 {
		return shared.NewInsufficientParamsError(len(obj), 1)
	}
	args.Payload = obj[0].Payload
	args.To = obj[0].To
	args.From = obj[0].From
	args.Topics = obj[0].Topics

	var num *big.Int
	if num, err = numString(obj[0].Priority); err != nil {
		return err
	}
	// Truncates to uint32; values outside that range wrap silently.
	args.Priority = uint32(num.Int64())

	if num, err = numString(obj[0].Ttl); err != nil {
		return err
	}
	args.Ttl = uint32(num.Int64())

	return nil
}
|
||||||
|
|
||||||
|
// WhisperIdentityArgs holds the single identity parameter used by
// identity-related shh RPC calls (e.g. shh_hasIdentity).
type WhisperIdentityArgs struct {
	Identity string
}
|
||||||
|
|
||||||
|
func (args *WhisperIdentityArgs) UnmarshalJSON(b []byte) (err error) {
|
||||||
|
var obj []interface{}
|
||||||
|
if err := json.Unmarshal(b, &obj); err != nil {
|
||||||
|
return shared.NewDecodeParamError(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(obj) < 1 {
|
||||||
|
return shared.NewInsufficientParamsError(len(obj), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
argstr, ok := obj[0].(string)
|
||||||
|
if !ok {
|
||||||
|
return shared.NewInvalidTypeError("arg0", "not a string")
|
||||||
|
}
|
||||||
|
|
||||||
|
args.Identity = argstr
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WhisperFilterArgs holds the parameters for shh_newFilter: optional sender
// and recipient identities plus a nested topic condition list.
type WhisperFilterArgs struct {
	To     string
	From   string
	// Topics is a conjunction of disjunctions: the outer slice ANDs, each
	// inner slice ORs (see UnmarshalJSON for how it is built).
	Topics [][]string
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements the json.Unmarshaler interface, invoked to convert a
// JSON message blob into a WhisperFilterArgs structure.
//
// Accepted shapes for each topic entry: null (matches anything, becomes an
// empty inner slice), a single string, or an array whose elements are null
// (empty string) or strings. Anything else is rejected.
func (args *WhisperFilterArgs) UnmarshalJSON(b []byte) (err error) {
	// Unmarshal the JSON message and sanity check
	var obj []struct {
		To     interface{} `json:"to"`
		From   interface{} `json:"from"`
		Topics interface{} `json:"topics"`
	}
	if err := json.Unmarshal(b, &obj); err != nil {
		return shared.NewDecodeParamError(err.Error())
	}
	if len(obj) < 1 {
		return shared.NewInsufficientParamsError(len(obj), 1)
	}
	// Retrieve the simple data contents of the filter arguments
	if obj[0].To == nil {
		args.To = ""
	} else {
		argstr, ok := obj[0].To.(string)
		if !ok {
			return shared.NewInvalidTypeError("to", "is not a string")
		}
		args.To = argstr
	}
	if obj[0].From == nil {
		args.From = ""
	} else {
		argstr, ok := obj[0].From.(string)
		if !ok {
			return shared.NewInvalidTypeError("from", "is not a string")
		}
		args.From = argstr
	}
	// Construct the nested topic array
	if obj[0].Topics != nil {
		// Make sure we have an actual topic array
		list, ok := obj[0].Topics.([]interface{})
		if !ok {
			return shared.NewInvalidTypeError("topics", "is not an array")
		}
		// Iterate over each topic and handle nil, string or array
		topics := make([][]string, len(list))
		for idx, field := range list {
			switch value := field.(type) {
			case nil:
				topics[idx] = []string{}

			case string:
				topics[idx] = []string{value}

			case []interface{}:
				topics[idx] = make([]string, len(value))
				for i, nested := range value {
					switch value := nested.(type) {
					case nil:
						topics[idx][i] = ""

					case string:
						topics[idx][i] = value

					default:
						return shared.NewInvalidTypeError(fmt.Sprintf("topic[%d][%d]", idx, i), "is not a string")
					}
				}
			default:
				return shared.NewInvalidTypeError(fmt.Sprintf("topic[%d]", idx), "not a string or array")
			}
		}
		args.Topics = topics
	}
	return nil
}
|
30
rpc/api/ssh_js.go
Normal file
30
rpc/api/ssh_js.go
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
const Shh_JS = `
|
||||||
|
web3._extend({
|
||||||
|
property: 'shh',
|
||||||
|
methods:
|
||||||
|
[
|
||||||
|
new web3._extend.Method({
|
||||||
|
name: 'post',
|
||||||
|
call: 'shh_post',
|
||||||
|
params: 6,
|
||||||
|
inputFormatter: [web3._extend.formatters.formatInputString,
|
||||||
|
web3._extend.formatters.formatInputString,
|
||||||
|
web3._extend.formatters.formatInputString,
|
||||||
|
,
|
||||||
|
, web3._extend.formatters.formatInputInt
|
||||||
|
, web3._extend.formatters.formatInputInt],
|
||||||
|
outputFormatter: web3._extend.formatters.formatOutputBool
|
||||||
|
}),
|
||||||
|
],
|
||||||
|
properties:
|
||||||
|
[
|
||||||
|
new web3._extend.Property({
|
||||||
|
name: 'version',
|
||||||
|
getter: 'shh_version',
|
||||||
|
outputFormatter: web3._extend.formatters.formatOutputInt
|
||||||
|
})
|
||||||
|
]
|
||||||
|
});
|
||||||
|
`
|
75
rpc/api/txpool.go
Normal file
75
rpc/api/txpool.go
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// TxPoolApiVersion is the version string returned by
	// txPoolApi.ApiVersion.
	TxPoolApiVersion = "1.0"
)
|
||||||
|
|
||||||
|
var (
	// txpoolMapping maps JSON-RPC method names to their txPoolApi handlers.
	txpoolMapping = map[string]txpoolhandler{
		"txpool_status": (*txPoolApi).Status,
	}
)
|
||||||
|
|
||||||
|
// txpool callback handler: the signature every txpool RPC method handler
// follows. (The original "net callback handler" comment was a copy-paste
// leftover.)
type txpoolhandler func(*txPoolApi, *shared.Request) (interface{}, error)
|
||||||
|
|
||||||
|
// txpool api provider: serves the txpool JSON-RPC methods.
type txPoolApi struct {
	// xeth frontend (unused by Status but kept for symmetry with the other
	// API providers).
	xeth *xeth.XEth
	// ethereum node whose transaction pool is inspected.
	ethereum *eth.Ethereum
	// methods maps RPC method names to handlers (txpoolMapping).
	methods map[string]txpoolhandler
	// codec (de)serializes request parameters.
	codec codec.ApiCoder
}
|
||||||
|
|
||||||
|
// create a new txpool api instance
|
||||||
|
func NewTxPoolApi(xeth *xeth.XEth, eth *eth.Ethereum, coder codec.Codec) *txPoolApi {
|
||||||
|
return &txPoolApi{
|
||||||
|
xeth: xeth,
|
||||||
|
ethereum: eth,
|
||||||
|
methods: txpoolMapping,
|
||||||
|
codec: coder.New(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// collection with supported methods
|
||||||
|
func (self *txPoolApi) Methods() []string {
|
||||||
|
methods := make([]string, len(self.methods))
|
||||||
|
i := 0
|
||||||
|
for k := range self.methods {
|
||||||
|
methods[i] = k
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return methods
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute given request
|
||||||
|
func (self *txPoolApi) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if callback, ok := self.methods[req.Method]; ok {
|
||||||
|
return callback(self, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, shared.NewNotImplementedError(req.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *txPoolApi) Name() string {
|
||||||
|
return TxPoolApiName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *txPoolApi) ApiVersion() string {
|
||||||
|
return TxPoolApiVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status returns the number of pending and queued transactions currently in
// the node's transaction pool.
func (self *txPoolApi) Status(req *shared.Request) (interface{}, error) {
	return map[string]int{
		"pending": self.ethereum.TxPool().GetTransactions().Len(),
		"queued":  self.ethereum.TxPool().GetQueuedTransactions().Len(),
	}, nil
}
|
18
rpc/api/txpool_js.go
Normal file
18
rpc/api/txpool_js.go
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
const TxPool_JS = `
|
||||||
|
web3._extend({
|
||||||
|
property: 'txpool',
|
||||||
|
methods:
|
||||||
|
[
|
||||||
|
],
|
||||||
|
properties:
|
||||||
|
[
|
||||||
|
new web3._extend.Property({
|
||||||
|
name: 'status',
|
||||||
|
getter: 'txpool_status',
|
||||||
|
outputFormatter: function(obj) { return obj; }
|
||||||
|
})
|
||||||
|
]
|
||||||
|
});
|
||||||
|
`
|
180
rpc/api/utils.go
Normal file
180
rpc/api/utils.go
Normal file
@ -0,0 +1,180 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// Mapping between the different methods each api supports
	// (consumed by the console for tab-completion).
	AutoCompletion = map[string][]string{
		"admin": []string{
			"addPeer",
			"peers",
			"nodeInfo",
			"exportChain",
			"importChain",
			"verbosity",
			"chainSyncStatus",
			"setSolc",
			"datadir",
		},
		"debug": []string{
			"dumpBlock",
			"getBlockRlp",
			"printBlock",
			"processBlock",
			"seedHash",
			"setHead",
		},
		"eth": []string{
			"accounts",
			"blockNumber",
			"getBalance",
			"protocolVersion",
			"coinbase",
			"mining",
			"gasPrice",
			"getStorage",
			"storageAt",
			"getStorageAt",
			"getTransactionCount",
			"getBlockTransactionCountByHash",
			"getBlockTransactionCountByNumber",
			"getUncleCountByBlockHash",
			"getUncleCountByBlockNumber",
			"getData",
			"getCode",
			"sign",
			"sendTransaction",
			"transact",
			"estimateGas",
			"call",
			"flush",
			"getBlockByHash",
			"getBlockByNumber",
			"getTransactionByHash",
			"getTransactionByBlockHashAndIndex",
			"getUncleByBlockHashAndIndex",
			"getUncleByBlockNumberAndIndex",
			"getCompilers",
			"compileSolidity",
			"newFilter",
			"newBlockFilter",
			"newPendingTransactionFilter",
			"uninstallFilter",
			"getFilterChanges",
			"getFilterLogs",
			"getLogs",
			"hashrate",
			"getWork",
			"submitWork",
		},
		"miner": []string{
			"hashrate",
			"makeDAG",
			"setExtra",
			"setGasPrice",
			"startAutoDAG",
			"start",
			"stopAutoDAG",
			"stop",
		},
		"net": []string{
			"peerCount",
			"listening",
		},
		"personal": []string{
			"listAccounts",
			"newAccount",
			"deleteAccount",
			"unlockAccount",
		},
		"shh": []string{
			"version",
			"post",
			"hasIdentity",
			"newIdentity",
			"newFilter",
			"uninstallFilter",
			"getFilterChanges",
		},
		"txpool": []string{
			"status",
		},
		"web3": []string{
			"sha3",
			"version",
			"fromWei",
			"toWei",
			"toHex",
			"toAscii",
			"fromAscii",
			"toBigNumber",
			"isAddress",
		},
	}
)
|
||||||
|
|
||||||
|
// Parse a comma separated API string to individual api's
|
||||||
|
func ParseApiString(apistr string, codec codec.Codec, xeth *xeth.XEth, eth *eth.Ethereum) ([]EthereumApi, error) {
|
||||||
|
if len(strings.TrimSpace(apistr)) == 0 {
|
||||||
|
return nil, fmt.Errorf("Empty apistr provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
names := strings.Split(apistr, ",")
|
||||||
|
apis := make([]EthereumApi, len(names))
|
||||||
|
|
||||||
|
for i, name := range names {
|
||||||
|
switch strings.ToLower(strings.TrimSpace(name)) {
|
||||||
|
case AdminApiName:
|
||||||
|
apis[i] = NewAdminApi(xeth, eth, codec)
|
||||||
|
case DebugApiName:
|
||||||
|
apis[i] = NewDebugApi(xeth, eth, codec)
|
||||||
|
case EthApiName:
|
||||||
|
apis[i] = NewEthApi(xeth, codec)
|
||||||
|
case MinerApiName:
|
||||||
|
apis[i] = NewMinerApi(eth, codec)
|
||||||
|
case NetApiName:
|
||||||
|
apis[i] = NewNetApi(xeth, eth, codec)
|
||||||
|
case ShhApiName:
|
||||||
|
apis[i] = NewShhApi(xeth, eth, codec)
|
||||||
|
case TxPoolApiName:
|
||||||
|
apis[i] = NewTxPoolApi(xeth, eth, codec)
|
||||||
|
case PersonalApiName:
|
||||||
|
apis[i] = NewPersonalApi(xeth, eth, codec)
|
||||||
|
case Web3ApiName:
|
||||||
|
apis[i] = NewWeb3Api(xeth, codec)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("Unknown API '%s'", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return apis, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func Javascript(name string) string {
|
||||||
|
switch strings.ToLower(strings.TrimSpace(name)) {
|
||||||
|
case AdminApiName:
|
||||||
|
return Admin_JS
|
||||||
|
case DebugApiName:
|
||||||
|
return Debug_JS
|
||||||
|
case MinerApiName:
|
||||||
|
return Miner_JS
|
||||||
|
case NetApiName:
|
||||||
|
return Net_JS
|
||||||
|
case ShhApiName:
|
||||||
|
return Shh_JS
|
||||||
|
case TxPoolApiName:
|
||||||
|
return TxPool_JS
|
||||||
|
case PersonalApiName:
|
||||||
|
return Personal_JS
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
83
rpc/api/web3.go
Normal file
83
rpc/api/web3.go
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
"github.com/ethereum/go-ethereum/xeth"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Web3ApiVersion is the version string returned by web3Api.ApiVersion.
	Web3ApiVersion = "1.0"
)
|
||||||
|
|
||||||
|
var (
	// Web3Mapping maps JSON-RPC method names to their web3Api handlers.
	// NOTE(review): unlike shhMapping/txpoolMapping this name is exported —
	// confirm whether external code depends on it before unexporting.
	Web3Mapping = map[string]web3handler{
		"web3_sha3":          (*web3Api).Sha3,
		"web3_clientVersion": (*web3Api).ClientVersion,
	}
)
|
||||||
|
|
||||||
|
// web3 callback handler: the signature every web3 RPC method handler
// follows.
type web3handler func(*web3Api, *shared.Request) (interface{}, error)
|
||||||
|
|
||||||
|
// web3 api provider: serves the web3 JSON-RPC methods.
type web3Api struct {
	// xeth frontend used for client-version queries.
	xeth *xeth.XEth
	// methods maps RPC method names to handlers (Web3Mapping).
	methods map[string]web3handler
	// codec (de)serializes request parameters.
	codec codec.ApiCoder
}
|
||||||
|
|
||||||
|
// create a new web3 api instance
|
||||||
|
func NewWeb3Api(xeth *xeth.XEth, coder codec.Codec) *web3Api {
|
||||||
|
return &web3Api{
|
||||||
|
xeth: xeth,
|
||||||
|
methods: Web3Mapping,
|
||||||
|
codec: coder.New(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// collection with supported methods
|
||||||
|
func (self *web3Api) Methods() []string {
|
||||||
|
methods := make([]string, len(self.methods))
|
||||||
|
i := 0
|
||||||
|
for k := range self.methods {
|
||||||
|
methods[i] = k
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
return methods
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute given request
|
||||||
|
func (self *web3Api) Execute(req *shared.Request) (interface{}, error) {
|
||||||
|
if callback, ok := self.methods[req.Method]; ok {
|
||||||
|
return callback(self, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, &shared.NotImplementedError{req.Method}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *web3Api) Name() string {
|
||||||
|
return Web3ApiName
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *web3Api) ApiVersion() string {
|
||||||
|
return Web3ApiVersion
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sha3 calculates the sha3 hash over the hex-encoded req.Params data and
// returns it hex-encoded.
func (self *web3Api) Sha3(req *shared.Request) (interface{}, error) {
	args := new(Sha3Args)
	if err := self.codec.Decode(req.Params, &args); err != nil {
		return nil, err
	}

	return common.ToHex(crypto.Sha3(common.FromHex(args.Data))), nil
}
|
||||||
|
|
||||||
|
// ClientVersion returns the xeth client version string.
// (Fixes the "vrsion" typo in the original comment.)
func (self *web3Api) ClientVersion(req *shared.Request) (interface{}, error) {
	return self.xeth.ClientVersion(), nil
}
|
5
rpc/api/web3_args.go
Normal file
5
rpc/api/web3_args.go
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
// Sha3Args holds the parameter for the web3_sha3 RPC call.
type Sha3Args struct {
	// Data is the hex-encoded input to hash.
	Data string
}
|
@ -2519,6 +2519,14 @@ func TestSigArgs(t *testing.T) {
|
|||||||
if err := json.Unmarshal([]byte(input), &args); err != nil {
|
if err := json.Unmarshal([]byte(input), &args); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if expected.From != args.From {
|
||||||
|
t.Errorf("From should be %v but is %v", expected.From, args.From)
|
||||||
|
}
|
||||||
|
|
||||||
|
if expected.Data != args.Data {
|
||||||
|
t.Errorf("Data should be %v but is %v", expected.Data, args.Data)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSigArgsEmptyData(t *testing.T) {
|
func TestSigArgsEmptyData(t *testing.T) {
|
||||||
|
47
rpc/codec/codec.go
Normal file
47
rpc/codec/codec.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Codec identifies a wire (de)serialization format supported by the rpc
// package; use Codec.New to obtain a coder instance for a connection.
type Codec int
|
||||||
|
|
||||||
|
// (de)serialization support for rpc interface
type ApiCoder interface {
	// Parse message to request from underlying stream
	ReadRequest() (*shared.Request, error)
	// Parse response message from underlying stream
	ReadResponse() (interface{}, error)
	// Encode response to encoded form in underlying stream
	WriteResponse(interface{}) error
	// Decode single message from data
	Decode([]byte, interface{}) error
	// Encode msg to encoded form
	Encode(msg interface{}) ([]byte, error)
	// close the underlying stream
	Close()
}
|
||||||
|
|
||||||
|
// supported codecs
const (
	// JSON is currently the only wire format.
	JSON Codec = iota
	// nCodecs counts the supported codecs (must remain last).
	nCodecs
)
|
||||||
|
|
||||||
|
var (
	// collection with supported coders
	// NOTE(review): this slice is neither read nor written in the visible
	// code — possibly dead; confirm before removing.
	coders = make([]func(net.Conn) ApiCoder, nCodecs)
)
|
||||||
|
|
||||||
|
// create a new coder instance
|
||||||
|
func (c Codec) New(conn net.Conn) ApiCoder {
|
||||||
|
switch c {
|
||||||
|
case JSON:
|
||||||
|
return NewJsonCoder(conn)
|
||||||
|
}
|
||||||
|
|
||||||
|
panic("codec: request for codec #" + strconv.Itoa(int(c)) + " is unavailable")
|
||||||
|
}
|
75
rpc/codec/json.go
Normal file
75
rpc/codec/json.go
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package codec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// MAX_RESPONSE_SIZE caps how many bytes ReadResponse reads from the
	// connection in a single call; larger responses are truncated.
	MAX_RESPONSE_SIZE = 64 * 1024
)
|
||||||
|
|
||||||
|
// Json serialization support
type JsonCodec struct {
	// c is the underlying connection; d/e stream-decode and stream-encode
	// JSON over it.
	c net.Conn
	d *json.Decoder
	e *json.Encoder
}
|
||||||
|
|
||||||
|
// Create new JSON coder instance
|
||||||
|
func NewJsonCoder(conn net.Conn) ApiCoder {
|
||||||
|
return &JsonCodec{
|
||||||
|
c: conn,
|
||||||
|
d: json.NewDecoder(conn),
|
||||||
|
e: json.NewEncoder(conn),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize obj to JSON and write it to conn
|
||||||
|
func (self *JsonCodec) ReadRequest() (*shared.Request, error) {
|
||||||
|
req := shared.Request{}
|
||||||
|
err := self.d.Decode(&req)
|
||||||
|
if err == nil {
|
||||||
|
return &req, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadResponse reads up to MAX_RESPONSE_SIZE bytes from the connection and
// tries to interpret them first as an error response, then as a success
// response.
// NOTE(review): a single conn.Read may return a partial message and its
// error is discarded; responses larger than MAX_RESPONSE_SIZE are silently
// truncated — confirm callers only exchange small, single-read payloads.
func (self *JsonCodec) ReadResponse() (interface{}, error) {
	var err error
	buf := make([]byte, MAX_RESPONSE_SIZE)
	n, _ := self.c.Read(buf)

	var failure shared.ErrorResponse
	if err = json.Unmarshal(buf[:n], &failure); err == nil && failure.Error != nil {
		return failure, nil
	}

	var success shared.SuccessResponse
	if err = json.Unmarshal(buf[:n], &success); err == nil {
		return success, nil
	}

	return nil, err
}
|
||||||
|
|
||||||
|
// Decode unmarshals a single JSON message from data into msg.
// (The original comment was copy-pasted from WriteResponse.)
func (self *JsonCodec) Decode(data []byte, msg interface{}) error {
	return json.Unmarshal(data, msg)
}
|
||||||
|
|
||||||
|
func (self *JsonCodec) Encode(msg interface{}) ([]byte, error) {
|
||||||
|
return json.Marshal(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteResponse JSON-encodes res and writes it to the underlying
// connection. (The original comment was copy-pasted from a read method.)
func (self *JsonCodec) WriteResponse(res interface{}) error {
	return self.e.Encode(&res)
}
|
||||||
|
|
||||||
|
// Close decoder and encoder
|
||||||
|
func (self *JsonCodec) Close() {
|
||||||
|
self.c.Close()
|
||||||
|
}
|
7
rpc/comms/comms.go
Normal file
7
rpc/comms/comms.go
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
package comms
|
||||||
|
|
||||||
|
// EthereumClient is the transport-agnostic client side of an RPC
// connection: it can send requests, receive responses and be closed.
type EthereumClient interface {
	// Close releases the underlying connection.
	Close()
	// Send transmits a message to the server.
	Send(interface{}) error
	// Recv reads the next message from the server.
	Recv() (interface{}, error)
}
|
37
rpc/comms/ipc.go
Normal file
37
rpc/comms/ipc.go
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
package comms
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/api"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IpcConfig configures an IPC endpoint (UNIX socket path on posix, named
// pipe on Windows — see NewIpcClient).
type IpcConfig struct {
	Endpoint string
}
|
||||||
|
|
||||||
|
// ipcClient is an EthereumClient speaking over an IPC connection wrapped in
// an ApiCoder.
type ipcClient struct {
	c codec.ApiCoder
}
|
||||||
|
|
||||||
|
func (self *ipcClient) Close() {
|
||||||
|
self.c.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send writes req to the server over the coder.
// NOTE(review): this reuses the coder's WriteResponse method to transmit a
// request — the naming is misleading but the wire behavior is plain JSON
// encoding; confirm before renaming anything on ApiCoder.
func (self *ipcClient) Send(req interface{}) error {
	return self.c.WriteResponse(req)
}
|
||||||
|
|
||||||
|
func (self *ipcClient) Recv() (interface{}, error) {
|
||||||
|
return self.c.ReadResponse()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new IPC client, UNIX domain socket on posix, named pipe on Windows
// (the platform-specific dialing lives in newIpcClient, selected by build
// tags).
func NewIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) {
	return newIpcClient(cfg, codec)
}
|
||||||
|
|
||||||
|
// Start IPC server: merges the given APIs into one and serves them on the
// configured endpoint via the platform-specific startIpc.
func StartIpc(cfg IpcConfig, codec codec.Codec, apis ...api.EthereumApi) error {
	offeredApi := api.Merge(apis...)
	return startIpc(cfg, codec, offeredApi)
}
|
77
rpc/comms/ipc_unix.go
Normal file
77
rpc/comms/ipc_unix.go
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
|
||||||
|
|
||||||
|
package comms
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/logger"
|
||||||
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/api"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
func newIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) {
|
||||||
|
c, err := net.DialUnix("unix", nil, &net.UnixAddr{cfg.Endpoint, "unix"})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ipcClient{codec.New(c)}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error {
|
||||||
|
os.Remove(cfg.Endpoint) // in case it still exists from a previous run
|
||||||
|
|
||||||
|
l, err := net.ListenUnix("unix", &net.UnixAddr{Name: cfg.Endpoint, Net: "unix"})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
os.Chmod(cfg.Endpoint, 0600)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
conn, err := l.AcceptUnix()
|
||||||
|
if err != nil {
|
||||||
|
glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
go func(conn net.Conn) {
|
||||||
|
codec := codec.New(conn)
|
||||||
|
|
||||||
|
for {
|
||||||
|
req, err := codec.ReadRequest()
|
||||||
|
if err == io.EOF {
|
||||||
|
codec.Close()
|
||||||
|
return
|
||||||
|
} else if err != nil {
|
||||||
|
glog.V(logger.Error).Infof("IPC recv err - %v\n", err)
|
||||||
|
codec.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var rpcResponse interface{}
|
||||||
|
res, err := api.Execute(req)
|
||||||
|
|
||||||
|
rpcResponse = shared.NewRpcResponse(req.Id, req.Jsonrpc, res, err)
|
||||||
|
err = codec.WriteResponse(rpcResponse)
|
||||||
|
if err != nil {
|
||||||
|
glog.V(logger.Error).Infof("IPC send err - %v\n", err)
|
||||||
|
codec.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(conn)
|
||||||
|
}
|
||||||
|
|
||||||
|
os.Remove(cfg.Endpoint)
|
||||||
|
}()
|
||||||
|
|
||||||
|
glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
696
rpc/comms/ipc_windows.go
Normal file
696
rpc/comms/ipc_windows.go
Normal file
@ -0,0 +1,696 @@
|
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package comms
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/logger"
|
||||||
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/api"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Lazily resolved kernel32.dll entry points used by the named-pipe transport.
var (
	modkernel32 = syscall.NewLazyDLL("kernel32.dll")

	procCreateNamedPipeW    = modkernel32.NewProc("CreateNamedPipeW")
	procConnectNamedPipe    = modkernel32.NewProc("ConnectNamedPipe")
	procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe")
	procWaitNamedPipeW      = modkernel32.NewProc("WaitNamedPipeW")
	procCreateEventW        = modkernel32.NewProc("CreateEventW")
	procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
	procCancelIoEx          = modkernel32.NewProc("CancelIoEx")
)
|
||||||
|
|
||||||
|
// createNamedPipe wraps the CreateNamedPipeW syscall. On failure (invalid
// handle) it returns the Win32 error, falling back to EINVAL when the
// syscall reports no errno.
func createNamedPipe(name *uint16, openMode uint32, pipeMode uint32, maxInstances uint32, outBufSize uint32, inBufSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) {
	r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(openMode), uintptr(pipeMode), uintptr(maxInstances), uintptr(outBufSize), uintptr(inBufSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
	handle = syscall.Handle(r0)
	if handle == syscall.InvalidHandle {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
|
||||||
|
|
||||||
|
// cancelIoEx wraps the CancelIoEx syscall, cancelling pending overlapped I/O
// on the given handle.
func cancelIoEx(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) {
	r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0)
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
|
||||||
|
|
||||||
|
// connectNamedPipe wraps the ConnectNamedPipe syscall, which waits for a
// client to connect to the server end of a pipe (overlapped mode here).
func connectNamedPipe(handle syscall.Handle, overlapped *syscall.Overlapped) (err error) {
	r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), 0)
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
|
||||||
|
|
||||||
|
// disconnectNamedPipe wraps the DisconnectNamedPipe syscall, disconnecting
// the server end of a pipe from its client.
func disconnectNamedPipe(handle syscall.Handle) (err error) {
	r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(handle), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
|
||||||
|
|
||||||
|
// waitNamedPipe wraps the WaitNamedPipeW syscall, waiting up to timeout
// milliseconds for an instance of the named pipe to become available.
func waitNamedPipe(name *uint16, timeout uint32) (err error) {
	r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
|
||||||
|
|
||||||
|
// createEvent wraps the CreateEventW syscall, translating the Go bools into
// the Win32 BOOL arguments it expects.
func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) {
	var _p0 uint32
	if manualReset {
		_p0 = 1
	} else {
		_p0 = 0
	}
	var _p1 uint32
	if initialState {
		_p1 = 1
	} else {
		_p1 = 0
	}
	r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(sa)), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(name)), 0, 0)
	handle = syscall.Handle(r0)
	if handle == syscall.InvalidHandle {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
|
||||||
|
|
||||||
|
// getOverlappedResult wraps the GetOverlappedResult syscall, storing the
// number of transferred bytes in *transferred; wait controls whether the
// call blocks until the operation completes.
func getOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, transferred *uint32, wait bool) (err error) {
	var _p0 uint32
	if wait {
		_p0 = 1
	} else {
		_p0 = 0
	}
	r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transferred)), uintptr(_p0), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
|
||||||
|
|
||||||
|
// Win32 constants for the named-pipe API (CreateNamedPipe, WaitNamedPipe)
// and the error codes this package distinguishes.
const (
	// openMode
	pipe_access_duplex   = 0x3
	pipe_access_inbound  = 0x1
	pipe_access_outbound = 0x2

	// openMode write flags
	file_flag_first_pipe_instance = 0x00080000
	file_flag_write_through       = 0x80000000
	file_flag_overlapped          = 0x40000000

	// openMode ACL flags
	write_dac              = 0x00040000
	write_owner            = 0x00080000
	access_system_security = 0x01000000

	// pipeMode
	pipe_type_byte    = 0x0
	pipe_type_message = 0x4

	// pipeMode read mode flags
	pipe_readmode_byte    = 0x0
	pipe_readmode_message = 0x2

	// pipeMode wait mode flags
	pipe_wait   = 0x0
	pipe_nowait = 0x1

	// pipeMode remote-client mode flags
	pipe_accept_remote_clients = 0x0
	pipe_reject_remote_clients = 0x8

	pipe_unlimited_instances = 255

	nmpwait_wait_forever = 0xFFFFFFFF

	// the two not-an-errors below occur if a client connects to the pipe between
	// the server's CreateNamedPipe and ConnectNamedPipe calls.
	error_no_data        syscall.Errno = 0xE8
	error_pipe_connected syscall.Errno = 0x217
	error_pipe_busy      syscall.Errno = 0xE7
	error_sem_timeout    syscall.Errno = 0x79

	error_bad_pathname syscall.Errno = 0xA1
	error_invalid_name syscall.Errno = 0x7B

	error_io_incomplete syscall.Errno = 0x3e4
)
|
||||||
|
|
||||||
|
// Compile-time checks that the pipe types satisfy the net interfaces.
var _ net.Conn = (*PipeConn)(nil)
var _ net.Listener = (*PipeListener)(nil)
|
||||||
|
|
||||||
|
// ErrClosed is the error returned by PipeListener.Accept when Close is called
// on the PipeListener.
var ErrClosed = PipeError{"Pipe has been closed.", false}
|
||||||
|
|
||||||
|
// PipeError is an error related to a call to a pipe
type PipeError struct {
	msg     string // human-readable description
	timeout bool   // true when the failure was a deadline/timeout
}
|
||||||
|
|
||||||
|
// Error implements the error interface
func (e PipeError) Error() string {
	return e.msg
}
|
||||||
|
|
||||||
|
// Timeout implements net.AddrError.Timeout()
func (e PipeError) Timeout() bool {
	return e.timeout
}
|
||||||
|
|
||||||
|
// Temporary implements net.AddrError.Temporary()
// Pipe errors are never considered transient.
func (e PipeError) Temporary() bool {
	return false
}
|
||||||
|
|
||||||
|
// Dial connects to a named pipe with the given address. If the specified pipe is not available,
// it will wait indefinitely for the pipe to become available.
//
// The address must be of the form \\.\\pipe\<name> for local pipes and \\<computer>\pipe\<name>
// for remote pipes.
//
// Dial will return a PipeError if you pass in a badly formatted pipe name.
//
// Examples:
//   // local pipe
//   conn, err := Dial(`\\.\pipe\mypipename`)
//
//   // remote pipe
//   conn, err := Dial(`\\othercomp\pipe\mypipename`)
func Dial(address string) (*PipeConn, error) {
	for {
		conn, err := dial(address, nmpwait_wait_forever)
		if err == nil {
			return conn, nil
		}
		if isPipeNotReady(err) {
			// Busy or not yet created: back off briefly and retry forever.
			<-time.After(100 * time.Millisecond)
			continue
		}
		return nil, err
	}
}
|
||||||
|
|
||||||
|
// DialTimeout acts like Dial, but will time out after the duration of timeout
func DialTimeout(address string, timeout time.Duration) (*PipeConn, error) {
	deadline := time.Now().Add(timeout)

	now := time.Now()
	for now.Before(deadline) {
		// Pass the remaining budget to WaitNamedPipe on each attempt.
		millis := uint32(deadline.Sub(now) / time.Millisecond)
		conn, err := dial(address, millis)
		if err == nil {
			return conn, nil
		}
		if err == error_sem_timeout {
			// This is WaitNamedPipe's timeout error, so we know we're done
			return nil, PipeError{fmt.Sprintf(
				"Timed out waiting for pipe '%s' to come available", address), true}
		}
		if isPipeNotReady(err) {
			// Retry after a short sleep, but never past the deadline.
			left := deadline.Sub(time.Now())
			retry := 100 * time.Millisecond
			if left > retry {
				<-time.After(retry)
			} else {
				<-time.After(left - time.Millisecond)
			}
			now = time.Now()
			continue
		}
		return nil, err
	}
	return nil, PipeError{fmt.Sprintf(
		"Timed out waiting for pipe '%s' to come available", address), true}
}
|
||||||
|
|
||||||
|
// isPipeNotReady checks the error to see if it indicates the pipe is not ready
func isPipeNotReady(err error) bool {
	// Pipe Busy means another client just grabbed the open pipe end,
	// and the server hasn't made a new one yet.
	// File Not Found means the server hasn't created the pipe yet.
	// Neither is a fatal error.

	return err == syscall.ERROR_FILE_NOT_FOUND || err == error_pipe_busy
}
|
||||||
|
|
||||||
|
// newOverlapped creates a structure used to track asynchronous
// I/O requests that have been issued.
// The embedded event is manual-reset and initially signaled.
func newOverlapped() (*syscall.Overlapped, error) {
	event, err := createEvent(nil, true, true, nil)
	if err != nil {
		return nil, err
	}
	return &syscall.Overlapped{HEvent: event}, nil
}
|
||||||
|
|
||||||
|
// waitForCompletion waits for an asynchronous I/O request referred to by overlapped to complete.
// This function returns the number of bytes transferred by the operation and an error code if
// applicable (nil otherwise).
func waitForCompletion(handle syscall.Handle, overlapped *syscall.Overlapped) (uint32, error) {
	_, err := syscall.WaitForSingleObject(overlapped.HEvent, syscall.INFINITE)
	if err != nil {
		return 0, err
	}
	var transferred uint32
	err = getOverlappedResult(handle, overlapped, &transferred, true)
	return transferred, err
}
|
||||||
|
|
||||||
|
// dial is a helper to initiate a connection to a named pipe that has been started by a server.
|
||||||
|
// The timeout is only enforced if the pipe server has already created the pipe, otherwise
|
||||||
|
// this function will return immediately.
|
||||||
|
func dial(address string, timeout uint32) (*PipeConn, error) {
|
||||||
|
name, err := syscall.UTF16PtrFromString(string(address))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// If at least one instance of the pipe has been created, this function
|
||||||
|
// will wait timeout milliseconds for it to become available.
|
||||||
|
// It will return immediately regardless of timeout, if no instances
|
||||||
|
// of the named pipe have been created yet.
|
||||||
|
// If this returns with no error, there is a pipe available.
|
||||||
|
if err := waitNamedPipe(name, timeout); err != nil {
|
||||||
|
if err == error_bad_pathname {
|
||||||
|
// badly formatted pipe name
|
||||||
|
return nil, badAddr(address)
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
pathp, err := syscall.UTF16PtrFromString(address)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
handle, err := syscall.CreateFile(pathp, syscall.GENERIC_READ|syscall.GENERIC_WRITE,
|
||||||
|
uint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), nil, syscall.OPEN_EXISTING,
|
||||||
|
syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &PipeConn{handle: handle, addr: PipeAddr(address)}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Listen returns a new PipeListener that will listen on a pipe with the given
// address. The address must be of the form \\.\pipe\<name>
//
// Listen will return a PipeError for an incorrectly formatted pipe name.
func Listen(address string) (*PipeListener, error) {
	// first=true: fail if the pipe already exists, so two servers cannot
	// bind the same name.
	handle, err := createPipe(address, true)
	if err == error_invalid_name {
		return nil, badAddr(address)
	}
	if err != nil {
		return nil, err
	}
	return &PipeListener{
		addr:   PipeAddr(address),
		handle: handle,
	}, nil
}
|
||||||
|
|
||||||
|
// PipeListener is a named pipe listener. Clients should typically
// use variables of type net.Listener instead of assuming named pipe.
type PipeListener struct {
	addr   PipeAddr       // pipe path this listener is bound to
	handle syscall.Handle // pre-created pipe instance consumed by the first Accept
	closed bool           // set once Close has run

	// acceptHandle contains the current handle waiting for
	// an incoming connection or nil.
	acceptHandle syscall.Handle
	// acceptOverlapped is set before waiting on a connection.
	// If not waiting, it is nil.
	acceptOverlapped *syscall.Overlapped
	// acceptMutex protects the handle and overlapped structure.
	acceptMutex sync.Mutex
}
|
||||||
|
|
||||||
|
// Accept implements the Accept method in the net.Listener interface; it
// waits for the next call and returns a generic net.Conn.
func (l *PipeListener) Accept() (net.Conn, error) {
	c, err := l.AcceptPipe()
	for err == error_no_data {
		// Ignore clients that connect and immediately disconnect.
		c, err = l.AcceptPipe()
	}
	if err != nil {
		return nil, err
	}
	return c, nil
}
|
||||||
|
|
||||||
|
// AcceptPipe accepts the next incoming call and returns the new connection.
// It might return an error if a client connected and immediately cancelled
// the connection.
func (l *PipeListener) AcceptPipe() (*PipeConn, error) {
	if l == nil || l.addr == "" || l.closed {
		return nil, syscall.EINVAL
	}

	// the first time we call accept, the handle will have been created by the Listen
	// call. This is to prevent race conditions where the client thinks the server
	// isn't listening because it hasn't actually called create yet. After the first time, we'll
	// have to create a new handle each time
	handle := l.handle
	if handle == 0 {
		var err error
		handle, err = createPipe(string(l.addr), false)
		if err != nil {
			return nil, err
		}
	} else {
		l.handle = 0
	}

	overlapped, err := newOverlapped()
	if err != nil {
		return nil, err
	}
	defer syscall.CloseHandle(overlapped.HEvent)
	if err := connectNamedPipe(handle, overlapped); err != nil && err != error_pipe_connected {
		if err == error_io_incomplete || err == syscall.ERROR_IO_PENDING {
			// Publish the pending accept so Close() can cancel it, and
			// clear it again once the wait finishes.
			l.acceptMutex.Lock()
			l.acceptOverlapped = overlapped
			l.acceptHandle = handle
			l.acceptMutex.Unlock()
			defer func() {
				l.acceptMutex.Lock()
				l.acceptOverlapped = nil
				l.acceptHandle = 0
				l.acceptMutex.Unlock()
			}()

			_, err = waitForCompletion(handle, overlapped)
		}
		if err == syscall.ERROR_OPERATION_ABORTED {
			// Return error compatible to net.Listener.Accept() in case the
			// listener was closed.
			return nil, ErrClosed
		}
		if err != nil {
			return nil, err
		}
	}
	return &PipeConn{handle: handle, addr: l.addr}, nil
}
|
||||||
|
|
||||||
|
// Close stops listening on the address.
// Already Accepted connections are not closed.
func (l *PipeListener) Close() error {
	if l.closed {
		return nil
	}
	l.closed = true
	// Tear down the pre-created pipe instance, if it has not been consumed
	// by an Accept yet.
	if l.handle != 0 {
		err := disconnectNamedPipe(l.handle)
		if err != nil {
			return err
		}
		err = syscall.CloseHandle(l.handle)
		if err != nil {
			return err
		}
		l.handle = 0
	}
	l.acceptMutex.Lock()
	defer l.acceptMutex.Unlock()
	if l.acceptOverlapped != nil && l.acceptHandle != 0 {
		// Cancel the pending IO. This call does not block, so it is safe
		// to hold onto the mutex above.
		if err := cancelIoEx(l.acceptHandle, l.acceptOverlapped); err != nil {
			return err
		}
		err := syscall.CloseHandle(l.acceptOverlapped.HEvent)
		if err != nil {
			return err
		}
		l.acceptOverlapped.HEvent = 0
		err = syscall.CloseHandle(l.acceptHandle)
		if err != nil {
			return err
		}
		l.acceptHandle = 0
	}
	return nil
}
|
||||||
|
|
||||||
|
// Addr returns the listener's network address, a PipeAddr.
func (l *PipeListener) Addr() net.Addr { return l.addr }
|
||||||
|
|
||||||
|
// PipeConn is the implementation of the net.Conn interface for named pipe connections.
type PipeConn struct {
	handle syscall.Handle // open pipe handle (overlapped mode)
	addr   PipeAddr       // pipe path, reported as both local and remote address

	// these aren't actually used yet
	readDeadline  *time.Time
	writeDeadline *time.Time
}
|
||||||
|
|
||||||
|
// iodata bundles the result of one overlapped I/O operation: bytes
// transferred and the error, if any.
type iodata struct {
	n   uint32
	err error
}
|
||||||
|
|
||||||
|
// completeRequest looks at iodata to see if a request is pending. If so, it waits for it to either complete or to
|
||||||
|
// abort due to hitting the specified deadline. Deadline may be set to nil to wait forever. If no request is pending,
|
||||||
|
// the content of iodata is returned.
|
||||||
|
func (c *PipeConn) completeRequest(data iodata, deadline *time.Time, overlapped *syscall.Overlapped) (int, error) {
|
||||||
|
if data.err == error_io_incomplete || data.err == syscall.ERROR_IO_PENDING {
|
||||||
|
var timer <-chan time.Time
|
||||||
|
if deadline != nil {
|
||||||
|
if timeDiff := deadline.Sub(time.Now()); timeDiff > 0 {
|
||||||
|
timer = time.After(timeDiff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
done := make(chan iodata)
|
||||||
|
go func() {
|
||||||
|
n, err := waitForCompletion(c.handle, overlapped)
|
||||||
|
done <- iodata{n, err}
|
||||||
|
}()
|
||||||
|
select {
|
||||||
|
case data = <-done:
|
||||||
|
case <-timer:
|
||||||
|
syscall.CancelIoEx(c.handle, overlapped)
|
||||||
|
data = iodata{0, timeout(c.addr.String())}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Windows will produce ERROR_BROKEN_PIPE upon closing
|
||||||
|
// a handle on the other end of a connection. Go RPC
|
||||||
|
// expects an io.EOF error in this case.
|
||||||
|
if data.err == syscall.ERROR_BROKEN_PIPE {
|
||||||
|
data.err = io.EOF
|
||||||
|
}
|
||||||
|
return int(data.n), data.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read implements the net.Conn Read method.
func (c *PipeConn) Read(b []byte) (int, error) {
	// Use ReadFile() rather than Read() because the latter
	// contains a workaround that eats ERROR_BROKEN_PIPE.
	overlapped, err := newOverlapped()
	if err != nil {
		return 0, err
	}
	defer syscall.CloseHandle(overlapped.HEvent)
	var n uint32
	err = syscall.ReadFile(c.handle, b, &n, overlapped)
	return c.completeRequest(iodata{n, err}, c.readDeadline, overlapped)
}
|
||||||
|
|
||||||
|
// Write implements the net.Conn Write method.
func (c *PipeConn) Write(b []byte) (int, error) {
	overlapped, err := newOverlapped()
	if err != nil {
		return 0, err
	}
	defer syscall.CloseHandle(overlapped.HEvent)
	var n uint32
	err = syscall.WriteFile(c.handle, b, &n, overlapped)
	return c.completeRequest(iodata{n, err}, c.writeDeadline, overlapped)
}
|
||||||
|
|
||||||
|
// Close closes the connection.
func (c *PipeConn) Close() error {
	return syscall.CloseHandle(c.handle)
}
|
||||||
|
|
||||||
|
// LocalAddr returns the local network address.
func (c *PipeConn) LocalAddr() net.Addr {
	return c.addr
}
|
||||||
|
|
||||||
|
// RemoteAddr returns the remote network address.
func (c *PipeConn) RemoteAddr() net.Addr {
	// not sure what to do here, we don't have remote addr....
	return c.addr
}
|
||||||
|
|
||||||
|
// SetDeadline implements the net.Conn SetDeadline method.
// Note that timeouts are only supported on Windows Vista/Server 2008 and above
func (c *PipeConn) SetDeadline(t time.Time) error {
	c.SetReadDeadline(t)
	c.SetWriteDeadline(t)
	return nil
}
|
||||||
|
|
||||||
|
// SetReadDeadline implements the net.Conn SetReadDeadline method.
// Note that timeouts are only supported on Windows Vista/Server 2008 and above
func (c *PipeConn) SetReadDeadline(t time.Time) error {
	c.readDeadline = &t
	return nil
}
|
||||||
|
|
||||||
|
// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
// Note that timeouts are only supported on Windows Vista/Server 2008 and above
func (c *PipeConn) SetWriteDeadline(t time.Time) error {
	c.writeDeadline = &t
	return nil
}
|
||||||
|
|
||||||
|
// PipeAddr represents the address of a named pipe.
type PipeAddr string
|
||||||
|
|
||||||
|
// Network returns the address's network name, "pipe".
func (a PipeAddr) Network() string { return "pipe" }
|
||||||
|
|
||||||
|
// String returns the address of the pipe
func (a PipeAddr) String() string {
	return string(a)
}
|
||||||
|
|
||||||
|
// createPipe is a helper function to make sure we always create pipes
// with the same arguments, since subsequent calls to create pipe need
// to use the same arguments as the first one. If first is set, fail
// if the pipe already exists.
func createPipe(address string, first bool) (syscall.Handle, error) {
	n, err := syscall.UTF16PtrFromString(address)
	if err != nil {
		return 0, err
	}
	mode := uint32(pipe_access_duplex | syscall.FILE_FLAG_OVERLAPPED)
	if first {
		mode |= file_flag_first_pipe_instance
	}
	return createNamedPipe(n,
		mode,
		pipe_type_byte,
		pipe_unlimited_instances,
		512, 512, 0, nil)
}
|
||||||
|
|
||||||
|
// badAddr builds the PipeError returned for a malformed pipe address.
func badAddr(addr string) PipeError {
	return PipeError{fmt.Sprintf("Invalid pipe address '%s'.", addr), false}
}
|
||||||
|
// timeout builds the PipeError returned when pipe I/O hits its deadline.
func timeout(addr string) PipeError {
	return PipeError{fmt.Sprintf("Pipe IO timed out waiting for '%s'", addr), true}
}
|
||||||
|
|
||||||
|
// newIpcClient connects to the named pipe at cfg.Endpoint (waiting
// indefinitely for it to appear) and wraps the connection with the codec.
func newIpcClient(cfg IpcConfig, codec codec.Codec) (*ipcClient, error) {
	c, err := Dial(cfg.Endpoint)
	if err != nil {
		return nil, err
	}

	return &ipcClient{codec.New(c)}, nil
}
|
||||||
|
|
||||||
|
// startIpc creates a named-pipe listener at cfg.Endpoint and serves the
// given API over it. Accept and per-connection loops run in background
// goroutines; this call returns once the listener is set up.
func startIpc(cfg IpcConfig, codec codec.Codec, api api.EthereumApi) error {
	os.Remove(cfg.Endpoint) // in case it still exists from a previous run

	l, err := Listen(cfg.Endpoint)
	if err != nil {
		return err
	}
	// NOTE(review): Chmod on a named-pipe path is a no-op carried over from
	// the unix implementation; its error is intentionally ignored.
	os.Chmod(cfg.Endpoint, 0600)

	go func() {
		for {
			conn, err := l.Accept()
			if err != nil {
				glog.V(logger.Error).Infof("Error accepting ipc connection - %v\n", err)
				continue
			}

			// One goroutine and one coder per accepted connection.
			go func(conn net.Conn) {
				codec := codec.New(conn)

				for {
					req, err := codec.ReadRequest()
					if err == io.EOF {
						// Clean disconnect from the peer.
						codec.Close()
						return
					} else if err != nil {
						glog.V(logger.Error).Infof("IPC recv err - %v\n", err)
						codec.Close()
						return
					}

					var rpcResponse interface{}
					res, err := api.Execute(req)

					// Execute errors become RPC error responses, not
					// transport failures.
					rpcResponse = shared.NewRpcResponse(req.Id, req.Jsonrpc, res, err)
					err = codec.WriteResponse(rpcResponse)
					if err != nil {
						glog.V(logger.Error).Infof("IPC send err - %v\n", err)
						codec.Close()
						return
					}
				}
			}(conn)
		}
	}()

	glog.V(logger.Info).Infof("IPC service started (%s)\n", cfg.Endpoint)

	return nil
}
|
92
rpc/jeth.go
92
rpc/jeth.go
@ -4,17 +4,23 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
"reflect"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/jsre"
|
"github.com/ethereum/go-ethereum/jsre"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/codec"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/comms"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc/shared"
|
||||||
"github.com/robertkrimen/otto"
|
"github.com/robertkrimen/otto"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Jeth struct {
|
type Jeth struct {
|
||||||
ethApi *EthereumApi
|
ethApi *EthereumApi
|
||||||
re *jsre.JSRE
|
re *jsre.JSRE
|
||||||
|
ipcpath string
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewJeth(ethApi *EthereumApi, re *jsre.JSRE) *Jeth {
|
func NewJeth(ethApi *EthereumApi, re *jsre.JSRE, ipcpath string) *Jeth {
|
||||||
return &Jeth{ethApi, re}
|
return &Jeth{ethApi, re, ipcpath}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (self *Jeth) err(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) {
|
func (self *Jeth) err(call otto.FunctionCall, code int, msg string, id interface{}) (response otto.Value) {
|
||||||
@ -81,3 +87,85 @@ func (self *Jeth) Send(call otto.FunctionCall) (response otto.Value) {
|
|||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (self *Jeth) SendIpc(call otto.FunctionCall) (response otto.Value) {
|
||||||
|
reqif, err := call.Argument(0).Export()
|
||||||
|
if err != nil {
|
||||||
|
return self.err(call, -32700, err.Error(), nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := comms.NewIpcClient(comms.IpcConfig{self.ipcpath}, codec.JSON)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Unable to connect to geth.")
|
||||||
|
return self.err(call, -32603, err.Error(), -1)
|
||||||
|
}
|
||||||
|
defer client.Close()
|
||||||
|
|
||||||
|
jsonreq, err := json.Marshal(reqif)
|
||||||
|
var reqs []RpcRequest
|
||||||
|
batch := true
|
||||||
|
err = json.Unmarshal(jsonreq, &reqs)
|
||||||
|
if err != nil {
|
||||||
|
reqs = make([]RpcRequest, 1)
|
||||||
|
err = json.Unmarshal(jsonreq, &reqs[0])
|
||||||
|
batch = false
|
||||||
|
}
|
||||||
|
|
||||||
|
call.Otto.Set("response_len", len(reqs))
|
||||||
|
call.Otto.Run("var ret_response = new Array(response_len);")
|
||||||
|
|
||||||
|
for i, req := range reqs {
|
||||||
|
err := client.Send(&req)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error send request:", err)
|
||||||
|
return self.err(call, -32603, err.Error(), req.Id)
|
||||||
|
}
|
||||||
|
|
||||||
|
respif, err := client.Recv()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("Error recv response:", err)
|
||||||
|
return self.err(call, -32603, err.Error(), req.Id)
|
||||||
|
}
|
||||||
|
|
||||||
|
if res, ok := respif.(shared.SuccessResponse); ok {
|
||||||
|
call.Otto.Set("ret_id", res.Id)
|
||||||
|
call.Otto.Set("ret_jsonrpc", res.Jsonrpc)
|
||||||
|
resObj, _ := json.Marshal(res.Result)
|
||||||
|
call.Otto.Set("ret_result", string(resObj))
|
||||||
|
call.Otto.Set("response_idx", i)
|
||||||
|
|
||||||
|
response, err = call.Otto.Run(`
|
||||||
|
ret_response[response_idx] = { jsonrpc: ret_jsonrpc, id: ret_id, result: JSON.parse(ret_result) };
|
||||||
|
`)
|
||||||
|
} else if res, ok := respif.(shared.ErrorResponse); ok {
|
||||||
|
fmt.Printf("Error: %s (%d)\n", res.Error.Message, res.Error.Code)
|
||||||
|
|
||||||
|
call.Otto.Set("ret_id", res.Id)
|
||||||
|
call.Otto.Set("ret_jsonrpc", res.Jsonrpc)
|
||||||
|
call.Otto.Set("ret_error", res.Error)
|
||||||
|
call.Otto.Set("response_idx", i)
|
||||||
|
|
||||||
|
response, _ = call.Otto.Run(`
|
||||||
|
ret_response = { jsonrpc: ret_jsonrpc, id: ret_id, error: ret_error };
|
||||||
|
`)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
fmt.Printf("unexpected response\n", reflect.TypeOf(respif))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !batch {
|
||||||
|
call.Otto.Run("ret_response = ret_response[0];")
|
||||||
|
}
|
||||||
|
|
||||||
|
if call.Argument(1).IsObject() {
|
||||||
|
call.Otto.Set("callback", call.Argument(1))
|
||||||
|
call.Otto.Run(`
|
||||||
|
if (Object.prototype.toString.call(callback) == '[object Function]') {
|
||||||
|
callback(null, ret_response);
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
96
rpc/shared/errors.go
Normal file
96
rpc/shared/errors.go
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
package shared
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
type InvalidTypeError struct {
|
||||||
|
method string
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *InvalidTypeError) Error() string {
|
||||||
|
return fmt.Sprintf("invalid type on field %s: %s", e.method, e.msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewInvalidTypeError(method, msg string) *InvalidTypeError {
|
||||||
|
return &InvalidTypeError{
|
||||||
|
method: method,
|
||||||
|
msg: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type InsufficientParamsError struct {
|
||||||
|
have int
|
||||||
|
want int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *InsufficientParamsError) Error() string {
|
||||||
|
return fmt.Sprintf("insufficient params, want %d have %d", e.want, e.have)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewInsufficientParamsError(have int, want int) *InsufficientParamsError {
|
||||||
|
return &InsufficientParamsError{
|
||||||
|
have: have,
|
||||||
|
want: want,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type NotImplementedError struct {
|
||||||
|
Method string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *NotImplementedError) Error() string {
|
||||||
|
return fmt.Sprintf("%s method not implemented", e.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewNotImplementedError(method string) *NotImplementedError {
|
||||||
|
return &NotImplementedError{
|
||||||
|
Method: method,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type DecodeParamError struct {
|
||||||
|
err string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *DecodeParamError) Error() string {
|
||||||
|
return fmt.Sprintf("could not decode, %s", e.err)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDecodeParamError(errstr string) error {
|
||||||
|
return &DecodeParamError{
|
||||||
|
err: errstr,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type ValidationError struct {
|
||||||
|
ParamName string
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ValidationError) Error() string {
|
||||||
|
return fmt.Sprintf("%s not valid, %s", e.ParamName, e.msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewValidationError(param string, msg string) error {
|
||||||
|
return &ValidationError{
|
||||||
|
ParamName: param,
|
||||||
|
msg: msg,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type NotAvailableError struct {
|
||||||
|
Method string
|
||||||
|
Reason string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *NotAvailableError) Error() string {
|
||||||
|
return fmt.Sprintf("%s method not available: %s", e.Method, e.Reason)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewNotAvailableError(method string, reason string) *NotAvailableError {
|
||||||
|
return &NotAvailableError{
|
||||||
|
Method: method,
|
||||||
|
Reason: reason,
|
||||||
|
}
|
||||||
|
}
|
64
rpc/shared/types.go
Normal file
64
rpc/shared/types.go
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
package shared
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/logger"
|
||||||
|
"github.com/ethereum/go-ethereum/logger/glog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RPC request
|
||||||
|
type Request struct {
|
||||||
|
Id interface{} `json:"id"`
|
||||||
|
Jsonrpc string `json:"jsonrpc"`
|
||||||
|
Method string `json:"method"`
|
||||||
|
Params json.RawMessage `json:"params"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RPC response
|
||||||
|
type Response struct {
|
||||||
|
Id interface{} `json:"id"`
|
||||||
|
Jsonrpc string `json:"jsonrpc"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RPC success response
|
||||||
|
type SuccessResponse struct {
|
||||||
|
Id interface{} `json:"id"`
|
||||||
|
Jsonrpc string `json:"jsonrpc"`
|
||||||
|
Result interface{} `json:"result"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RPC error response
|
||||||
|
type ErrorResponse struct {
|
||||||
|
Id interface{} `json:"id"`
|
||||||
|
Jsonrpc string `json:"jsonrpc"`
|
||||||
|
Error *ErrorObject `json:"error"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// RPC error response details
|
||||||
|
type ErrorObject struct {
|
||||||
|
Code int `json:"code"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
// Data interface{} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRpcResponse(id interface{}, jsonrpcver string, reply interface{}, err error) *interface{} {
|
||||||
|
var response interface{}
|
||||||
|
|
||||||
|
switch err.(type) {
|
||||||
|
case nil:
|
||||||
|
response = &SuccessResponse{Jsonrpc: jsonrpcver, Id: id, Result: reply}
|
||||||
|
case *NotImplementedError:
|
||||||
|
jsonerr := &ErrorObject{-32601, err.Error()}
|
||||||
|
response = &ErrorResponse{Jsonrpc: jsonrpcver, Id: id, Error: jsonerr}
|
||||||
|
case *DecodeParamError, *InsufficientParamsError, *ValidationError, *InvalidTypeError:
|
||||||
|
jsonerr := &ErrorObject{-32602, err.Error()}
|
||||||
|
response = &ErrorResponse{Jsonrpc: jsonrpcver, Id: id, Error: jsonerr}
|
||||||
|
default:
|
||||||
|
jsonerr := &ErrorObject{-32603, err.Error()}
|
||||||
|
response = &ErrorResponse{Jsonrpc: jsonrpcver, Id: id, Error: jsonerr}
|
||||||
|
}
|
||||||
|
|
||||||
|
glog.V(logger.Detail).Infof("Generated response: %T %s", response, response)
|
||||||
|
return &response
|
||||||
|
}
|
@ -27,9 +27,8 @@ type Env struct {
|
|||||||
difficulty *big.Int
|
difficulty *big.Int
|
||||||
gasLimit *big.Int
|
gasLimit *big.Int
|
||||||
|
|
||||||
logs state.Logs
|
|
||||||
|
|
||||||
vmTest bool
|
vmTest bool
|
||||||
|
logs []vm.StructLog
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewEnv(state *state.StateDB) *Env {
|
func NewEnv(state *state.StateDB) *Env {
|
||||||
@ -38,6 +37,14 @@ func NewEnv(state *state.StateDB) *Env {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (self *Env) StructLogs() []vm.StructLog {
|
||||||
|
return self.logs
|
||||||
|
}
|
||||||
|
|
||||||
|
func (self *Env) AddStructLog(log vm.StructLog) {
|
||||||
|
self.logs = append(self.logs, log)
|
||||||
|
}
|
||||||
|
|
||||||
func NewEnvFromMap(state *state.StateDB, envValues map[string]string, exeValues map[string]string) *Env {
|
func NewEnvFromMap(state *state.StateDB, envValues map[string]string, exeValues map[string]string) *Env {
|
||||||
env := NewEnv(state)
|
env := NewEnv(state)
|
||||||
|
|
||||||
@ -183,7 +190,7 @@ func RunState(statedb *state.StateDB, env, tx map[string]string) ([]byte, state.
|
|||||||
vmenv := NewEnvFromMap(statedb, env, tx)
|
vmenv := NewEnvFromMap(statedb, env, tx)
|
||||||
vmenv.origin = common.BytesToAddress(keyPair.Address())
|
vmenv.origin = common.BytesToAddress(keyPair.Address())
|
||||||
ret, _, err := core.ApplyMessage(vmenv, message, coinbase)
|
ret, _, err := core.ApplyMessage(vmenv, message, coinbase)
|
||||||
if core.IsNonceErr(err) || core.IsInvalidTxErr(err) {
|
if core.IsNonceErr(err) || core.IsInvalidTxErr(err) || state.IsGasLimitErr(err) {
|
||||||
statedb.Set(snapshot)
|
statedb.Set(snapshot)
|
||||||
}
|
}
|
||||||
statedb.Update()
|
statedb.Update()
|
||||||
|
27
xeth/xeth.go
27
xeth/xeth.go
@ -40,7 +40,13 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func DefaultGas() *big.Int { return new(big.Int).Set(defaultGas) }
|
func DefaultGas() *big.Int { return new(big.Int).Set(defaultGas) }
|
||||||
func DefaultGasPrice() *big.Int { return new(big.Int).Set(defaultGasPrice) }
|
|
||||||
|
func (self *XEth) DefaultGasPrice() *big.Int {
|
||||||
|
if self.gpo == nil {
|
||||||
|
self.gpo = eth.NewGasPriceOracle(self.backend)
|
||||||
|
}
|
||||||
|
return self.gpo.SuggestPrice()
|
||||||
|
}
|
||||||
|
|
||||||
type XEth struct {
|
type XEth struct {
|
||||||
backend *eth.Ethereum
|
backend *eth.Ethereum
|
||||||
@ -68,6 +74,8 @@ type XEth struct {
|
|||||||
// register map[string][]*interface{} // TODO improve return type
|
// register map[string][]*interface{} // TODO improve return type
|
||||||
|
|
||||||
agent *miner.RemoteAgent
|
agent *miner.RemoteAgent
|
||||||
|
|
||||||
|
gpo *eth.GasPriceOracle
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewTest(eth *eth.Ethereum, frontend Frontend) *XEth {
|
func NewTest(eth *eth.Ethereum, frontend Frontend) *XEth {
|
||||||
@ -80,22 +88,22 @@ func NewTest(eth *eth.Ethereum, frontend Frontend) *XEth {
|
|||||||
// New creates an XEth that uses the given frontend.
|
// New creates an XEth that uses the given frontend.
|
||||||
// If a nil Frontend is provided, a default frontend which
|
// If a nil Frontend is provided, a default frontend which
|
||||||
// confirms all transactions will be used.
|
// confirms all transactions will be used.
|
||||||
func New(eth *eth.Ethereum, frontend Frontend) *XEth {
|
func New(ethereum *eth.Ethereum, frontend Frontend) *XEth {
|
||||||
xeth := &XEth{
|
xeth := &XEth{
|
||||||
backend: eth,
|
backend: ethereum,
|
||||||
frontend: frontend,
|
frontend: frontend,
|
||||||
quit: make(chan struct{}),
|
quit: make(chan struct{}),
|
||||||
filterManager: filter.NewFilterManager(eth.EventMux()),
|
filterManager: filter.NewFilterManager(ethereum.EventMux()),
|
||||||
logQueue: make(map[int]*logQueue),
|
logQueue: make(map[int]*logQueue),
|
||||||
blockQueue: make(map[int]*hashQueue),
|
blockQueue: make(map[int]*hashQueue),
|
||||||
transactionQueue: make(map[int]*hashQueue),
|
transactionQueue: make(map[int]*hashQueue),
|
||||||
messages: make(map[int]*whisperFilter),
|
messages: make(map[int]*whisperFilter),
|
||||||
agent: miner.NewRemoteAgent(),
|
agent: miner.NewRemoteAgent(),
|
||||||
}
|
}
|
||||||
if eth.Whisper() != nil {
|
if ethereum.Whisper() != nil {
|
||||||
xeth.whisper = NewWhisper(eth.Whisper())
|
xeth.whisper = NewWhisper(ethereum.Whisper())
|
||||||
}
|
}
|
||||||
eth.Miner().Register(xeth.agent)
|
ethereum.Miner().Register(xeth.agent)
|
||||||
if frontend == nil {
|
if frontend == nil {
|
||||||
xeth.frontend = dummyFrontend{}
|
xeth.frontend = dummyFrontend{}
|
||||||
}
|
}
|
||||||
@ -227,6 +235,7 @@ func (self *XEth) WithState(statedb *state.StateDB) *XEth {
|
|||||||
xeth := &XEth{
|
xeth := &XEth{
|
||||||
backend: self.backend,
|
backend: self.backend,
|
||||||
frontend: self.frontend,
|
frontend: self.frontend,
|
||||||
|
gpo: self.gpo,
|
||||||
}
|
}
|
||||||
|
|
||||||
xeth.state = NewState(xeth, statedb)
|
xeth.state = NewState(xeth, statedb)
|
||||||
@ -829,7 +838,7 @@ func (self *XEth) Call(fromStr, toStr, valueStr, gasStr, gasPriceStr, dataStr st
|
|||||||
}
|
}
|
||||||
|
|
||||||
if msg.gasPrice.Cmp(big.NewInt(0)) == 0 {
|
if msg.gasPrice.Cmp(big.NewInt(0)) == 0 {
|
||||||
msg.gasPrice = DefaultGasPrice()
|
msg.gasPrice = self.DefaultGasPrice()
|
||||||
}
|
}
|
||||||
|
|
||||||
block := self.CurrentBlock()
|
block := self.CurrentBlock()
|
||||||
@ -898,7 +907,7 @@ func (self *XEth) Transact(fromStr, toStr, nonceStr, valueStr, gasStr, gasPriceS
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(gasPriceStr) == 0 {
|
if len(gasPriceStr) == 0 {
|
||||||
price = DefaultGasPrice()
|
price = self.DefaultGasPrice()
|
||||||
} else {
|
} else {
|
||||||
price = common.Big(gasPriceStr)
|
price = common.Big(gasPriceStr)
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user