Merge remote-tracking branch 'origin/master' into feature/blockTracer

Merging to prevent a CircleCI test failure before the pull request is made.
This commit is contained in:
philip-morlier 2021-11-10 18:14:26 -08:00
commit d96772ce6b
26 changed files with 831 additions and 596 deletions

View File

@ -85,3 +85,6 @@ workflows:
- build_geth_push:
requires:
- test
filters:
tags:
only: /^v.*/

View File

@ -36,17 +36,19 @@ import (
)
type result struct {
Error error
Address common.Address
Hash common.Hash
Error error
Address common.Address
Hash common.Hash
IntrinsicGas uint64
}
// MarshalJSON marshals as JSON with a hash.
func (r *result) MarshalJSON() ([]byte, error) {
type xx struct {
Error string `json:"error,omitempty"`
Address *common.Address `json:"address,omitempty"`
Hash *common.Hash `json:"hash,omitempty"`
Error string `json:"error,omitempty"`
Address *common.Address `json:"address,omitempty"`
Hash *common.Hash `json:"hash,omitempty"`
IntrinsicGas uint64 `json:"intrinsicGas,omitempty"`
}
var out xx
if r.Error != nil {
@ -58,6 +60,7 @@ func (r *result) MarshalJSON() ([]byte, error) {
if r.Hash != (common.Hash{}) {
out.Hash = &r.Hash
}
out.IntrinsicGas = r.IntrinsicGas
return json.Marshal(out)
}
@ -132,12 +135,36 @@ func Transaction(ctx *cli.Context) error {
} else {
r.Address = sender
}
// Check intrinsic gas
if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil,
chainConfig.IsHomestead(new(big.Int)), chainConfig.IsIstanbul(new(big.Int))); err != nil {
r.Error = err
} else if tx.Gas() < gas {
r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas)
results = append(results, r)
continue
} else {
r.IntrinsicGas = gas
if tx.Gas() < gas {
r.Error = fmt.Errorf("%w: have %d, want %d", core.ErrIntrinsicGas, tx.Gas(), gas)
results = append(results, r)
continue
}
}
// Validate <256bit fields
switch {
case tx.Value().BitLen() > 256:
r.Error = errors.New("value exceeds 256 bits")
case tx.GasPrice().BitLen() > 256:
r.Error = errors.New("gasPrice exceeds 256 bits")
case tx.GasTipCap().BitLen() > 256:
r.Error = errors.New("maxPriorityFeePerGas exceeds 256 bits")
case tx.GasFeeCap().BitLen() > 256:
r.Error = errors.New("maxFeePerGas exceeds 256 bits")
case tx.GasFeeCap().Cmp(tx.GasTipCap()) < 0:
r.Error = errors.New("maxFeePerGas < maxPriorityFeePerGas")
case new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256:
r.Error = errors.New("gas * gasPrice exceeds 256 bits")
case new(big.Int).Mul(tx.GasFeeCap(), new(big.Int).SetUint64(tx.Gas())).BitLen() > 256:
r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits")
}
results = append(results, r)
}
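For reference, a minimal standalone sketch (not part of this diff, assuming the go-ethereum module is importable) of the check added above: core.IntrinsicGas returns 21000 for a plain value transfer, which is why the exp.json fixtures below report "intrinsicGas": 21000.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Plain transfer: no calldata, no access list, not a contract creation,
	// Homestead and Istanbul rules active.
	gas, err := core.IntrinsicGas(nil, nil, false, true, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(gas) // 21000
}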

View File

@ -257,6 +257,14 @@ func TestT9n(t *testing.T) {
},
expOut: "exp.json",
},
{ // Transactions with value exceeding 256 bits
base: "./testdata/17",
input: t9nInput{
inTxs: "signed_txs.rlp",
stFork: "London",
},
expOut: "exp.json",
},
} {
args := []string{"t9n"}

View File

@ -1,10 +1,12 @@
[
{
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
"hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
"intrinsicGas": 21000
},
{
"address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a"
"hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
"intrinsicGas": 21000
}
]

View File

@ -1,11 +1,13 @@
[
{
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6"
"hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6",
"intrinsicGas": 21000
},
{
"error": "intrinsic gas too low: have 82, want 21000",
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b"
"hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b",
"intrinsicGas": 21000
}
]

cmd/evm/testdata/17/exp.json (vendored, new file, 22 lines)

@ -0,0 +1,22 @@
[
{
"error": "value exceeds 256 bits",
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"hash": "0xfbd91685dcbf8172f0e8c53e2ddbb4d26707840da6b51a74371f62a33868fd82",
"intrinsicGas": 21000
},
{
"error": "gasPrice exceeds 256 bits",
"address": "0x1b57ccef1fe5fb73f1e64530fb4ebd9cf1655964",
"hash": "0x45dc05035cada83748e4c1fe617220106b331eca054f44c2304d5654a9fb29d5",
"intrinsicGas": 21000
},
{
"error": "invalid transaction v, r, s values",
"hash": "0xf06691c2a803ab7f3c81d06a0c0a896f80f311105c599fc59a9fdbc669356d35"
},
{
"error": "invalid transaction v, r, s values",
"hash": "0x84703b697ad5b0db25e4f1f98fb6b1adce85b9edb2232eeba9cedd8c6601694b"
}
]

cmd/evm/testdata/17/rlpdata.txt (vendored, new file, 46 lines)

@ -0,0 +1,46 @@
[
[
"",
"d",
5208,
d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
010000000000000000000000000000000000000000000000000000000000000001,
"",
1b,
c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d,
6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28,
],
[
"",
010000000000000000000000000000000000000000000000000000000000000001,
5208,
d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
11,
"",
1b,
c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d,
6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28,
],
[
"",
11,
5208,
d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
11,
"",
1b,
c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daa,
6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28,
],
[
"",
11,
5208,
d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0,
11,
"",
1b,
c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549d,
6180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb,
],
]

cmd/evm/testdata/17/signed_txs.rlp (vendored, new file, 1 line)

@ -0,0 +1 @@
"0xf901c8f880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f88080a101000000000000000000000000000000000000000000000000000000000000000182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba1c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549daaa06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28f860801182520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d011801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da16180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28bb"

View File

@ -17,6 +17,7 @@
package main
import (
"errors"
"fmt"
"os"
"path/filepath"
@ -335,14 +336,15 @@ func dbGet(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
key, err := hexutil.Decode(ctx.Args().Get(0))
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
}
data, err := db.Get(key)
if err != nil {
log.Info("Get operation failed", "error", err)
log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
fmt.Printf("key %#x: %#x\n", key, data)
@ -360,7 +362,7 @@ func dbDelete(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
key, err := hexutil.Decode(ctx.Args().Get(0))
key, err := parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
@ -370,7 +372,7 @@ func dbDelete(ctx *cli.Context) error {
fmt.Printf("Previous value: %#x\n", data)
}
if err = db.Delete(key); err != nil {
log.Info("Delete operation returned an error", "error", err)
log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err)
return err
}
return nil
@ -393,7 +395,7 @@ func dbPut(ctx *cli.Context) error {
data []byte
err error
)
key, err = hexutil.Decode(ctx.Args().Get(0))
key, err = parseHexOrString(ctx.Args().Get(0))
if err != nil {
log.Info("Could not decode the key", "error", err)
return err
@ -499,3 +501,12 @@ func freezerInspect(ctx *cli.Context) error {
}
return nil
}
// parseHexOrString tries to hex-decode str; if the 0x prefix is missing, it returns the raw bytes of str instead
func parseHexOrString(str string) ([]byte, error) {
b, err := hexutil.Decode(str)
if errors.Is(err, hexutil.ErrMissingPrefix) {
return []byte(str), nil
}
return b, err
}
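The helper is unexported, so the standalone sketch below (an illustration, not part of the diff) re-declares it to show the behavior the change enables: geth's db get/put/delete subcommands now accept either a 0x-prefixed hex key or a raw string key.

package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// Re-declared copy of the unexported helper above, for illustration only.
func parseHexOrString(str string) ([]byte, error) {
	b, err := hexutil.Decode(str)
	if errors.Is(err, hexutil.ErrMissingPrefix) {
		return []byte(str), nil
	}
	return b, err
}

func main() {
	k1, _ := parseHexOrString("0x1234")          // hex key -> []byte{0x12, 0x34}
	k2, _ := parseHexOrString("DatabaseVersion") // raw string key, used as-is
	fmt.Printf("%x %q\n", k1, k2)                // 1234 "DatabaseVersion"
}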

View File

@ -17,7 +17,6 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
@ -32,8 +31,10 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/log"
"github.com/peterh/liner"
"golang.org/x/crypto/ssh/terminal"
)
@ -76,17 +77,27 @@ type wizard struct {
servers map[string]*sshClient // SSH connections to servers to administer
services map[string][]string // Ethereum services known to be running on servers
in *bufio.Reader // Wrapper around stdin to allow reading user input
lock sync.Mutex // Lock to protect configs during concurrent service discovery
lock sync.Mutex // Lock to protect configs during concurrent service discovery
}
// promptInput prompts the user for input with the given prompt string. Returns when a value is entered.
// Causes the wizard to exit if ctrl-d is pressed.
func promptInput(p string) string {
for {
text, err := prompt.Stdin.PromptInput(p)
if err != nil {
if err != liner.ErrPromptAborted {
log.Crit("Failed to read user input", "err", err)
}
} else {
return text
}
}
}
// read reads a single line from stdin, trimming it of spaces.
func (w *wizard) read() string {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
return strings.TrimSpace(text)
}
@ -94,11 +105,7 @@ func (w *wizard) read() string {
// non-emptiness.
func (w *wizard) readString() string {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
if text = strings.TrimSpace(text); text != "" {
return text
}
@ -108,11 +115,7 @@ func (w *wizard) readString() string {
// readDefaultString reads a single line from stdin, trimming it of spaces. If
// an empty line is entered, the default value is returned.
func (w *wizard) readDefaultString(def string) string {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
if text = strings.TrimSpace(text); text != "" {
return text
}
@ -124,11 +127,7 @@ func (w *wizard) readDefaultString(def string) string {
// value is returned.
func (w *wizard) readDefaultYesNo(def bool) bool {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
if text = strings.ToLower(strings.TrimSpace(text)); text == "" {
return def
}
@ -146,11 +145,7 @@ func (w *wizard) readDefaultYesNo(def bool) bool {
// interpret it as a URL (http, https or file).
func (w *wizard) readURL() *url.URL {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
uri, err := url.Parse(strings.TrimSpace(text))
if err != nil {
log.Error("Invalid input, expected URL", "err", err)
@ -164,11 +159,7 @@ func (w *wizard) readURL() *url.URL {
// to parse into an integer.
func (w *wizard) readInt() int {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
if text = strings.TrimSpace(text); text == "" {
continue
}
@ -186,11 +177,7 @@ func (w *wizard) readInt() int {
// returned.
func (w *wizard) readDefaultInt(def int) int {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
if text = strings.TrimSpace(text); text == "" {
return def
}
@ -208,11 +195,7 @@ func (w *wizard) readDefaultInt(def int) int {
// default value is returned.
func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
if text = strings.TrimSpace(text); text == "" {
return def
}
@ -225,38 +208,11 @@ func (w *wizard) readDefaultBigInt(def *big.Int) *big.Int {
}
}
/*
// readFloat reads a single line from stdin, trimming it of spaces, enforcing it
// to parse into a float.
func (w *wizard) readFloat() float64 {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.TrimSpace(text); text == "" {
continue
}
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
if err != nil {
log.Error("Invalid input, expected float", "err", err)
continue
}
return val
}
}
*/
// readDefaultFloat reads a single line from stdin, trimming it of spaces, enforcing
// it to parse into a float. If an empty line is entered, the default value is returned.
func (w *wizard) readDefaultFloat(def float64) float64 {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
if text = strings.TrimSpace(text); text == "" {
return def
}
@ -285,12 +241,7 @@ func (w *wizard) readPassword() string {
// it to an Ethereum address.
func (w *wizard) readAddress() *common.Address {
for {
// Read the address from the user
fmt.Printf("> 0x")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> 0x")
if text = strings.TrimSpace(text); text == "" {
return nil
}
@ -311,11 +262,7 @@ func (w *wizard) readAddress() *common.Address {
func (w *wizard) readDefaultAddress(def common.Address) common.Address {
for {
// Read the address from the user
fmt.Printf("> 0x")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> 0x")
if text = strings.TrimSpace(text); text == "" {
return def
}
@ -334,8 +281,9 @@ func (w *wizard) readJSON() string {
var blob json.RawMessage
for {
fmt.Printf("> ")
if err := json.NewDecoder(w.in).Decode(&blob); err != nil {
text := promptInput("> ")
reader := strings.NewReader(text)
if err := json.NewDecoder(reader).Decode(&blob); err != nil {
log.Error("Invalid JSON, please try again", "err", err)
continue
}
@ -351,10 +299,7 @@ func (w *wizard) readIPAddress() string {
for {
// Read the IP address from the user
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
text := promptInput("> ")
if text = strings.TrimSpace(text); text == "" {
return ""
}

View File

@ -17,7 +17,6 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
@ -38,7 +37,6 @@ func makeWizard(network string) *wizard {
},
servers: make(map[string]*sshClient),
services: make(map[string][]string),
in: bufio.NewReader(os.Stdin),
}
}

View File

@ -18,7 +18,9 @@
package main
import (
"bufio"
"bytes"
"container/list"
"encoding/hex"
"flag"
"fmt"
@ -26,18 +28,20 @@ import (
"os"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
)
var (
hexMode = flag.String("hex", "", "dump given hex data")
noASCII = flag.Bool("noascii", false, "don't print ASCII strings readably")
single = flag.Bool("single", false, "print only the first element, discard the rest")
hexMode = flag.String("hex", "", "dump given hex data")
reverseMode = flag.Bool("reverse", false, "convert ASCII to rlp")
noASCII = flag.Bool("noascii", false, "don't print ASCII strings readably")
single = flag.Bool("single", false, "print only the first element, discard the rest")
)
func init() {
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "[-noascii] [-hex <data>] [filename]")
fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "[-noascii] [-hex <data>][-reverse] [filename]")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, `
Dumps RLP data from the given file in readable form.
@ -73,23 +77,40 @@ func main() {
flag.Usage()
os.Exit(2)
}
s := rlp.NewStream(r, 0)
for {
if err := dump(s, 0); err != nil {
if err != io.EOF {
die(err)
}
break
out := os.Stdout
if *reverseMode {
data, err := textToRlp(r)
if err != nil {
die(err)
}
fmt.Println()
if *single {
break
fmt.Printf("0x%x\n", data)
return
} else {
err := rlpToText(r, out)
if err != nil {
die(err)
}
}
}
func dump(s *rlp.Stream, depth int) error {
func rlpToText(r io.Reader, out io.Writer) error {
s := rlp.NewStream(r, 0)
for {
if err := dump(s, 0, out); err != nil {
if err != io.EOF {
return err
}
break
}
fmt.Fprintln(out)
if *single {
break
}
}
return nil
}
func dump(s *rlp.Stream, depth int, out io.Writer) error {
kind, size, err := s.Kind()
if err != nil {
return err
@ -101,28 +122,28 @@ func dump(s *rlp.Stream, depth int) error {
return err
}
if len(str) == 0 || !*noASCII && isASCII(str) {
fmt.Printf("%s%q", ws(depth), str)
fmt.Fprintf(out, "%s%q", ws(depth), str)
} else {
fmt.Printf("%s%x", ws(depth), str)
fmt.Fprintf(out, "%s%x", ws(depth), str)
}
case rlp.List:
s.List()
defer s.ListEnd()
if size == 0 {
fmt.Print(ws(depth) + "[]")
fmt.Fprintf(out, ws(depth)+"[]")
} else {
fmt.Println(ws(depth) + "[")
fmt.Fprintln(out, ws(depth)+"[")
for i := 0; ; i++ {
if i > 0 {
fmt.Print(",\n")
fmt.Fprint(out, ",\n")
}
if err := dump(s, depth+1); err == rlp.EOL {
if err := dump(s, depth+1, out); err == rlp.EOL {
break
} else if err != nil {
return err
}
}
fmt.Print(ws(depth) + "]")
fmt.Fprint(out, ws(depth)+"]")
}
}
return nil
@ -145,3 +166,45 @@ func die(args ...interface{}) {
fmt.Fprintln(os.Stderr, args...)
os.Exit(1)
}
// textToRlp converts text into RLP (best effort).
func textToRlp(r io.Reader) ([]byte, error) {
// We're expecting the input to be well-formed, meaning that
// - each element is on a separate line
// - each line is either an (element OR a list start/end) + comma
// - an element is either hex-encoded bytes OR a quoted string
var (
scanner = bufio.NewScanner(r)
obj []interface{}
stack = list.New()
)
for scanner.Scan() {
t := strings.TrimSpace(scanner.Text())
if len(t) == 0 {
continue
}
switch t {
case "[": // list start
stack.PushFront(obj)
obj = make([]interface{}, 0)
case "]", "],": // list end
parent := stack.Remove(stack.Front()).([]interface{})
obj = append(parent, obj)
case "[],": // empty list
obj = append(obj, make([]interface{}, 0))
default: // element
data := []byte(t)[:len(t)-1] // cut off comma
if data[0] == '"' { // ascii string
data = []byte(t)[1 : len(data)-1]
} else { // hex data
data = common.FromHex(string(data))
}
obj = append(obj, data)
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
data, err := rlp.EncodeToBytes(obj[0])
return data, err
}
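As a point of reference for the text format described above, the sketch below (standalone, not part of the diff) builds the same nested structure used in TestTextToRlp further down and encodes it with the rlp package; -reverse mode reconstructs exactly this kind of []interface{} tree before calling rlp.EncodeToBytes.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Mirrors the text fixture: "", [], [ [] ], 5208
	obj := []interface{}{
		[]byte{},                       // ""
		[]interface{}{},                // []
		[]interface{}{[]interface{}{}}, // [ [] ]
		[]byte{0x52, 0x08},             // 5208
	}
	data, err := rlp.EncodeToBytes(obj)
	if err != nil {
		panic(err)
	}
	fmt.Printf("0x%x\n", data) // 0xc780c0c1c0825208
}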

View File

@ -0,0 +1,65 @@
package main
import (
"bytes"
"fmt"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
func TestRoundtrip(t *testing.T) {
for i, want := range []string{
"0xf880806482520894d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0a1010000000000000000000000000000000000000000000000000000000000000001801ba0c16787a8e25e941d67691954642876c08f00996163ae7dfadbbfd6cd436f549da06180e5626cae31590f40641fe8f63734316c4bfeb4cdfab6714198c1044d2e28",
"0xd5c0d3cb84746573742a2a808213378667617a6f6e6b",
"0xc780c0c1c0825208",
} {
var out strings.Builder
err := rlpToText(bytes.NewReader(common.FromHex(want)), &out)
if err != nil {
t.Fatal(err)
}
text := out.String()
rlpBytes, err := textToRlp(strings.NewReader(text))
if err != nil {
t.Errorf("test %d: error %v", i, err)
continue
}
have := fmt.Sprintf("0x%x", rlpBytes)
if have != want {
t.Errorf("test %d: have\n%v\nwant:\n%v\n", i, have, want)
}
}
}
func TestTextToRlp(t *testing.T) {
type tc struct {
text string
want string
}
cases := []tc{
{
text: `[
"",
[],
[
[],
],
5208,
]`,
want: "0xc780c0c1c0825208",
},
}
for i, tc := range cases {
have, err := textToRlp(strings.NewReader(tc.text))
if err != nil {
t.Errorf("test %d: error %v", i, err)
continue
}
if hexutil.Encode(have) != tc.want {
t.Errorf("test %d:\nhave %v\nwant %v", i, hexutil.Encode(have), tc.want)
}
}
}

View File

@ -686,7 +686,7 @@ var (
}
GpoMaxGasPriceFlag = cli.Int64Flag{
Name: "gpo.maxprice",
Usage: "Maximum gas price will be recommended by gpo",
Usage: "Maximum transaction priority fee (or gasprice before London fork) to be recommended by gpo",
Value: ethconfig.Defaults.GPO.MaxPrice.Int64(),
}
GpoIgnoreGasPriceFlag = cli.Int64Flag{

View File

@ -43,7 +43,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
lru "github.com/hashicorp/golang-lru"
)
@ -409,11 +408,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
return bc, nil
}
// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
return &bc.vmConfig
}
// empty returns an indicator whether the blockchain is empty.
// Note, it's a special case that we connect a non-empty ancient
// database with an empty node, so that we can plugin the ancient
@ -666,53 +660,6 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
return nil
}
// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
return bc.CurrentBlock().GasLimit()
}
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
return bc.currentBlock.Load().(*types.Block)
}
// Snapshots returns the blockchain snapshot tree.
func (bc *BlockChain) Snapshots() *snapshot.Tree {
return bc.snaps
}
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
return bc.currentFastBlock.Load().(*types.Block)
}
// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
return bc.validator
}
// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
return bc.processor
}
// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
return bc.StateAt(bc.CurrentBlock().Root())
}
// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, bc.stateCache, bc.snaps)
}
// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
return bc.stateCache
}
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() error {
return bc.ResetWithGenesisBlock(bc.genesisBlock)
@ -819,194 +766,6 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) {
headBlockGauge.Update(int64(block.NumberU64()))
}
// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
return bc.genesisBlock
}
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := bc.bodyCache.Get(hash); ok {
body := cached.(*types.Body)
return body
}
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
body := rawdb.ReadBody(bc.db, hash, *number)
if body == nil {
return nil
}
// Cache the found body for next time and return
bc.bodyCache.Add(hash, body)
return body
}
// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := bc.bodyRLPCache.Get(hash); ok {
return cached.(rlp.RawValue)
}
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
body := rawdb.ReadBodyRLP(bc.db, hash, *number)
if len(body) == 0 {
return nil
}
// Cache the found body for next time and return
bc.bodyRLPCache.Add(hash, body)
return body
}
// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
if bc.blockCache.Contains(hash) {
return true
}
return rawdb.HasBody(bc.db, hash, number)
}
// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
if !bc.HasBlock(hash, number) {
return false
}
if bc.receiptsCache.Contains(hash) {
return true
}
return rawdb.HasReceipts(bc.db, hash, number)
}
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
_, err := bc.stateCache.OpenTrie(hash)
return err == nil
}
// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
// Check first that the block itself is known
block := bc.GetBlock(hash, number)
if block == nil {
return false
}
return bc.HasState(block.Root())
}
// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
// Short circuit if the block's already in the cache, retrieve otherwise
if block, ok := bc.blockCache.Get(hash); ok {
return block.(*types.Block)
}
block := rawdb.ReadBlock(bc.db, hash, number)
if block == nil {
return nil
}
// Cache the found block for next time and return
bc.blockCache.Add(block.Hash(), block)
return block
}
// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
return bc.GetBlock(hash, *number)
}
// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
hash := rawdb.ReadCanonicalHash(bc.db, number)
if hash == (common.Hash{}) {
return nil
}
return bc.GetBlock(hash, number)
}
// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
if receipts, ok := bc.receiptsCache.Get(hash); ok {
return receipts.(types.Receipts)
}
number := rawdb.ReadHeaderNumber(bc.db, hash)
if number == nil {
return nil
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
if receipts == nil {
return nil
}
bc.receiptsCache.Add(hash, receipts)
return receipts
}
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
for i := 0; i < n; i++ {
block := bc.GetBlock(hash, *number)
if block == nil {
break
}
blocks = append(blocks, block)
hash = block.ParentHash()
*number--
}
return
}
// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
uncles := []*types.Header{}
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
}
return uncles
}
// TrieNode retrieves a blob of data associated with a trie node
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
return bc.stateCache.TrieDB().Node(hash)
}
// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
return bc.stateCache.ContractCode(common.Hash{}, hash)
}
// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
type codeReader interface {
ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
}
return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}
// Stop stops the blockchain service. If any imports are currently in progress
// it will abort them using the procInterrupt.
func (bc *BlockChain) Stop() {
@ -1390,18 +1149,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return 0, nil
}
// SetTxLookupLimit is responsible for updating the txlookup limit to the
// original one stored in db if the new mismatches with the old one.
func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
bc.txLookupLimit = limit
}
// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
// stale transaction indices.
func (bc *BlockChain) TxLookupLimit() uint64 {
return bc.txLookupLimit
}
var lastWrite uint64
// writeBlockWithoutState writes only the block and its metadata to the database,
@ -2401,116 +2148,3 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (i
_, err := bc.hc.InsertHeaderChain(chain, start)
return 0, err
}
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
return bc.hc.CurrentHeader()
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
return bc.hc.GetTd(hash, number)
}
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
// Blockchain might have cached the whole block, only if not go to headerchain
if block, ok := bc.blockCache.Get(hash); ok {
return block.(*types.Block).Header()
}
return bc.hc.GetHeader(hash, number)
}
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
// Blockchain might have cached the whole block, only if not go to headerchain
if block, ok := bc.blockCache.Get(hash); ok {
return block.(*types.Block).Header()
}
return bc.hc.GetHeaderByHash(hash)
}
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
return bc.hc.HasHeader(hash, number)
}
// GetCanonicalHash returns the canonical hash for a given block number
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
return bc.hc.GetCanonicalHash(number)
}
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
return bc.hc.GetHeaderByNumber(number)
}
// GetTransactionLookup retrieves the lookup associated with the given transaction
// hash from the cache or database.
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
// Short circuit if the txlookup already in the cache, retrieve otherwise
if lookup, exist := bc.txLookupCache.Get(hash); exist {
return lookup.(*rawdb.LegacyTxLookupEntry)
}
tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
if tx == nil {
return nil
}
lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
bc.txLookupCache.Add(hash, lookup)
return lookup
}
// Config retrieves the chain's fork configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}
// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}
// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}
// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}

core/blockchain_reader.go (new file, 387 lines)

@ -0,0 +1,387 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
return bc.hc.CurrentHeader()
}
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
return bc.currentBlock.Load().(*types.Block)
}
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
return bc.currentFastBlock.Load().(*types.Block)
}
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
return bc.hc.HasHeader(hash, number)
}
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
return bc.hc.GetHeader(hash, number)
}
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
return bc.hc.GetHeaderByHash(hash)
}
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
return bc.hc.GetHeaderByNumber(number)
}
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := bc.bodyCache.Get(hash); ok {
body := cached.(*types.Body)
return body
}
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
body := rawdb.ReadBody(bc.db, hash, *number)
if body == nil {
return nil
}
// Cache the found body for next time and return
bc.bodyCache.Add(hash, body)
return body
}
// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := bc.bodyRLPCache.Get(hash); ok {
return cached.(rlp.RawValue)
}
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
body := rawdb.ReadBodyRLP(bc.db, hash, *number)
if len(body) == 0 {
return nil
}
// Cache the found body for next time and return
bc.bodyRLPCache.Add(hash, body)
return body
}
// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
if bc.blockCache.Contains(hash) {
return true
}
return rawdb.HasBody(bc.db, hash, number)
}
// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
if !bc.HasBlock(hash, number) {
return false
}
if bc.receiptsCache.Contains(hash) {
return true
}
return rawdb.HasReceipts(bc.db, hash, number)
}
// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
// Short circuit if the block's already in the cache, retrieve otherwise
if block, ok := bc.blockCache.Get(hash); ok {
return block.(*types.Block)
}
block := rawdb.ReadBlock(bc.db, hash, number)
if block == nil {
return nil
}
// Cache the found block for next time and return
bc.blockCache.Add(block.Hash(), block)
return block
}
// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
return bc.GetBlock(hash, *number)
}
// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
hash := rawdb.ReadCanonicalHash(bc.db, number)
if hash == (common.Hash{}) {
return nil
}
return bc.GetBlock(hash, number)
}
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
for i := 0; i < n; i++ {
block := bc.GetBlock(hash, *number)
if block == nil {
break
}
blocks = append(blocks, block)
hash = block.ParentHash()
*number--
}
return
}
// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
if receipts, ok := bc.receiptsCache.Get(hash); ok {
return receipts.(types.Receipts)
}
number := rawdb.ReadHeaderNumber(bc.db, hash)
if number == nil {
return nil
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
if receipts == nil {
return nil
}
bc.receiptsCache.Add(hash, receipts)
return receipts
}
// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
uncles := []*types.Header{}
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
}
return uncles
}
// GetCanonicalHash returns the canonical hash for a given block number
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
return bc.hc.GetCanonicalHash(number)
}
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}
// GetTransactionLookup retrieves the lookup associated with the given transaction
// hash from the cache or database.
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
// Short circuit if the txlookup already in the cache, retrieve otherwise
if lookup, exist := bc.txLookupCache.Get(hash); exist {
return lookup.(*rawdb.LegacyTxLookupEntry)
}
tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
if tx == nil {
return nil
}
lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
bc.txLookupCache.Add(hash, lookup)
return lookup
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
return bc.hc.GetTd(hash, number)
}
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
_, err := bc.stateCache.OpenTrie(hash)
return err == nil
}
// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
// Check first that the block itself is known
block := bc.GetBlock(hash, number)
if block == nil {
return false
}
return bc.HasState(block.Root())
}
// TrieNode retrieves a blob of data associated with a trie node
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
return bc.stateCache.TrieDB().Node(hash)
}
// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
return bc.stateCache.ContractCode(common.Hash{}, hash)
}
// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
type codeReader interface {
ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
}
return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}
// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
return bc.StateAt(bc.CurrentBlock().Root())
}
// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, bc.stateCache, bc.snaps)
}
// Config retrieves the chain's fork configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
// Snapshots returns the blockchain snapshot tree.
func (bc *BlockChain) Snapshots() *snapshot.Tree {
return bc.snaps
}
// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
return bc.validator
}
// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
return bc.processor
}
// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
return bc.stateCache
}
// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
return bc.CurrentBlock().GasLimit()
}
// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
return bc.genesisBlock
}
// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
return &bc.vmConfig
}
// SetTxLookupLimit is responsible for updating the txlookup limit to the
// original one stored in db if the new mismatches with the old one.
func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
bc.txLookupLimit = limit
}
// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
// stale transaction indices.
func (bc *BlockChain) TxLookupLimit() uint64 {
return bc.txLookupLimit
}
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}
// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}
// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}
// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}

View File

@ -37,77 +37,83 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) {
lock.Lock()
defer lock.Unlock()
if freezerUpdates == nil { freezerUpdates = make(map[uint64]map[string]interface{}) }
defer func() { delete(freezerUpdates, num) }()
update, ok := freezerUpdates[num]
if !ok {
log.Warn("Attempting to commit untracked block", "num", num)
return
min := ^uint64(0)
for i := range freezerUpdates{
if i < min { min = i }
}
fnList := pl.Lookup("ModifyAncients", func(item interface{}) bool {
_, ok := item.(func(uint64, map[string]interface{}))
return ok
})
for _, fni := range fnList {
if fn, ok := fni.(func(uint64, map[string]interface{})); ok {
fn(num, update)
for i := min; i < num; i++ {
update, ok := freezerUpdates[i]
defer func(i uint64) { delete(freezerUpdates, i) }(i)
if !ok {
log.Warn("Attempting to commit untracked block", "num", i)
continue
}
}
appendAncientFnList := pl.Lookup("AppendAncient", func(item interface{}) bool {
_, ok := item.(func(number uint64, hash, header, body, receipts, td []byte))
if ok { log.Warn("PlugEth's AppendAncient is deprecated. Please update to ModifyAncients.") }
return ok
})
if len(appendAncientFnList) > 0 {
var (
hash []byte
header []byte
body []byte
receipts []byte
td []byte
)
if hashi, ok := update[freezerHashTable]; ok {
switch v := hashi.(type) {
case []byte:
hash = v
default:
hash, _ = rlp.EncodeToBytes(v)
fnList := pl.Lookup("ModifyAncients", func(item interface{}) bool {
_, ok := item.(func(uint64, map[string]interface{}))
return ok
})
for _, fni := range fnList {
if fn, ok := fni.(func(uint64, map[string]interface{})); ok {
fn(i, update)
}
}
if headeri, ok := update[freezerHeaderTable]; ok {
switch v := headeri.(type) {
case []byte:
header = v
default:
header, _ = rlp.EncodeToBytes(v)
appendAncientFnList := pl.Lookup("AppendAncient", func(item interface{}) bool {
_, ok := item.(func(number uint64, hash, header, body, receipts, td []byte))
if ok { log.Warn("PlugEth's AppendAncient is deprecated. Please update to ModifyAncients.") }
return ok
})
if len(appendAncientFnList) > 0 {
var (
hash []byte
header []byte
body []byte
receipts []byte
td []byte
)
if hashi, ok := update[freezerHashTable]; ok {
switch v := hashi.(type) {
case []byte:
hash = v
default:
hash, _ = rlp.EncodeToBytes(v)
}
}
}
if bodyi, ok := update[freezerBodiesTable]; ok {
switch v := bodyi.(type) {
case []byte:
body = v
default:
body, _ = rlp.EncodeToBytes(v)
if headeri, ok := update[freezerHeaderTable]; ok {
switch v := headeri.(type) {
case []byte:
header = v
default:
header, _ = rlp.EncodeToBytes(v)
}
}
}
if receiptsi, ok := update[freezerReceiptTable]; ok {
switch v := receiptsi.(type) {
case []byte:
receipts = v
default:
receipts, _ = rlp.EncodeToBytes(v)
if bodyi, ok := update[freezerBodiesTable]; ok {
switch v := bodyi.(type) {
case []byte:
body = v
default:
body, _ = rlp.EncodeToBytes(v)
}
}
}
if tdi, ok := update[freezerDifficultyTable]; ok {
switch v := tdi.(type) {
case []byte:
td = v
default:
td, _ = rlp.EncodeToBytes(v)
if receiptsi, ok := update[freezerReceiptTable]; ok {
switch v := receiptsi.(type) {
case []byte:
receipts = v
default:
receipts, _ = rlp.EncodeToBytes(v)
}
}
}
for _, fni := range appendAncientFnList {
if fn, ok := fni.(func(number uint64, hash, header, body, receipts, td []byte)); ok {
fn(num, hash, header, body, receipts, td)
if tdi, ok := update[freezerDifficultyTable]; ok {
switch v := tdi.(type) {
case []byte:
td = v
default:
td, _ = rlp.EncodeToBytes(v)
}
}
for _, fni := range appendAncientFnList {
if fn, ok := fni.(func(number uint64, hash, header, body, receipts, td []byte)); ok {
fn(i, hash, header, body, receipts, td)
}
}
}
}
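For plugin authors, a minimal sketch of the hook shape this loop looks up (the package layout and the "headers" table key are assumptions for illustration, not dictated by the diff): any loaded plugin exporting a ModifyAncients with this signature receives each committed block number together with the freezer tables staged for it.

package main

import "github.com/ethereum/go-ethereum/log"

// ModifyAncients matches the signature PluginCommitUpdate looks up via
// pl.Lookup("ModifyAncients", ...). The "headers" key is assumed to match
// the freezer header table name; treat it as illustrative.
func ModifyAncients(num uint64, update map[string]interface{}) {
	if _, ok := update["headers"]; ok {
		log.Info("Ancient header staged by the freezer", "number", num)
	}
}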

View File

@ -560,6 +560,12 @@ func (dl *diskLayer) generate(stats *generatorStats) {
default:
}
if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
if bytes.Compare(currentLocation, dl.genMarker) < 0 {
log.Error("Snapshot generator went backwards",
"currentLocation", fmt.Sprintf("%x", currentLocation),
"genMarker", fmt.Sprintf("%x", dl.genMarker))
}
// Flush out the batch anyway no matter it's empty or not.
// It's possible that all the states are recovered and the
// generation indeed makes progress.
@ -634,8 +640,14 @@ func (dl *diskLayer) generate(stats *generatorStats) {
stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
stats.accounts++
}
marker := accountHash[:]
// If snapshot generation is resumed here after an interruption, genMarker may go backwards
// when the last genMarker consisted of accountHash and storageHash.
if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
marker = dl.genMarker[:]
}
// If we've exceeded our batch allowance or termination was requested, flush to disk
if err := checkAndFlush(accountHash[:]); err != nil {
if err := checkAndFlush(marker); err != nil {
return err
}
// If the iterated account is the contract, create a further loop to
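An illustrative sketch (hash values hypothetical, not part of the diff) of the ordering problem the marker adjustment above avoids: a persisted genMarker of accountHash||storageHash compares as greater than the bare accountHash, so flushing with the shorter marker would trip the "Snapshot generator went backwards" check.

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	accountHash := common.HexToHash("0xaa") // hypothetical account hash
	storageHash := common.HexToHash("0x01") // hypothetical storage slot hash

	// Marker persisted mid-storage: accountHash || storageHash (64 bytes).
	genMarker := append(accountHash.Bytes(), storageHash.Bytes()...)

	// The bare account hash is a strict prefix, so it sorts before genMarker.
	fmt.Println(bytes.Compare(accountHash.Bytes(), genMarker) < 0) // true
}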

View File

@ -18,6 +18,7 @@ package fetcher
import (
"bytes"
"errors"
"fmt"
mrand "math/rand"
"sort"
@ -277,29 +278,27 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
)
errs := f.addTxs(txs)
for i, err := range errs {
if err != nil {
// Track the transaction hash if the price is too low for us.
// Avoid re-request this transaction when we receive another
// announcement.
if err == core.ErrUnderpriced || err == core.ErrReplaceUnderpriced {
for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
f.underpriced.Pop()
}
f.underpriced.Add(txs[i].Hash())
// Track the transaction hash if the price is too low for us.
// Avoid re-request this transaction when we receive another
// announcement.
if errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced) {
for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
f.underpriced.Pop()
}
// Track a few interesting failure types
switch err {
case nil: // Noop, but need to handle to not count these
f.underpriced.Add(txs[i].Hash())
}
// Track a few interesting failure types
switch {
case err == nil: // Noop, but need to handle to not count these
case core.ErrAlreadyKnown:
duplicate++
case errors.Is(err, core.ErrAlreadyKnown):
duplicate++
case core.ErrUnderpriced, core.ErrReplaceUnderpriced:
underpriced++
case errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced):
underpriced++
default:
otherreject++
}
default:
otherreject++
}
added = append(added, txs[i].Hash())
}
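A small standalone sketch (not from the diff) of why the comparisons above moved to errors.Is: tx-pool errors may arrive wrapped, and a plain == check would miss them.

package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	err := fmt.Errorf("peer 0xabc: %w", core.ErrAlreadyKnown)
	fmt.Println(err == core.ErrAlreadyKnown)          // false: the error is wrapped
	fmt.Println(errors.Is(err, core.ErrAlreadyKnown)) // true
}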

View File

@ -1334,10 +1334,12 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber
// newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation
func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, config *params.ChainConfig) *RPCTransaction {
var baseFee *big.Int
blockNumber := uint64(0)
if current != nil {
baseFee = misc.CalcBaseFee(config, current)
blockNumber = current.Number.Uint64()
}
return newRPCTransaction(tx, common.Hash{}, 0, 0, baseFee, config)
return newRPCTransaction(tx, common.Hash{}, blockNumber, 0, baseFee, config)
}
// newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation.

View File

@ -20,13 +20,13 @@ import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
@ -555,7 +555,7 @@ func (n *handshakeTestNode) expectDecode(t *testing.T, ptype byte, p []byte) Pac
func (n *handshakeTestNode) expectDecodeErr(t *testing.T, wantErr error, p []byte) {
t.Helper()
if _, err := n.decode(p); !reflect.DeepEqual(err, wantErr) {
if _, err := n.decode(p); !errors.Is(err, wantErr) {
t.Fatal(fmt.Errorf("(%s) got err %q, want %q", n.ln.ID().TerminalString(), err, wantErr))
}
}

View File

@ -370,6 +370,8 @@ func TestServerSetupConn(t *testing.T) {
clientkey, srvkey = newkey(), newkey()
clientpub = &clientkey.PublicKey
srvpub = &srvkey.PublicKey
fooErr = errors.New("foo")
readErr = errors.New("read error")
)
tests := []struct {
dontstart bool
@ -387,10 +389,10 @@ func TestServerSetupConn(t *testing.T) {
wantCloseErr: errServerStopped,
},
{
tt: &setupTransport{pubkey: clientpub, encHandshakeErr: errors.New("read error")},
tt: &setupTransport{pubkey: clientpub, encHandshakeErr: readErr},
flags: inboundConn,
wantCalls: "doEncHandshake,close,",
wantCloseErr: errors.New("read error"),
wantCloseErr: readErr,
},
{
tt: &setupTransport{pubkey: clientpub, phs: protoHandshake{ID: randomID().Bytes()}},
@ -400,11 +402,11 @@ func TestServerSetupConn(t *testing.T) {
wantCloseErr: DiscUnexpectedIdentity,
},
{
tt: &setupTransport{pubkey: clientpub, protoHandshakeErr: errors.New("foo")},
tt: &setupTransport{pubkey: clientpub, protoHandshakeErr: fooErr},
dialDest: enode.NewV4(clientpub, nil, 0, 0),
flags: dynDialedConn,
wantCalls: "doEncHandshake,doProtoHandshake,close,",
wantCloseErr: errors.New("foo"),
wantCloseErr: fooErr,
},
{
tt: &setupTransport{pubkey: srvpub, phs: protoHandshake{ID: crypto.FromECDSAPub(srvpub)[1:]}},
@ -443,7 +445,7 @@ func TestServerSetupConn(t *testing.T) {
}
p1, _ := net.Pipe()
srv.SetupConn(p1, test.flags, test.dialDest)
if !reflect.DeepEqual(test.tt.closeErr, test.wantCloseErr) {
if !errors.Is(test.tt.closeErr, test.wantCloseErr) {
t.Errorf("test %d: close error mismatch: got %q, want %q", i, test.tt.closeErr, test.wantCloseErr)
}
if test.tt.calls != test.wantCalls {

View File

@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 10 // Minor version component of the current release
VersionPatch = 10 // Patch version component of the current release
VersionPatch = 11 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)

View File

@ -18,8 +18,8 @@ package rlp
import (
"bytes"
"errors"
"io"
"reflect"
"testing"
"testing/quick"
)
@ -54,7 +54,7 @@ func TestCountValues(t *testing.T) {
if count != test.count {
t.Errorf("test %d: count mismatch, got %d want %d\ninput: %s", i, count, test.count, test.input)
}
if !reflect.DeepEqual(err, test.err) {
if !errors.Is(err, test.err) {
t.Errorf("test %d: err mismatch, got %q want %q\ninput: %s", i, err, test.err, test.input)
}
}

View File

@ -18,13 +18,13 @@ package rpc
import (
"context"
"errors"
"io"
"net"
"net/http"
"net/http/httptest"
"net/http/httputil"
"net/url"
"reflect"
"strings"
"sync/atomic"
"testing"
@ -69,7 +69,7 @@ func TestWebsocketOriginCheck(t *testing.T) {
t.Fatal("no error for wrong origin")
}
wantErr := wsHandshakeError{websocket.ErrBadHandshake, "403 Forbidden"}
if !reflect.DeepEqual(err, wantErr) {
if !errors.Is(err, wantErr) {
t.Fatalf("wrong error for wrong origin: %q", err)
}

View File

@ -794,8 +794,8 @@ type cleaner struct {
// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure ensure data availability while moving from
// memory to disk.
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
hash := common.BytesToHash(key)