diff --git a/.gitmodules b/.gitmodules
index 32bdb3b6e..90d1be0a3 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +1,4 @@
[submodule "tests"]
path = tests/testdata
url = https://github.com/ethereum/tests
+ shallow = true
diff --git a/.golangci.yml b/.golangci.yml
index 18b325e20..395a91fe1 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,7 +1,7 @@
# This file configures github.com/golangci/golangci-lint.
run:
- timeout: 3m
+ timeout: 5m
tests: true
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
diff --git a/.travis.yml b/.travis.yml
index a7db711cc..f1018cc11 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,7 @@ jobs:
allow_failures:
- stage: build
os: osx
- go: 1.15.x
+ go: 1.17.x
env:
- azure-osx
- azure-ios
@@ -16,7 +16,7 @@ jobs:
- stage: lint
os: linux
dist: bionic
- go: 1.16.x
+ go: 1.17.x
env:
- lint
git:
@@ -31,7 +31,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
- go: 1.16.x
+ go: 1.17.x
env:
- docker
services:
@@ -48,7 +48,7 @@ jobs:
os: linux
arch: arm64
dist: bionic
- go: 1.16.x
+ go: 1.17.x
env:
- docker
services:
@@ -65,7 +65,7 @@ jobs:
if: type = push
os: linux
dist: bionic
- go: 1.16.x
+ go: 1.17.x
env:
- ubuntu-ppa
- GO111MODULE=on
@@ -90,7 +90,7 @@ jobs:
os: linux
dist: bionic
sudo: required
- go: 1.16.x
+ go: 1.17.x
env:
- azure-linux
- GO111MODULE=on
@@ -127,7 +127,7 @@ jobs:
dist: bionic
services:
- docker
- go: 1.16.x
+ go: 1.17.x
env:
- azure-linux-mips
- GO111MODULE=on
@@ -192,7 +192,7 @@ jobs:
- stage: build
if: type = push
os: osx
- go: 1.16.x
+ go: 1.17.x
env:
- azure-osx
- azure-ios
@@ -224,7 +224,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
- go: 1.16.x
+ go: 1.17.x
env:
- GO111MODULE=on
script:
@@ -235,7 +235,7 @@ jobs:
os: linux
arch: arm64
dist: bionic
- go: 1.16.x
+ go: 1.17.x
env:
- GO111MODULE=on
script:
@@ -244,7 +244,7 @@ jobs:
- stage: build
os: linux
dist: bionic
- go: 1.15.x
+ go: 1.16.x
env:
- GO111MODULE=on
script:
@@ -255,7 +255,7 @@ jobs:
if: type = cron
os: linux
dist: bionic
- go: 1.16.x
+ go: 1.17.x
env:
- azure-purge
- GO111MODULE=on
diff --git a/Dockerfile b/Dockerfile
index e76c5765b..7badbc132 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
-FROM golang:1.16-alpine as builder
+FROM golang:1.17-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git
diff --git a/Dockerfile.alltools b/Dockerfile.alltools
index 71f63b7a4..3ae5377e4 100644
--- a/Dockerfile.alltools
+++ b/Dockerfile.alltools
@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
-FROM golang:1.16-alpine as builder
+FROM golang:1.17-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git
diff --git a/SECURITY.md b/SECURITY.md
index bdce7b8d2..635c0869f 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -12,6 +12,8 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
| ------- | ------- | ----------- |
| `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) |
| `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) |
+| `Discv5` | 20191015 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2019-10-15_Discv5_audit_LeastAuthority.pdf) |
+| `Discv5` | 20200124 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2020-01-24_DiscV5_audit_Cure53.pdf) |
## Reporting a Vulnerability
diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index e6d524559..261b4d1b8 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -137,7 +137,7 @@ func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{
dst := reflect.ValueOf(v).Elem()
src := reflect.ValueOf(marshalledValues)
- if dst.Kind() == reflect.Struct && src.Kind() != reflect.Struct {
+ if dst.Kind() == reflect.Struct {
return set(dst.Field(0), src)
}
return set(dst, src)
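
This relaxation lets a lone tuple return value be copied into the first field of a destination struct, as the updated unpack test later in this patch exercises. A minimal standalone sketch, assuming a hypothetical ABI method "get" that returns a single (uint256,uint256) tuple:

```go
package main

import (
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical method returning a single two-field tuple.
	const def = `[{"name":"get","type":"function","outputs":[
		{"name":"ret","type":"tuple","components":[
			{"name":"a","type":"uint256"},{"name":"b","type":"uint256"}]}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		log.Fatal(err)
	}
	// ABI encoding of the tuple (1, 2): two 32-byte words.
	data := common.Hex2Bytes(
		"0000000000000000000000000000000000000000000000000000000000000001" +
			"0000000000000000000000000000000000000000000000000000000000000002")
	// The single tuple return now lands in the wrapper's first field.
	var out struct {
		Ret struct {
			A *big.Int
			B *big.Int
		}
	}
	if err := parsed.UnpackIntoInterface(&out, "get", data); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Ret.A, out.Ret.B) // 1 2
}
```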
diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go
index 354632b25..6d99517e6 100644
--- a/accounts/abi/bind/base.go
+++ b/accounts/abi/bind/base.go
@@ -431,6 +431,9 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]inter
// UnpackLog unpacks a retrieved log into the provided output structure.
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
+ if log.Topics[0] != c.abi.Events[event].ID {
+ return fmt.Errorf("event signature mismatch")
+ }
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
@@ -447,6 +450,9 @@ func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log)
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
+ if log.Topics[0] != c.abi.Events[event].ID {
+ return fmt.Errorf("event signature mismatch")
+ }
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
return err
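
Both unpackers now refuse logs whose first topic differs from the event ID, i.e. the keccak256 hash of the event's canonical signature. A minimal sketch of the guard being applied (the event signature and topic value here are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// An event's ID is the keccak256 hash of its canonical signature.
	id := crypto.Keccak256Hash([]byte("Transfer(address,address,uint256)"))

	// A log whose first topic doesn't match would previously be decoded
	// into the wrong event shape; now UnpackLog rejects it up front.
	logEntry := types.Log{Topics: []common.Hash{common.HexToHash("0x1234")}}
	if logEntry.Topics[0] != id {
		fmt.Println("event signature mismatch")
	}
}
```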
diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go
index c4740f68b..7b023e3b4 100644
--- a/accounts/abi/bind/base_test.go
+++ b/accounts/abi/bind/base_test.go
@@ -110,7 +110,7 @@ const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16
func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
hash := crypto.Keccak256Hash([]byte("testName"))
topics := []common.Hash{
- common.HexToHash("0x0"),
+ crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)")),
hash,
}
mockLog := newMockLog(topics, common.HexToHash("0x0"))
@@ -135,7 +135,7 @@ func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
}
hash := crypto.Keccak256Hash(sliceBytes)
topics := []common.Hash{
- common.HexToHash("0x0"),
+ crypto.Keccak256Hash([]byte("received(string[],address,uint256,bytes)")),
hash,
}
mockLog := newMockLog(topics, common.HexToHash("0x0"))
@@ -160,7 +160,7 @@ func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
}
hash := crypto.Keccak256Hash(arrBytes)
topics := []common.Hash{
- common.HexToHash("0x0"),
+ crypto.Keccak256Hash([]byte("received(address[2],address,uint256,bytes)")),
hash,
}
mockLog := newMockLog(topics, common.HexToHash("0x0"))
@@ -187,7 +187,7 @@ func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
var functionTy [24]byte
copy(functionTy[:], functionTyBytes[0:24])
topics := []common.Hash{
- common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
+ crypto.Keccak256Hash([]byte("received(function,address,uint256,bytes)")),
common.BytesToHash(functionTyBytes),
}
mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))
@@ -208,7 +208,7 @@ func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
bytes := []byte{1, 2, 3, 4, 5}
hash := crypto.Keccak256Hash(bytes)
topics := []common.Hash{
- common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
+ crypto.Keccak256Hash([]byte("received(bytes,address,uint256,bytes)")),
hash,
}
mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"))
diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index d49b436db..6d9052f88 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -1785,6 +1785,77 @@ var bindTests = []struct {
nil,
nil,
},
+ // Test resolving single struct argument
+ {
+ `NewSingleStructArgument`,
+ `
+ pragma solidity ^0.8.0;
+
+ contract NewSingleStructArgument {
+ struct MyStruct{
+ uint256 a;
+ uint256 b;
+ }
+ event StructEvent(MyStruct s);
+ function TestEvent() public {
+ emit StructEvent(MyStruct({a: 1, b: 2}));
+ }
+ }
+ `,
+ []string{"608060405234801561001057600080fd5b50610113806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806324ec1d3f14602d575b600080fd5b60336035565b005b7fb4b2ff75e30cb4317eaae16dd8a187dd89978df17565104caa6c2797caae27d460405180604001604052806001815260200160028152506040516078919060ba565b60405180910390a1565b6040820160008201516096600085018260ad565b50602082015160a7602085018260ad565b50505050565b60b48160d3565b82525050565b600060408201905060cd60008301846082565b92915050565b600081905091905056fea26469706673582212208823628796125bf9941ce4eda18da1be3cf2931b231708ab848e1bd7151c0c9a64736f6c63430008070033"},
+ []string{`[{"anonymous":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"indexed":false,"internalType":"struct Test.MyStruct","name":"s","type":"tuple"}],"name":"StructEvent","type":"event"},{"inputs":[],"name":"TestEvent","outputs":[],"stateMutability":"nonpayable","type":"function"}]`},
+ `
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/ethconfig"
+ `,
+ `
+ var (
+ key, _ = crypto.GenerateKey()
+ user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
+ sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
+ )
+ defer sim.Close()
+
+ _, _, d, err := DeployNewSingleStructArgument(user, sim)
+ if err != nil {
+ t.Fatalf("Failed to deploy contract %v", err)
+ }
+ sim.Commit()
+
+ _, err = d.TestEvent(user)
+ if err != nil {
+ t.Fatalf("Failed to call contract %v", err)
+ }
+ sim.Commit()
+
+ it, err := d.FilterStructEvent(nil)
+ if err != nil {
+ t.Fatalf("Failed to filter contract event %v", err)
+ }
+ var count int
+ for it.Next() {
+ if it.Event.S.A.Cmp(big.NewInt(1)) != 0 {
+ t.Fatal("Unexpected contract event")
+ }
+ if it.Event.S.B.Cmp(big.NewInt(2)) != 0 {
+ t.Fatal("Unexpected contract event")
+ }
+ count += 1
+ }
+ if count != 1 {
+ t.Fatal("Unexpected contract event number")
+ }
+ `,
+ nil,
+ nil,
+ nil,
+ nil,
+ },
}
// Tests that packages generated by the binder can be successfully compiled and
diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go
index b88f77805..e617f8abc 100644
--- a/accounts/abi/unpack_test.go
+++ b/accounts/abi/unpack_test.go
@@ -762,20 +762,24 @@ func TestUnpackTuple(t *testing.T) {
buff.Write(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) // ret[b] = -1
// If the result is single tuple, use struct as return value container directly.
- v := struct {
+ type v struct {
A *big.Int
B *big.Int
- }{new(big.Int), new(big.Int)}
+ }
+ type r struct {
+ Result v
+ }
+ var ret0 = new(r)
+ err = abi.UnpackIntoInterface(ret0, "tuple", buff.Bytes())
- err = abi.UnpackIntoInterface(&v, "tuple", buff.Bytes())
if err != nil {
t.Error(err)
} else {
- if v.A.Cmp(big.NewInt(1)) != 0 {
- t.Errorf("unexpected value unpacked: want %x, got %x", 1, v.A)
+ if ret0.Result.A.Cmp(big.NewInt(1)) != 0 {
+ t.Errorf("unexpected value unpacked: want %x, got %x", 1, ret0.Result.A)
}
- if v.B.Cmp(big.NewInt(-1)) != 0 {
- t.Errorf("unexpected value unpacked: want %x, got %x", -1, v.B)
+ if ret0.Result.B.Cmp(big.NewInt(-1)) != 0 {
+ t.Errorf("unexpected value unpacked: want %x, got %x", -1, ret0.Result.B)
}
}
diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go
index fe9233c04..a847545bc 100644
--- a/accounts/keystore/account_cache_test.go
+++ b/accounts/keystore/account_cache_test.go
@@ -96,7 +96,7 @@ func TestWatchNoDir(t *testing.T) {
// Create ks but not the directory that it watches.
rand.Seed(time.Now().UnixNano())
- dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
+ dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP)
list := ks.Accounts()
@@ -322,7 +322,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
// Create a temporary keystore to test with
rand.Seed(time.Now().UnixNano())
- dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
+ dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP)
list := ks.Accounts()
diff --git a/accounts/keystore/watch.go b/accounts/keystore/watch.go
index d6ef53327..ad176040d 100644
--- a/accounts/keystore/watch.go
+++ b/accounts/keystore/watch.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build (darwin && !ios && cgo) || freebsd || (linux && !arm64) || netbsd || solaris
// +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris
package keystore
diff --git a/accounts/keystore/watch_fallback.go b/accounts/keystore/watch_fallback.go
index de0e87f8a..e40eca42f 100644
--- a/accounts/keystore/watch_fallback.go
+++ b/accounts/keystore/watch_fallback.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build (darwin && !cgo) || ios || (linux && arm64) || windows || (!darwin && !freebsd && !linux && !netbsd && !solaris)
// +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris
// This is the fallback implementation of directory watching.
diff --git a/accounts/manager.go b/accounts/manager.go
index acf41ed8e..1e111d194 100644
--- a/accounts/manager.go
+++ b/accounts/manager.go
@@ -25,6 +25,10 @@ import (
"github.com/ethereum/go-ethereum/event"
)
+// managerSubBufferSize determines how many incoming wallet events
+// the manager will buffer in its channel.
+const managerSubBufferSize = 50
+
// Config contains the settings of the global account manager.
//
// TODO(rjl493456442, karalabe, holiman): Get rid of this when account management
@@ -33,18 +37,27 @@ type Config struct {
InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed
}
+// newBackendEvent lets the manager know it should
+// track the given backend for wallet updates.
+type newBackendEvent struct {
+ backend Backend
+ processed chan struct{} // Informs event emitter that backend has been integrated
+}
+
// Manager is an overarching account manager that can communicate with various
// backends for signing transactions.
type Manager struct {
- config *Config // Global account manager configurations
- backends map[reflect.Type][]Backend // Index of backends currently registered
- updaters []event.Subscription // Wallet update subscriptions for all backends
- updates chan WalletEvent // Subscription sink for backend wallet changes
- wallets []Wallet // Cache of all wallets from all registered backends
+ config *Config // Global account manager configurations
+ backends map[reflect.Type][]Backend // Index of backends currently registered
+ updaters []event.Subscription // Wallet update subscriptions for all backends
+ updates chan WalletEvent // Subscription sink for backend wallet changes
+ newBackends chan newBackendEvent // Incoming backends to be tracked by the manager
+ wallets []Wallet // Cache of all wallets from all registered backends
feed event.Feed // Wallet feed notifying of arrivals/departures
quit chan chan error
+ term chan struct{} // Channel is closed upon termination of the update loop
lock sync.RWMutex
}
@@ -57,7 +70,7 @@ func NewManager(config *Config, backends ...Backend) *Manager {
wallets = merge(wallets, backend.Wallets()...)
}
// Subscribe to wallet notifications from all backends
- updates := make(chan WalletEvent, 4*len(backends))
+ updates := make(chan WalletEvent, managerSubBufferSize)
subs := make([]event.Subscription, len(backends))
for i, backend := range backends {
@@ -65,12 +78,14 @@ func NewManager(config *Config, backends ...Backend) *Manager {
}
// Assemble the account manager and return
am := &Manager{
- config: config,
- backends: make(map[reflect.Type][]Backend),
- updaters: subs,
- updates: updates,
- wallets: wallets,
- quit: make(chan chan error),
+ config: config,
+ backends: make(map[reflect.Type][]Backend),
+ updaters: subs,
+ updates: updates,
+ newBackends: make(chan newBackendEvent),
+ wallets: wallets,
+ quit: make(chan chan error),
+ term: make(chan struct{}),
}
for _, backend := range backends {
kind := reflect.TypeOf(backend)
@@ -93,6 +108,14 @@ func (am *Manager) Config() *Config {
return am.config
}
+// AddBackend starts the tracking of an additional backend for wallet updates.
+// cmd/geth assumes that once this func returns, the backend has already been integrated.
+func (am *Manager) AddBackend(backend Backend) {
+ done := make(chan struct{})
+ am.newBackends <- newBackendEvent{backend, done}
+ <-done
+}
+
// update is the wallet event loop listening for notifications from the backends
// and updating the cache of wallets.
func (am *Manager) update() {
@@ -122,10 +145,22 @@ func (am *Manager) update() {
// Notify any listeners of the event
am.feed.Send(event)
-
+ case event := <-am.newBackends:
+ am.lock.Lock()
+ // Update caches
+ backend := event.backend
+ am.wallets = merge(am.wallets, backend.Wallets()...)
+ am.updaters = append(am.updaters, backend.Subscribe(am.updates))
+ kind := reflect.TypeOf(backend)
+ am.backends[kind] = append(am.backends[kind], backend)
+ am.lock.Unlock()
+ close(event.processed)
case errc := <-am.quit:
// Manager terminating, return
errc <- nil
+ // Signal event emitters that the loop is no longer receiving values
+ // so that they don't get stuck.
+ close(am.term)
return
}
}
@@ -133,6 +168,9 @@ func (am *Manager) update() {
// Backends retrieves the backend(s) with the given type from the account manager.
func (am *Manager) Backends(kind reflect.Type) []Backend {
+ am.lock.RLock()
+ defer am.lock.RUnlock()
+
return am.backends[kind]
}
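
AddBackend hands the backend to the update loop and blocks on the processed channel, so Wallets() and Backends() reflect it as soon as the call returns. A minimal usage sketch (the keystore path and scrypt parameters are illustrative):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	// Start with an empty manager; its update loop runs in the background.
	am := accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: false})

	// Attach a keystore backend after the fact. AddBackend returns only
	// once the update loop has integrated it, so the caller can rely on
	// the backend being visible immediately.
	ks := keystore.NewKeyStore("/tmp/example-keys", keystore.StandardScryptN, keystore.StandardScryptP)
	am.AddBackend(ks)

	fmt.Println("keystore backends:", len(am.Backends(reflect.TypeOf(ks))))
}
```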
diff --git a/appveyor.yml b/appveyor.yml
index a72163382..65b5f9684 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,29 +1,57 @@
-os: Visual Studio 2019
clone_depth: 5
version: "{branch}.{build}"
+
+image:
+ - Ubuntu
+ - Visual Studio 2019
+
environment:
matrix:
- # We use gcc from MSYS2 because it is the most recent compiler version available on
- # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is
- # contained in PATH.
- GETH_ARCH: amd64
- GETH_CC: C:\msys64\mingw64\bin\gcc.exe
- PATH: C:\msys64\mingw64\bin;C:\Program Files (x86)\NSIS\;%PATH%
+ GETH_MINGW: 'C:\msys64\mingw64'
- GETH_ARCH: 386
- GETH_CC: C:\msys64\mingw32\bin\gcc.exe
- PATH: C:\msys64\mingw32\bin;C:\Program Files (x86)\NSIS\;%PATH%
+ GETH_MINGW: 'C:\msys64\mingw32'
install:
- git submodule update --init --depth 1
- go version
- - "%GETH_CC% --version"
-build_script:
- - go run build\ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
+for:
+ # Linux has its own script without -arch and -cc.
+ # The linux builder also runs lint.
+ - matrix:
+ only:
+ - image: Ubuntu
+ build_script:
+ - go run build/ci.go lint
+ - go run build/ci.go install -dlgo
+ test_script:
+ - go run build/ci.go test -dlgo -coverage
-after_build:
- - go run build\ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- - go run build\ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
+ # linux/386 is disabled.
+ - matrix:
+ exclude:
+ - image: Ubuntu
+ GETH_ARCH: 386
-test_script:
- - go run build\ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage
+ # Windows builds for amd64 + 386.
+ - matrix:
+ only:
+ - image: Visual Studio 2019
+ environment:
+ # We use gcc from MSYS2 because it is the most recent compiler version available on
+ # AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is
+ # contained in PATH.
+ GETH_CC: '%GETH_MINGW%\bin\gcc.exe'
+ PATH: '%GETH_MINGW%\bin;C:\Program Files (x86)\NSIS\;%PATH%'
+ build_script:
+ - 'echo %GETH_ARCH%'
+ - 'echo %GETH_CC%'
+ - '%GETH_CC% --version'
+ - go run build/ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
+ after_build:
+ # Upload builds. Note that ci.go makes this a no-op in PR builds.
+ - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
+ - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
+ test_script:
+ - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage
diff --git a/build/checksums.txt b/build/checksums.txt
index e667b30ce..686e1604b 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -1,33 +1,37 @@
# This file contains sha256 checksums of optional build dependencies.
-ae4f6b6e2a1677d31817984655a762074b5356da50fb58722b99104870d43503 go1.16.4.src.tar.gz
-18fe94775763db3878717393b6d41371b0b45206055e49b3838328120c977d13 go1.16.4.darwin-amd64.tar.gz
-cb6b972cc42e669f3585c648198cd5b6f6d7a0811d413ad64b50c02ba06ccc3a go1.16.4.darwin-arm64.tar.gz
-cd1b146ef6e9006f27dd99e9687773e7fef30e8c985b7d41bff33e955a3bb53a go1.16.4.linux-386.tar.gz
-7154e88f5a8047aad4b80ebace58a059e36e7e2e4eb3b383127a28c711b4ff59 go1.16.4.linux-amd64.tar.gz
-8b18eb05ddda2652d69ab1b1dd1f40dd731799f43c6a58b512ad01ae5b5bba21 go1.16.4.linux-arm64.tar.gz
-a53391a800ddec749ee90d38992babb27b95cfb864027350c737b9aa8e069494 go1.16.4.linux-armv6l.tar.gz
-e75c0b114a09eb5499874162b208931dc260de0fedaeedac8621bf263c974605 go1.16.4.windows-386.zip
-d40139b7ade8a3008e3240a6f86fe8f899a9c465c917e11dac8758af216f5eb0 go1.16.4.windows-amd64.zip
-7cf2bc8a175d6d656861165bfc554f92dc78d2abf5afe5631db3579555d97409 go1.16.4.freebsd-386.tar.gz
-ccdd2b76de1941b60734408fda0d750aaa69330d8a07430eed4c56bdb3502f6f go1.16.4.freebsd-amd64.tar.gz
-80cfac566e344096a8df8f37bbd21f89e76a6fbe601406565d71a87a665fc125 go1.16.4.linux-ppc64le.tar.gz
-d6431881b3573dc29ecc24fbeab5e5ec25d8c9273aa543769c86a1a3bbac1ddf go1.16.4.linux-s390x.tar.gz
+3a70e5055509f347c0fb831ca07a2bf3b531068f349b14a3c652e9b5b67beb5d go1.17.src.tar.gz
+355bd544ce08d7d484d9d7de05a71b5c6f5bc10aa4b316688c2192aeb3dacfd1 go1.17.darwin-amd64.tar.gz
+da4e3e3c194bf9eed081de8842a157120ef44a7a8d7c820201adae7b0e28b20b go1.17.darwin-arm64.tar.gz
+6819a7a11b8351d5d5768f2fff666abde97577602394f132cb7f85b3a7151f05 go1.17.freebsd-386.tar.gz
+15c184c83d99441d719da201b26256455eee85a808747c404b4183e9aa6c64b4 go1.17.freebsd-amd64.tar.gz
+c19e3227a6ac6329db91d1af77bbf239ccd760a259c16e6b9c932d527ff14848 go1.17.linux-386.tar.gz
+6bf89fc4f5ad763871cf7eac80a2d594492de7a818303283f1366a7f6a30372d go1.17.linux-amd64.tar.gz
+01a9af009ada22122d3fcb9816049c1d21842524b38ef5d5a0e2ee4b26d7c3e7 go1.17.linux-arm64.tar.gz
+ae89d33f4e4acc222bdb04331933d5ece4ae71039812f6ccd7493cb3e8ddfb4e go1.17.linux-armv6l.tar.gz
+ee84350114d532bf15f096198c675aafae9ff091dc4cc69eb49e1817ff94dbd7 go1.17.linux-ppc64le.tar.gz
+a50aaecf054f393575f969a9105d5c6864dd91afc5287d772449033fbafcf7e3 go1.17.linux-s390x.tar.gz
+c5afdd2ea4969f2b44637e913b04f7c15265d7beb60924a28063722670a52feb go1.17.windows-386.zip
+2a18bd65583e221be8b9b7c2fbe3696c40f6e27c2df689bbdcc939d49651d151 go1.17.windows-amd64.zip
+5256f92f643d9022394ddc84de5c74fe8660c2151daaa199b12e60e542d694ae go1.17.windows-arm64.zip
-7e9a47ab540aa3e8472fbf8120d28bed3b9d9cf625b955818e8bc69628d7187c golangci-lint-1.39.0-darwin-amd64.tar.gz
-574daa2c9c299b01672a6daeb1873b5f12e413cdb6dc0e30f2ff163956778064 golangci-lint-1.39.0-darwin-arm64.tar.gz
-6225f7014987324ab78e9b511f294e3f25be013728283c33918c67c8576d543e golangci-lint-1.39.0-freebsd-386.tar.gz
-6b3e76e1e5eaf0159411c8e2727f8d533989d3bb19f10e9caa6e0b9619ee267d golangci-lint-1.39.0-freebsd-amd64.tar.gz
-a301cacfff87ed9b00313d95278533c25a4527a06b040a17d969b4b7e1b8a90d golangci-lint-1.39.0-freebsd-armv7.tar.gz
-25bfd96a29c3112f508d5e4fc860dbad7afce657233c343acfa20715717d51e7 golangci-lint-1.39.0-freebsd-armv6.tar.gz
-9687e4ff15545cfc722b0e46107a94195166a505023b48a316579af25ad09505 golangci-lint-1.39.0-linux-armv7.tar.gz
-a7fa7ab2bfc99cbe5e5bcbf5684f5a997f920afbbe2f253d2feb1001d5e3c8b3 golangci-lint-1.39.0-linux-armv6.tar.gz
-c8f9634115beddb4ed9129c1f7ecd4c97c99d07aeef33e3707234097eeb51b7b golangci-lint-1.39.0-linux-mips64le.tar.gz
-d1234c213b74751f1af413302dde0e9a6d4d29aecef034af7abb07dc1b6e887f golangci-lint-1.39.0-linux-arm64.tar.gz
-df25d9267168323b163147acb823ab0215a8a3bb6898a4a9320afdfedde66817 golangci-lint-1.39.0-linux-386.tar.gz
-1767e75fba357b7651b1a796d38453558f371c60af805505ec99e166908c04b5 golangci-lint-1.39.0-linux-ppc64le.tar.gz
-25fd75bf3186b3d930ecae10185689968fd18fd8fa6f9f555d6beb04348c20f6 golangci-lint-1.39.0-linux-s390x.tar.gz
-3a73aa7468087caa62673c8adea99b4e4dff846dc72707222db85f8679b40cbf golangci-lint-1.39.0-linux-amd64.tar.gz
-578caceccf81739bda67dbfec52816709d03608c6878888ecdc0e186a094a41b golangci-lint-1.39.0-linux-mips64.tar.gz
-494b66ba0e32c8ddf6c4f6b1d05729b110900f6017eda943057e43598c17d7a8 golangci-lint-1.39.0-windows-386.zip
-52ec2e13a3cbb47147244dff8cfc35103563deb76e0459133058086fc35fb2c7 golangci-lint-1.39.0-windows-amd64.zip
+d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
+e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
+14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809 golangci-lint-1.42.0-freebsd-386.tar.gz
+337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80 golangci-lint-1.42.0-freebsd-amd64.tar.gz
+6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff golangci-lint-1.42.0-freebsd-armv6.tar.gz
+878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e golangci-lint-1.42.0-freebsd-armv7.tar.gz
+42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece golangci-lint-1.42.0-linux-386.tar.gz
+6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159 golangci-lint-1.42.0-linux-amd64.tar.gz
+2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246 golangci-lint-1.42.0-linux-arm64.tar.gz
+08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1 golangci-lint-1.42.0-linux-armv6.tar.gz
+c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf golangci-lint-1.42.0-linux-armv7.tar.gz
+3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40 golangci-lint-1.42.0-linux-mips64.tar.gz
+f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420 golangci-lint-1.42.0-linux-mips64le.tar.gz
+1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6 golangci-lint-1.42.0-linux-ppc64le.tar.gz
+8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d golangci-lint-1.42.0-linux-riscv64.tar.gz
+5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc golangci-lint-1.42.0-linux-s390x.tar.gz
+e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0 golangci-lint-1.42.0-windows-386.zip
+7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883 golangci-lint-1.42.0-windows-amd64.zip
+59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab golangci-lint-1.42.0-windows-armv6.zip
+65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5 golangci-lint-1.42.0-windows-armv7.zip
diff --git a/build/ci.go b/build/ci.go
index d7d2ce72e..6f1e975a5 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -129,19 +129,13 @@ var (
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
- // Note: wily is unsupported because it was officially deprecated on Launchpad.
- // Note: yakkety is unsupported because it was officially deprecated on Launchpad.
- // Note: zesty is unsupported because it was officially deprecated on Launchpad.
- // Note: artful is unsupported because it was officially deprecated on Launchpad.
- // Note: cosmic is unsupported because it was officially deprecated on Launchpad.
- // Note: disco is unsupported because it was officially deprecated on Launchpad.
- // Note: eoan is unsupported because it was officially deprecated on Launchpad.
+ // Note: the following Ubuntu releases have been officially deprecated on Launchpad:
+ // wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy
debDistroGoBoots = map[string]string{
"trusty": "golang-1.11",
"xenial": "golang-go",
"bionic": "golang-go",
"focal": "golang-go",
- "groovy": "golang-go",
"hirsute": "golang-go",
}
@@ -153,7 +147,7 @@ var (
// This is the version of go that will be downloaded by
//
// go run ci.go install -dlgo
- dlgoVersion = "1.16.4"
+ dlgoVersion = "1.17"
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -330,7 +324,7 @@ func doLint(cmdline []string) {
// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
- const version = "1.39.0"
+ const version = "1.42.0"
csdb := build.MustLoadChecksums("build/checksums.txt")
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go
index 34a20c515..7dcb412b5 100644
--- a/cmd/devp2p/internal/ethtest/chain.go
+++ b/cmd/devp2p/internal/ethtest/chain.go
@@ -39,16 +39,6 @@ type Chain struct {
chainConfig *params.ChainConfig
}
-func (c *Chain) WriteTo(writer io.Writer) error {
- for _, block := range c.blocks {
- if err := rlp.Encode(writer, block); err != nil {
- return err
- }
- }
-
- return nil
-}
-
// Len returns the length of the chain.
func (c *Chain) Len() int {
return len(c.blocks)
diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go
index 6f7365483..88d8e143c 100644
--- a/cmd/devp2p/internal/ethtest/helpers.go
+++ b/cmd/devp2p/internal/ethtest/helpers.go
@@ -242,9 +242,17 @@ func (s *Suite) createSendAndRecvConns(isEth66 bool) (*Conn, *Conn, error) {
return sendConn, recvConn, nil
}
+// readAndServe serves GetBlockHeaders requests while waiting on another
+// message from the node, dispatching on the negotiated protocol version.
+func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
+ if c.negotiatedProtoVersion == 66 {
+ _, msg := c.readAndServe66(chain, timeout)
+ return msg
+ }
+ return c.readAndServe65(chain, timeout)
+}
+
-// readAndServe serves GetBlockHeaders requests while waiting
-// on another message from the node.
+// readAndServe65 serves eth65 GetBlockHeaders requests while waiting
+// on another message from the node.
-func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
+func (c *Conn) readAndServe65(chain *Chain, timeout time.Duration) Message {
start := time.Now()
for time.Since(start) < timeout {
c.SetReadDeadline(time.Now().Add(5 * time.Second))
@@ -279,8 +287,8 @@ func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Mess
switch msg := msg.(type) {
case *Ping:
c.Write(&Pong{})
- case *GetBlockHeaders:
- headers, err := chain.GetHeaders(*msg)
+ case GetBlockHeaders:
+ headers, err := chain.GetHeaders(msg)
if err != nil {
return 0, errorf("could not get headers for inbound header request: %v", err)
}
diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go
index 6e3217151..6d14404e6 100644
--- a/cmd/devp2p/internal/ethtest/suite_test.go
+++ b/cmd/devp2p/internal/ethtest/suite_test.go
@@ -45,7 +45,7 @@ func TestEthSuite(t *testing.T) {
if err != nil {
t.Fatalf("could not create new test suite: %v", err)
}
- for _, test := range suite.AllEthTests() {
+ for _, test := range suite.Eth66Tests() {
t.Run(test.Name, func(t *testing.T) {
result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
if result[0].Failed {
diff --git a/cmd/evm/README.md b/cmd/evm/README.md
index d5257069f..1a029ab70 100644
--- a/cmd/evm/README.md
+++ b/cmd/evm/README.md
@@ -208,7 +208,7 @@ Example:
]
}
```
-When applying this, using a reward of `0x08`
+When applying this, using a reward of `0x80`
Output:
```json
{
diff --git a/cmd/evm/disasm.go b/cmd/evm/disasm.go
index 68a09cbf5..f9719497f 100644
--- a/cmd/evm/disasm.go
+++ b/cmd/evm/disasm.go
@@ -46,7 +46,7 @@ func disasmCmd(ctx *cli.Context) error {
case ctx.GlobalIsSet(InputFlag.Name):
in = ctx.GlobalString(InputFlag.Name)
default:
- return errors.New("Missing filename or --input value")
+ return errors.New("missing filename or --input value")
}
code := strings.TrimSpace(in)
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index 1ab2f001e..fae65767b 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
@@ -46,13 +47,14 @@ type Prestate struct {
// ExecutionResult contains the execution status after running a state test, any
// error that might have occurred and a dump of the final state if requested.
type ExecutionResult struct {
- StateRoot common.Hash `json:"stateRoot"`
- TxRoot common.Hash `json:"txRoot"`
- ReceiptRoot common.Hash `json:"receiptRoot"`
- LogsHash common.Hash `json:"logsHash"`
- Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
- Receipts types.Receipts `json:"receipts"`
- Rejected []*rejectedTx `json:"rejected,omitempty"`
+ StateRoot common.Hash `json:"stateRoot"`
+ TxRoot common.Hash `json:"txRoot"`
+ ReceiptRoot common.Hash `json:"receiptRoot"`
+ LogsHash common.Hash `json:"logsHash"`
+ Bloom types.Bloom `json:"logsBloom" gencodec:"required"`
+ Receipts types.Receipts `json:"receipts"`
+ Rejected []*rejectedTx `json:"rejected,omitempty"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
}
type ommer struct {
@@ -62,23 +64,28 @@ type ommer struct {
//go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
type stEnv struct {
- Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
- Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"`
- GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
- Number uint64 `json:"currentNumber" gencodec:"required"`
- Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
- BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
- Ommers []ommer `json:"ommers,omitempty"`
- BaseFee *big.Int `json:"currentBaseFee,omitempty"`
+ Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *big.Int `json:"currentDifficulty"`
+ ParentDifficulty *big.Int `json:"parentDifficulty"`
+ GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
+ Number uint64 `json:"currentNumber" gencodec:"required"`
+ Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
+ ParentTimestamp uint64 `json:"parentTimestamp,omitempty"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+ BaseFee *big.Int `json:"currentBaseFee,omitempty"`
+ ParentUncleHash common.Hash `json:"parentUncleHash"`
}
type stEnvMarshaling struct {
- Coinbase common.UnprefixedAddress
- Difficulty *math.HexOrDecimal256
- GasLimit math.HexOrDecimal64
- Number math.HexOrDecimal64
- Timestamp math.HexOrDecimal64
- BaseFee *math.HexOrDecimal256
+ Coinbase common.UnprefixedAddress
+ Difficulty *math.HexOrDecimal256
+ ParentDifficulty *math.HexOrDecimal256
+ GasLimit math.HexOrDecimal64
+ Number math.HexOrDecimal64
+ Timestamp math.HexOrDecimal64
+ ParentTimestamp math.HexOrDecimal64
+ BaseFee *math.HexOrDecimal256
}
type rejectedTx struct {
@@ -247,6 +254,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
LogsHash: rlpHash(statedb.Logs()),
Receipts: receipts,
Rejected: rejectedTxs,
+ Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty),
}
return statedb, execRs, nil
}
@@ -274,3 +282,23 @@ func rlpHash(x interface{}) (h common.Hash) {
hw.Sum(h[:0])
return h
}
+
+// calcDifficulty is based on ethash.CalcDifficulty. This method is used in case
+// the caller does not provide an explicit difficulty, but instead provides only
+// parent timestamp + difficulty.
+// Note: this method only works for the ethash engine.
+func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime uint64,
+ parentDifficulty *big.Int, parentUncleHash common.Hash) *big.Int {
+ uncleHash := parentUncleHash
+ if uncleHash == (common.Hash{}) {
+ uncleHash = types.EmptyUncleHash
+ }
+ parent := &types.Header{
+ ParentHash: common.Hash{},
+ UncleHash: uncleHash,
+ Difficulty: parentDifficulty,
+ Number: new(big.Int).SetUint64(number - 1),
+ Time: parentTime,
+ }
+ return ethash.CalcDifficulty(config, currentTime, parent)
+}
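
The helper just rebuilds a parent header from the env fields and defers to the consensus engine. A standalone sketch of the same computation, with hypothetical parent values:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Hypothetical parent block: no ommers, so the empty uncle hash is used.
	parent := &types.Header{
		UncleHash:  types.EmptyUncleHash,
		Difficulty: big.NewInt(0x2000000),
		Number:     big.NewInt(2000000),
		Time:       1000000000,
	}
	// Difficulty of a child block sealed 13 seconds after the parent.
	d := ethash.CalcDifficulty(params.MainnetChainConfig, 1000000013, parent)
	fmt.Println("currentDifficulty:", d)
}
```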
diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go
index 626f974a0..05b6ed164 100644
--- a/cmd/evm/internal/t8ntool/flags.go
+++ b/cmd/evm/internal/t8ntool/flags.go
@@ -30,7 +30,7 @@ var (
Name: "trace",
Usage: "Output full trace logs to files .jsonl",
}
- TraceDisableMemoryFlag = cli.BoolFlag{
+ TraceDisableMemoryFlag = cli.BoolTFlag{
Name: "trace.nomemory",
Usage: "Disable full memory dump in traces",
}
@@ -38,7 +38,7 @@ var (
Name: "trace.nostack",
Usage: "Disable stack output in traces",
}
- TraceDisableReturnDataFlag = cli.BoolFlag{
+ TraceDisableReturnDataFlag = cli.BoolTFlag{
Name: "trace.noreturndata",
Usage: "Disable return data output in traces",
}
diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go
index c7f079c02..1bb3c6a46 100644
--- a/cmd/evm/internal/t8ntool/gen_stenv.go
+++ b/cmd/evm/internal/t8ntool/gen_stenv.go
@@ -16,38 +16,47 @@ var _ = (*stEnvMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (s stEnv) MarshalJSON() ([]byte, error) {
type stEnv struct {
- Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
- GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
- Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
- Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
- BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
- Ommers []ommer `json:"ommers,omitempty"`
- BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
+ ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
+ GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
+ Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
+ Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
+ ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+ BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ ParentUncleHash common.Hash `json:"parentUncleHash"`
}
var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
+ enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
enc.Number = math.HexOrDecimal64(s.Number)
enc.Timestamp = math.HexOrDecimal64(s.Timestamp)
+ enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp)
enc.BlockHashes = s.BlockHashes
enc.Ommers = s.Ommers
enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee)
+ enc.ParentUncleHash = s.ParentUncleHash
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (s *stEnv) UnmarshalJSON(input []byte) error {
type stEnv struct {
- Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
- GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
- Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
- Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
- BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
- Ommers []ommer `json:"ommers,omitempty"`
- BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
+ ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
+ GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
+ Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
+ Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
+ ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"`
+ BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"`
+ Ommers []ommer `json:"ommers,omitempty"`
+ BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
+ ParentUncleHash *common.Hash `json:"parentUncleHash"`
}
var dec stEnv
if err := json.Unmarshal(input, &dec); err != nil {
@@ -57,10 +66,12 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'currentCoinbase' for stEnv")
}
s.Coinbase = common.Address(*dec.Coinbase)
- if dec.Difficulty == nil {
- return errors.New("missing required field 'currentDifficulty' for stEnv")
+ if dec.Difficulty != nil {
+ s.Difficulty = (*big.Int)(dec.Difficulty)
+ }
+ if dec.ParentDifficulty != nil {
+ s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
}
- s.Difficulty = (*big.Int)(dec.Difficulty)
if dec.GasLimit == nil {
return errors.New("missing required field 'currentGasLimit' for stEnv")
}
@@ -73,6 +84,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'currentTimestamp' for stEnv")
}
s.Timestamp = uint64(*dec.Timestamp)
+ if dec.ParentTimestamp != nil {
+ s.ParentTimestamp = uint64(*dec.ParentTimestamp)
+ }
if dec.BlockHashes != nil {
s.BlockHashes = dec.BlockHashes
}
@@ -82,5 +96,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.BaseFee != nil {
s.BaseFee = (*big.Int)(dec.BaseFee)
}
+ if dec.ParentUncleHash != nil {
+ s.ParentUncleHash = *dec.ParentUncleHash
+ }
return nil
}
diff --git a/cmd/evm/internal/t8ntool/transaction.go b/cmd/evm/internal/t8ntool/transaction.go
new file mode 100644
index 000000000..aecbad79d
--- /dev/null
+++ b/cmd/evm/internal/t8ntool/transaction.go
@@ -0,0 +1,136 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package t8ntool
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/big"
+ "os"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/tests"
+ "gopkg.in/urfave/cli.v1"
+)
+
+type result struct {
+ Error error
+ Address common.Address
+ Hash common.Hash
+}
+
+// MarshalJSON marshals as JSON with a hash.
+func (r *result) MarshalJSON() ([]byte, error) {
+ type xx struct {
+ Error string `json:"error,omitempty"`
+ Address *common.Address `json:"address,omitempty"`
+ Hash *common.Hash `json:"hash,omitempty"`
+ }
+ var out xx
+ if r.Error != nil {
+ out.Error = r.Error.Error()
+ }
+ if r.Address != (common.Address{}) {
+ out.Address = &r.Address
+ }
+ if r.Hash != (common.Hash{}) {
+ out.Hash = &r.Hash
+ }
+ return json.Marshal(out)
+}
+
+func Transaction(ctx *cli.Context) error {
+ // Configure the go-ethereum logger
+ glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
+ glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
+ log.Root().SetHandler(glogger)
+
+ var (
+ err error
+ )
+ // We need to load the transactions. They may come from stdin or from files.
+ // Check if anything needs to be read from stdin
+ var (
+ txStr = ctx.String(InputTxsFlag.Name)
+ inputData = &input{}
+ chainConfig *params.ChainConfig
+ )
+ // Construct the chainconfig
+ if cConf, _, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
+ return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
+ } else {
+ chainConfig = cConf
+ }
+ // Set the chain id
+ chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))
+ var body hexutil.Bytes
+ if txStr == stdinSelector {
+ decoder := json.NewDecoder(os.Stdin)
+ if err := decoder.Decode(inputData); err != nil {
+ return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
+ }
+ // Decode the body of already signed transactions
+ body = common.FromHex(inputData.TxRlp)
+ } else {
+ // Read input from file
+ inFile, err := os.Open(txStr)
+ if err != nil {
+ return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
+ }
+ defer inFile.Close()
+ decoder := json.NewDecoder(inFile)
+ if strings.HasSuffix(txStr, ".rlp") {
+ if err := decoder.Decode(&body); err != nil {
+ return err
+ }
+ } else {
+ return NewError(ErrorIO, errors.New("only rlp supported"))
+ }
+ }
+ signer := types.MakeSigner(chainConfig, new(big.Int))
+ // We now have the transactions in 'body', which is supposed to be an
+ // rlp list of transactions
+ it, err := rlp.NewListIterator([]byte(body))
+ if err != nil {
+ return err
+ }
+ var results []result
+ for it.Next() {
+ var tx types.Transaction
+ err := rlp.DecodeBytes(it.Value(), &tx)
+ if err != nil {
+ results = append(results, result{Error: err})
+ continue
+ }
+ sender, err := types.Sender(signer, &tx)
+ if err != nil {
+ results = append(results, result{Error: err})
+ continue
+ }
+ results = append(results, result{Address: sender, Hash: tx.Hash()})
+ }
+ out, err := json.MarshalIndent(results, "", " ")
+ fmt.Println(string(out))
+ return err
+}
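
At its core, t9n validation is signature recovery under the selected fork's signer. A self-contained sketch of that step, signing a throwaway transaction and recovering its sender (the chain ID and recipient are arbitrary):

```go
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	signer := types.LatestSignerForChainID(big.NewInt(1))

	// A throwaway value transfer, signed with the generated key.
	to := common.HexToAddress("0x1111111111111111111111111111111111111111")
	tx := types.NewTransaction(0, to, big.NewInt(1), 21000, big.NewInt(1), nil)
	signed, err := types.SignTx(tx, signer, key)
	if err != nil {
		log.Fatal(err)
	}
	// The essence of t9n: recover the sender and report it with the hash.
	sender, err := types.Sender(signer, signed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sender, signed.Hash())
}
```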
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index 8334aa01d..7407ed0a4 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -65,10 +65,15 @@ func (n *NumberedError) Error() string {
return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error())
}
-func (n *NumberedError) Code() int {
+func (n *NumberedError) ExitCode() int {
return n.errorCode
}
+// compile-time conformance test
+var (
+ _ cli.ExitCoder = (*NumberedError)(nil)
+)
+
type input struct {
Alloc core.GenesisAlloc `json:"alloc,omitempty"`
Env *stEnv `json:"env,omitempty"`
@@ -76,7 +81,7 @@ type input struct {
TxRlp string `json:"txsRlp,omitempty"`
}
-func Main(ctx *cli.Context) error {
+func Transition(ctx *cli.Context) error {
// Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
@@ -102,10 +107,10 @@ func Main(ctx *cli.Context) error {
if ctx.Bool(TraceFlag.Name) {
// Configure the EVM logger
logConfig := &vm.LogConfig{
- DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
- DisableMemory: ctx.Bool(TraceDisableMemoryFlag.Name),
- DisableReturnData: ctx.Bool(TraceDisableReturnDataFlag.Name),
- Debug: true,
+ DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
+ EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name),
+ EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name),
+ Debug: true,
}
var prevFile *os.File
// This one closes the last file
@@ -252,6 +257,20 @@ func Main(ctx *cli.Context) error {
return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
}
}
+ if env := prestate.Env; env.Difficulty == nil {
+ // If difficulty was not provided by caller, we need to calculate it.
+ switch {
+ case env.ParentDifficulty == nil:
+ return NewError(ErrorVMConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
+ case env.Number == 0:
+ return NewError(ErrorVMConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
+ case env.Timestamp <= env.ParentTimestamp:
+ return NewError(ErrorVMConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
+ env.Timestamp, env.ParentTimestamp))
+ }
+ prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
+ env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash)
+ }
// Run the test and aggregate the result
s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
if err != nil {
@@ -395,7 +414,7 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
return err
}
if len(stdOutObject) > 0 {
- b, err := json.MarshalIndent(stdOutObject, "", " ")
+ b, err := json.MarshalIndent(stdOutObject, "", "  ")
if err != nil {
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
}
@@ -403,7 +422,7 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
os.Stdout.Write([]byte("\n"))
}
if len(stdErrObject) > 0 {
- b, err := json.MarshalIndent(stdErrObject, "", " ")
+ b, err := json.MarshalIndent(stdErrObject, "", "  ")
if err != nil {
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
}
diff --git a/cmd/evm/main.go b/cmd/evm/main.go
index b9c0d17f3..26064efc3 100644
--- a/cmd/evm/main.go
+++ b/cmd/evm/main.go
@@ -113,7 +113,7 @@ var (
Name: "receiver",
Usage: "The transaction receiver (execution context)",
}
- DisableMemoryFlag = cli.BoolFlag{
+ DisableMemoryFlag = cli.BoolTFlag{
Name: "nomemory",
Usage: "disable memory output",
}
@@ -125,9 +125,9 @@ var (
Name: "nostorage",
Usage: "disable storage output",
}
- DisableReturnDataFlag = cli.BoolFlag{
+ DisableReturnDataFlag = cli.BoolTFlag{
Name: "noreturndata",
- Usage: "disable return data output",
+ Usage: "enable return data output",
}
)
@@ -135,7 +135,7 @@ var stateTransitionCommand = cli.Command{
Name: "transition",
Aliases: []string{"t8n"},
Usage: "executes a full state transition",
- Action: t8ntool.Main,
+ Action: t8ntool.Transition,
Flags: []cli.Flag{
t8ntool.TraceFlag,
t8ntool.TraceDisableMemoryFlag,
@@ -154,6 +154,18 @@ var stateTransitionCommand = cli.Command{
t8ntool.VerbosityFlag,
},
}
+var transactionCommand = cli.Command{
+ Name: "transaction",
+ Aliases: []string{"t9n"},
+ Usage: "performs transaction validation",
+ Action: t8ntool.Transaction,
+ Flags: []cli.Flag{
+ t8ntool.InputTxsFlag,
+ t8ntool.ChainIDFlag,
+ t8ntool.ForknameFlag,
+ t8ntool.VerbosityFlag,
+ },
+}
func init() {
app.Flags = []cli.Flag{
@@ -187,6 +199,7 @@ func init() {
runCommand,
stateTestCommand,
stateTransitionCommand,
+ transactionCommand,
}
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
}
@@ -195,7 +208,7 @@ func main() {
if err := app.Run(os.Args); err != nil {
code := 1
if ec, ok := err.(*t8ntool.NumberedError); ok {
- code = ec.Code()
+ code = ec.ExitCode()
}
fmt.Fprintln(os.Stderr, err)
os.Exit(code)
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index e409d2692..cedbd2281 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -108,11 +108,11 @@ func runCmd(ctx *cli.Context) error {
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
logconfig := &vm.LogConfig{
- DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
- DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
- DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
- DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
- Debug: ctx.GlobalBool(DebugFlag.Name),
+ EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name),
+ DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
+ DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
+ EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name),
+ Debug: ctx.GlobalBool(DebugFlag.Name),
}
var (
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index d8bc4eae8..ab2704609 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -59,10 +59,10 @@ func stateTestCmd(ctx *cli.Context) error {
// Configure the EVM logger
config := &vm.LogConfig{
- DisableMemory: ctx.GlobalBool(DisableMemoryFlag.Name),
- DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
- DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
- DisableReturnData: ctx.GlobalBool(DisableReturnDataFlag.Name),
+ EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name),
+ DisableStack: ctx.GlobalBool(DisableStackFlag.Name),
+ DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name),
+ EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name),
}
var (
tracer vm.Tracer
diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go
new file mode 100644
index 000000000..ad518a91a
--- /dev/null
+++ b/cmd/evm/t8n_test.go
@@ -0,0 +1,292 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/ethereum/go-ethereum/internal/cmdtest"
+)
+
+func TestMain(m *testing.M) {
+ // Run the app if we've been exec'd as "evm-test" in one of the tests below.
+ reexec.Register("evm-test", func() {
+ if err := app.Run(os.Args); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ os.Exit(0)
+ })
+ // check if we have been reexec'd
+ if reexec.Init() {
+ return
+ }
+ os.Exit(m.Run())
+}
+
+type testT8n struct {
+ *cmdtest.TestCmd
+}
+
+type t8nInput struct {
+ inAlloc string
+ inTxs string
+ inEnv string
+ stFork string
+ stReward string
+}
+
+func (args *t8nInput) get(base string) []string {
+ var out []string
+ if opt := args.inAlloc; opt != "" {
+ out = append(out, "--input.alloc")
+ out = append(out, fmt.Sprintf("%v/%v", base, opt))
+ }
+ if opt := args.inTxs; opt != "" {
+ out = append(out, "--input.txs")
+ out = append(out, fmt.Sprintf("%v/%v", base, opt))
+ }
+ if opt := args.inEnv; opt != "" {
+ out = append(out, "--input.env")
+ out = append(out, fmt.Sprintf("%v/%v", base, opt))
+ }
+ if opt := args.stFork; opt != "" {
+ out = append(out, "--state.fork", opt)
+ }
+ if opt := args.stReward; opt != "" {
+ out = append(out, "--state.reward", opt)
+ }
+ return out
+}
+
+type t8nOutput struct {
+ alloc bool
+ result bool
+ body bool
+}
+
+func (args *t8nOutput) get() (out []string) {
+ if args.body {
+ out = append(out, "--output.body", "stdout")
+ } else {
+ out = append(out, "--output.body", "") // empty means ignore
+ }
+ if args.result {
+ out = append(out, "--output.result", "stdout")
+ } else {
+ out = append(out, "--output.result", "")
+ }
+ if args.alloc {
+ out = append(out, "--output.alloc", "stdout")
+ } else {
+ out = append(out, "--output.alloc", "")
+ }
+ return out
+}
+
+func TestT8n(t *testing.T) {
+ tt := new(testT8n)
+ tt.TestCmd = cmdtest.NewTestCmd(t, tt)
+ for i, tc := range []struct {
+ base string
+ input t8nInput
+ output t8nOutput
+ expExitCode int
+ expOut string
+ }{
+ { // Test exit (3) on bad config
+ base: "./testdata/1",
+ input: t8nInput{
+ "alloc.json", "txs.json", "env.json", "Frontier+1346", "",
+ },
+ output: t8nOutput{alloc: true, result: true},
+ expExitCode: 3,
+ },
+ {
+ base: "./testdata/1",
+ input: t8nInput{
+ "alloc.json", "txs.json", "env.json", "Byzantium", "",
+ },
+ output: t8nOutput{alloc: true, result: true},
+ expOut: "exp.json",
+ },
+ { // blockhash test
+ base: "./testdata/3",
+ input: t8nInput{
+ "alloc.json", "txs.json", "env.json", "Berlin", "",
+ },
+ output: t8nOutput{alloc: true, result: true},
+ expOut: "exp.json",
+ },
+ { // missing blockhash test
+ base: "./testdata/4",
+ input: t8nInput{
+ "alloc.json", "txs.json", "env.json", "Berlin", "",
+ },
+ output: t8nOutput{alloc: true, result: true},
+ expExitCode: 4,
+ },
+ { // Ommer test
+ base: "./testdata/5",
+ input: t8nInput{
+ "alloc.json", "txs.json", "env.json", "Byzantium", "0x80",
+ },
+ output: t8nOutput{alloc: true, result: true},
+ expOut: "exp.json",
+ },
+ { // Sign json transactions
+ base: "./testdata/13",
+ input: t8nInput{
+ "alloc.json", "txs.json", "env.json", "London", "",
+ },
+ output: t8nOutput{body: true},
+ expOut: "exp.json",
+ },
+ { // Already signed transactions
+ base: "./testdata/13",
+ input: t8nInput{
+ "alloc.json", "signed_txs.rlp", "env.json", "London", "",
+ },
+ output: t8nOutput{result: true},
+ expOut: "exp2.json",
+ },
+ { // Difficulty calculation - no uncles
+ base: "./testdata/14",
+ input: t8nInput{
+ "alloc.json", "txs.json", "env.json", "London", "",
+ },
+ output: t8nOutput{result: true},
+ expOut: "exp.json",
+ },
+ { // Difficulty calculation - with uncles
+ base: "./testdata/14",
+ input: t8nInput{
+ "alloc.json", "txs.json", "env.uncles.json", "London", "",
+ },
+ output: t8nOutput{result: true},
+ expOut: "exp2.json",
+ },
+ } {
+
+ args := []string{"t8n"}
+ args = append(args, tc.output.get()...)
+ args = append(args, tc.input.get(tc.base)...)
+ tt.Run("evm-test", args...)
+ tt.Logf("args: %v\n", strings.Join(args, " "))
+ // Compare the expected output, if provided
+ if tc.expOut != "" {
+ want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
+ if err != nil {
+ t.Fatalf("test %d: could not read expected output: %v", i, err)
+ }
+ have := tt.Output()
+ ok, err := cmpJson(have, want)
+ switch {
+ case err != nil:
+ t.Fatalf("test %d, json parsing failed: %v", i, err)
+ case !ok:
+ t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
+ }
+ }
+ tt.WaitExit()
+ if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
+ t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
+ }
+ }
+}
+
+type t9nInput struct {
+ inTxs string
+ stFork string
+}
+
+func (args *t9nInput) get(base string) []string {
+ var out []string
+ if opt := args.inTxs; opt != "" {
+ out = append(out, "--input.txs")
+ out = append(out, fmt.Sprintf("%v/%v", base, opt))
+ }
+ if opt := args.stFork; opt != "" {
+ out = append(out, "--state.fork", opt)
+ }
+ return out
+}
+
+func TestT9n(t *testing.T) {
+ tt := new(testT8n)
+ tt.TestCmd = cmdtest.NewTestCmd(t, tt)
+ for i, tc := range []struct {
+ base string
+ input t9nInput
+ expExitCode int
+ expOut string
+ }{
+ { // London txs on homestead
+ base: "./testdata/15",
+ input: t9nInput{
+ inTxs: "signed_txs.rlp",
+ stFork: "Homestead",
+ },
+ expOut: "exp.json",
+ },
+ { // London txs on London
+ base: "./testdata/15",
+ input: t9nInput{
+ inTxs: "signed_txs.rlp",
+ stFork: "London",
+ },
+ expOut: "exp2.json",
+ },
+ { // An RLP list (a blockheader really)
+ base: "./testdata/15",
+ input: t9nInput{
+ inTxs: "blockheader.rlp",
+ stFork: "London",
+ },
+ expOut: "exp3.json",
+ },
+ } {
+
+ args := []string{"t9n"}
+ args = append(args, tc.input.get(tc.base)...)
+
+ tt.Run("evm-test", args...)
+ tt.Logf("args:\n go run . %v\n", strings.Join(args, " "))
+ // Compare the expected output, if provided
+ if tc.expOut != "" {
+ want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
+ if err != nil {
+ t.Fatalf("test %d: could not read expected output: %v", i, err)
+ }
+ have := tt.Output()
+ ok, err := cmpJson(have, want)
+ switch {
+ case err != nil:
+ t.Logf("%s", string(have))
+ t.Fatalf("test %d, json parsing failed: %v", i, err)
+ case !ok:
+ t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
+ }
+ }
+ tt.WaitExit()
+ if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
+ t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
+ }
+ }
+}
+
+// cmpJson compares the JSON in two byte slices.
+func cmpJson(a, b []byte) (bool, error) {
+ var j, j2 interface{}
+ if err := json.Unmarshal(a, &j); err != nil {
+ return false, err
+ }
+ if err := json.Unmarshal(b, &j2); err != nil {
+ return false, err
+ }
+ return reflect.DeepEqual(j2, j), nil
+}
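
Note: `cmpJson` compares outputs semantically rather than byte-for-byte, so key order and whitespace in the `exp.json` fixtures don't affect the comparison. A standalone sketch of the same idea (the `cmpJSON` name and sample inputs are illustrative, not part of this patch):

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// cmpJSON reports whether a and b contain semantically equal JSON,
// ignoring key order and whitespace.
func cmpJSON(a, b []byte) (bool, error) {
	var va, vb interface{}
	if err := json.Unmarshal(a, &va); err != nil {
		return false, err
	}
	if err := json.Unmarshal(b, &vb); err != nil {
		return false, err
	}
	return reflect.DeepEqual(va, vb), nil
}

func main() {
	ok, err := cmpJSON([]byte(`{"a": 1, "b": 2}`), []byte(`{"b":2,"a":1}`))
	fmt.Println(ok, err) // true <nil>
}
```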
diff --git a/cmd/evm/testdata/1/exp.json b/cmd/evm/testdata/1/exp.json
new file mode 100644
index 000000000..17d2f8267
--- /dev/null
+++ b/cmd/evm/testdata/1/exp.json
@@ -0,0 +1,43 @@
+{
+ "alloc": {
+ "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": {
+ "balance": "0xfeed1a9d",
+ "nonce": "0x1"
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x5ffd4878be161d74",
+ "nonce": "0xac"
+ },
+ "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0xa410"
+ }
+ },
+ "result": {
+ "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
+ "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
+ "receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [
+ {
+ "root": "0x",
+ "status": "0x1",
+ "cumulativeGasUsed": "0x5208",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "logs": null,
+ "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
+ "contractAddress": "0x0000000000000000000000000000000000000000",
+ "gasUsed": "0x5208",
+ "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "transactionIndex": "0x0"
+ }
+ ],
+ "rejected": [
+ {
+ "index": 1,
+ "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
+ }
+ ],
+ "currentDifficulty": "0x20000"
+ }
+}
\ No newline at end of file
diff --git a/cmd/evm/testdata/13/exp.json b/cmd/evm/testdata/13/exp.json
new file mode 100644
index 000000000..2b049dfb2
--- /dev/null
+++ b/cmd/evm/testdata/13/exp.json
@@ -0,0 +1,3 @@
+{
+ "body": "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
+}
diff --git a/cmd/evm/testdata/13/exp2.json b/cmd/evm/testdata/13/exp2.json
new file mode 100644
index 000000000..01ab59e84
--- /dev/null
+++ b/cmd/evm/testdata/13/exp2.json
@@ -0,0 +1,38 @@
+{
+ "result": {
+ "stateRoot": "0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61",
+ "txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d",
+ "receiptRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [
+ {
+ "type": "0x2",
+ "root": "0x",
+ "status": "0x0",
+ "cumulativeGasUsed": "0x84d0",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "logs": null,
+ "transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
+ "contractAddress": "0x0000000000000000000000000000000000000000",
+ "gasUsed": "0x84d0",
+ "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "transactionIndex": "0x0"
+ },
+ {
+ "type": "0x2",
+ "root": "0x",
+ "status": "0x0",
+ "cumulativeGasUsed": "0x109a0",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "logs": null,
+ "transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
+ "contractAddress": "0x0000000000000000000000000000000000000000",
+ "gasUsed": "0x84d0",
+ "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "transactionIndex": "0x1"
+ }
+ ],
+ "currentDifficulty": "0x20000"
+ }
+}
diff --git a/cmd/evm/testdata/13/signed_txs.rlp b/cmd/evm/testdata/13/signed_txs.rlp
new file mode 100644
index 000000000..9d1157ea4
--- /dev/null
+++ b/cmd/evm/testdata/13/signed_txs.rlp
@@ -0,0 +1 @@
+"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
\ No newline at end of file
diff --git a/cmd/evm/testdata/14/alloc.json b/cmd/evm/testdata/14/alloc.json
new file mode 100644
index 000000000..cef1a25ff
--- /dev/null
+++ b/cmd/evm/testdata/14/alloc.json
@@ -0,0 +1,12 @@
+{
+ "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0x5ffd4878be161d74",
+ "code": "0x",
+ "nonce": "0xac",
+ "storage": {}
+ },
+ "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
+ "balance": "0xfeedbead",
+ "nonce" : "0x00"
+ }
+}
\ No newline at end of file
diff --git a/cmd/evm/testdata/14/env.json b/cmd/evm/testdata/14/env.json
new file mode 100644
index 000000000..0bf1c5cf4
--- /dev/null
+++ b/cmd/evm/testdata/14/env.json
@@ -0,0 +1,9 @@
+{
+ "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
+ "currentGasLimit": "0x750a163df65e8a",
+ "currentBaseFee": "0x500",
+ "currentNumber": "12800000",
+ "currentTimestamp": "100015",
+ "parentTimestamp" : "99999",
+ "parentDifficulty" : "0x2000000000000"
+}
diff --git a/cmd/evm/testdata/14/env.uncles.json b/cmd/evm/testdata/14/env.uncles.json
new file mode 100644
index 000000000..83811b95e
--- /dev/null
+++ b/cmd/evm/testdata/14/env.uncles.json
@@ -0,0 +1,10 @@
+{
+ "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
+ "currentGasLimit": "0x750a163df65e8a",
+ "currentBaseFee": "0x500",
+ "currentNumber": "12800000",
+ "currentTimestamp": "100035",
+ "parentTimestamp" : "99999",
+ "parentDifficulty" : "0x2000000000000",
+ "parentUncleHash" : "0x000000000000000000000000000000000000000000000000000000000000beef"
+}
diff --git a/cmd/evm/testdata/14/exp.json b/cmd/evm/testdata/14/exp.json
new file mode 100644
index 000000000..bbe6a1317
--- /dev/null
+++ b/cmd/evm/testdata/14/exp.json
@@ -0,0 +1,11 @@
+{
+ "result": {
+ "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
+ "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "currentDifficulty": "0x2000020000000",
+ "receipts": []
+ }
+}
\ No newline at end of file
diff --git a/cmd/evm/testdata/14/exp2.json b/cmd/evm/testdata/14/exp2.json
new file mode 100644
index 000000000..195c738d9
--- /dev/null
+++ b/cmd/evm/testdata/14/exp2.json
@@ -0,0 +1,11 @@
+{
+ "result": {
+ "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
+ "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [],
+ "currentDifficulty": "0x1ff8020000000"
+ }
+}
\ No newline at end of file
diff --git a/cmd/evm/testdata/14/readme.md b/cmd/evm/testdata/14/readme.md
new file mode 100644
index 000000000..9d0dc9569
--- /dev/null
+++ b/cmd/evm/testdata/14/readme.md
@@ -0,0 +1,41 @@
+## Difficulty calculation
+
+This test shows how `evm t8n` can be used to calculate the (ethash) difficulty when none is provided by the caller.
+
+Calculating it (with an empty set of txs) using `London` rules (and no provided unclehash for the parent block):
+```
+[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=London
+INFO [08-30|20:43:09.352] Trie dumping started root=6f0588..7f4bdc
+INFO [08-30|20:43:09.352] Trie dumping complete accounts=2 elapsed="82.533µs"
+INFO [08-30|20:43:09.352] Wrote file file=alloc.json
+{
+ "result": {
+ "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
+ "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [],
+ "currentDifficulty": "0x2000020000000"
+ }
+}
+```
+Same calculation, but this time providing a non-empty (and non-`emptyKeccak`) unclehash, which leads to a slightly different result:
+```
+[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.uncles.json --output.result=stdout --state.fork=London
+INFO [08-30|20:44:33.102] Trie dumping started root=6f0588..7f4bdc
+INFO [08-30|20:44:33.102] Trie dumping complete accounts=2 elapsed="72.91µs"
+INFO [08-30|20:44:33.102] Wrote file file=alloc.json
+{
+ "result": {
+ "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
+ "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [],
+ "currentDifficulty": "0x1ff8020000000"
+ }
+}
+```
+
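
For reference, the two `currentDifficulty` values in this readme differ because both the parent uncle hash and the timestamp delta feed the EIP-100 adjustment, while the low-order `0x20000000` component comes from the difficulty bomb. A minimal sketch of computing the difficulty programmatically via go-ethereum's public API; the exact result depends on which bomb delay the chain config activates at this block, so `AllEthashProtocolChanges` below is only a stand-in for t8n's London config:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Values taken from testdata/14/env.json: the block being built is
	// 12800000, so the parent is 12799999.
	parent := &types.Header{
		Number:     big.NewInt(12_799_999),
		Time:       99_999,
		Difficulty: big.NewInt(0x2000000000000),
		UncleHash:  types.EmptyUncleHash, // env.uncles.json supplies a non-empty hash instead
	}
	diff := ethash.CalcDifficulty(params.AllEthashProtocolChanges, 100_015, parent)
	fmt.Println(diff) // the configured bomb delay determines the low-order component
}
```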
diff --git a/cmd/evm/testdata/14/txs.json b/cmd/evm/testdata/14/txs.json
new file mode 100644
index 000000000..fe51488c7
--- /dev/null
+++ b/cmd/evm/testdata/14/txs.json
@@ -0,0 +1 @@
+[]
diff --git a/cmd/evm/testdata/15/blockheader.rlp b/cmd/evm/testdata/15/blockheader.rlp
new file mode 100644
index 000000000..1124e8e2d
--- /dev/null
+++ b/cmd/evm/testdata/15/blockheader.rlp
@@ -0,0 +1 @@
+"0xf901f0a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007b0101020383010203a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"
\ No newline at end of file
diff --git a/cmd/evm/testdata/15/exp.json b/cmd/evm/testdata/15/exp.json
new file mode 100644
index 000000000..03d970c56
--- /dev/null
+++ b/cmd/evm/testdata/15/exp.json
@@ -0,0 +1,8 @@
+[
+ {
+ "error": "transaction type not supported"
+ },
+ {
+ "error": "transaction type not supported"
+ }
+]
diff --git a/cmd/evm/testdata/15/exp2.json b/cmd/evm/testdata/15/exp2.json
new file mode 100644
index 000000000..85d821f55
--- /dev/null
+++ b/cmd/evm/testdata/15/exp2.json
@@ -0,0 +1,10 @@
+[
+ {
+ "address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
+ "hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476"
+ },
+ {
+ "address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
+ "hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a"
+ }
+]
diff --git a/cmd/evm/testdata/15/exp3.json b/cmd/evm/testdata/15/exp3.json
new file mode 100644
index 000000000..6c46d267c
--- /dev/null
+++ b/cmd/evm/testdata/15/exp3.json
@@ -0,0 +1,47 @@
+[
+ {
+ "error": "transaction type not supported"
+ },
+ {
+ "error": "transaction type not supported"
+ },
+ {
+ "error": "transaction type not supported"
+ },
+ {
+ "error": "transaction type not supported"
+ },
+ {
+ "error": "transaction type not supported"
+ },
+ {
+ "error": "transaction type not supported"
+ },
+ {
+ "error": "transaction type not supported"
+ },
+ {
+ "error": "rlp: expected List"
+ },
+ {
+ "error": "rlp: expected List"
+ },
+ {
+ "error": "rlp: expected List"
+ },
+ {
+ "error": "rlp: expected List"
+ },
+ {
+ "error": "rlp: expected List"
+ },
+ {
+ "error": "rlp: expected input list for types.AccessListTx"
+ },
+ {
+ "error": "transaction type not supported"
+ },
+ {
+ "error": "transaction type not supported"
+ }
+]
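
The errors in `exp3.json` follow from how transaction decoding works: each element of the block-header RLP is either an opaque byte string, which is treated as an EIP-2718 typed-transaction envelope with an unsupported type byte, or a value that fails the legacy-transaction list decode. A minimal sketch of the first failure mode (the payload is illustrative):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A 32-byte RLP string (e.g. a hash field from a block header) is valid
	// RLP, but it is neither a legacy-tx list nor a known typed-tx envelope:
	// its first byte (0x00) is not a supported EIP-2718 transaction type.
	payload, _ := rlp.EncodeToBytes(make([]byte, 32))

	var tx types.Transaction
	err := rlp.DecodeBytes(payload, &tx)
	fmt.Println(err) // transaction type not supported
}
```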
diff --git a/cmd/evm/testdata/15/signed_txs.rlp b/cmd/evm/testdata/15/signed_txs.rlp
new file mode 100644
index 000000000..9d1157ea4
--- /dev/null
+++ b/cmd/evm/testdata/15/signed_txs.rlp
@@ -0,0 +1 @@
+"0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
\ No newline at end of file
diff --git a/cmd/evm/testdata/15/signed_txs.rlp.json b/cmd/evm/testdata/15/signed_txs.rlp.json
new file mode 100644
index 000000000..187f40f24
--- /dev/null
+++ b/cmd/evm/testdata/15/signed_txs.rlp.json
@@ -0,0 +1,4 @@
+{
+ "txsRlp" : "0xf8d2b86702f864010180820fa08284d09411111111111111111111111111111111111111118080c001a0b7dfab36232379bb3d1497a4f91c1966b1f932eae3ade107bf5d723b9cb474e0a06261c359a10f2132f126d250485b90cf20f30340801244a08ef6142ab33d1904b86702f864010280820fa08284d09411111111111111111111111111111111111111118080c080a0d4ec563b6568cd42d998fc4134b36933c6568d01533b5adf08769270243c6c7fa072bf7c21eac6bbeae5143371eef26d5e279637f3bd73482b55979d76d935b1e9"
+}
+
diff --git a/cmd/evm/testdata/3/exp.json b/cmd/evm/testdata/3/exp.json
new file mode 100644
index 000000000..ade09e9ac
--- /dev/null
+++ b/cmd/evm/testdata/3/exp.json
@@ -0,0 +1,37 @@
+{
+ "alloc": {
+ "0x095e7baea6a6c7c4c2dfeb977efac326af552d87": {
+ "code": "0x600140",
+ "balance": "0xde0b6b3a76586a0"
+ },
+ "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": {
+ "balance": "0x521f"
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "balance": "0xde0b6b3a7622741",
+ "nonce": "0x1"
+ }
+ },
+ "result": {
+ "stateRoot": "0xb7341da3f9f762a6884eaa186c32942734c146b609efee11c4b0214c44857ea1",
+ "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b",
+ "receiptRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [
+ {
+ "root": "0x",
+ "status": "0x1",
+ "cumulativeGasUsed": "0x521f",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "logs": null,
+ "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
+ "contractAddress": "0x0000000000000000000000000000000000000000",
+ "gasUsed": "0x521f",
+ "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "transactionIndex": "0x0"
+ }
+ ],
+ "currentDifficulty": "0x20000"
+ }
+}
diff --git a/cmd/evm/testdata/5/exp.json b/cmd/evm/testdata/5/exp.json
new file mode 100644
index 000000000..6340d4cc3
--- /dev/null
+++ b/cmd/evm/testdata/5/exp.json
@@ -0,0 +1,22 @@
+{
+ "alloc": {
+ "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": {
+ "balance": "0x88"
+ },
+ "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": {
+ "balance": "0x70"
+ },
+ "0xcccccccccccccccccccccccccccccccccccccccc": {
+ "balance": "0x60"
+ }
+ },
+ "result": {
+ "stateRoot": "0xa7312add33811645c6aa65d928a1a4f49d65d448801912c069a0aa8fe9c1f393",
+ "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "receipts": [],
+ "currentDifficulty": "0x20000"
+ }
+}
diff --git a/cmd/geth/accountcmd.go b/cmd/geth/accountcmd.go
index 6d45c8876..e33b9eb0f 100644
--- a/cmd/geth/accountcmd.go
+++ b/cmd/geth/accountcmd.go
@@ -268,11 +268,16 @@ func accountCreate(ctx *cli.Context) error {
}
}
utils.SetNodeConfig(ctx, &cfg.Node)
- scryptN, scryptP, keydir, err := cfg.Node.AccountConfig()
-
+ keydir, err := cfg.Node.KeyDirConfig()
if err != nil {
utils.Fatalf("Failed to read configuration: %v", err)
}
+ scryptN := keystore.StandardScryptN
+ scryptP := keystore.StandardScryptP
+ if cfg.Node.UseLightweightKDF {
+ scryptN = keystore.LightScryptN
+ scryptP = keystore.LightScryptP
+ }
password := utils.GetPassPhraseWithList("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
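
With this change, `accountCreate` derives only the key directory and scrypt parameters instead of requesting the full account configuration. The selection logic in isolation looks like this (a sketch; the path and password are placeholders):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	useLightweightKDF := false // corresponds to geth's --lightkdf flag

	scryptN, scryptP := keystore.StandardScryptN, keystore.StandardScryptP
	if useLightweightKDF {
		scryptN, scryptP = keystore.LightScryptN, keystore.LightScryptP
	}
	ks := keystore.NewKeyStore("/tmp/example-keystore", scryptN, scryptP)
	acct, err := ks.NewAccount("correct horse battery staple")
	if err != nil {
		panic(err)
	}
	fmt.Println("created account:", acct.Address.Hex())
}
```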
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 604f907b7..c97a64f17 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -27,6 +27,10 @@ import (
"gopkg.in/urfave/cli.v1"
+ "github.com/ethereum/go-ethereum/accounts/external"
+ "github.com/ethereum/go-ethereum/accounts/keystore"
+ "github.com/ethereum/go-ethereum/accounts/scwallet"
+ "github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/ethconfig"
@@ -135,6 +139,11 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
if err != nil {
utils.Fatalf("Failed to create the protocol stack: %v", err)
}
+ // Node doesn't populate the account manager backends by default.
+ if err := setAccountManagerBackends(stack); err != nil {
+ utils.Fatalf("Failed to set account manager backends: %v", err)
+ }
+
utils.SetEthConfig(ctx, stack, &cfg.Eth)
if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) {
cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name)
@@ -257,3 +266,62 @@ func deprecated(field string) bool {
return false
}
}
+
+func setAccountManagerBackends(stack *node.Node) error {
+ conf := stack.Config()
+ am := stack.AccountManager()
+ keydir := stack.KeyStoreDir()
+ scryptN := keystore.StandardScryptN
+ scryptP := keystore.StandardScryptP
+ if conf.UseLightweightKDF {
+ scryptN = keystore.LightScryptN
+ scryptP = keystore.LightScryptP
+ }
+
+ // Assemble the supported backends
+ if len(conf.ExternalSigner) > 0 {
+ log.Info("Using external signer", "url", conf.ExternalSigner)
+ if extapi, err := external.NewExternalBackend(conf.ExternalSigner); err == nil {
+ am.AddBackend(extapi)
+ return nil
+ } else {
+ return fmt.Errorf("error connecting to external signer: %v", err)
+ }
+ }
+
+ // For now, we're using EITHER external signer OR local signers.
+ // If/when we implement some form of lockfile for USB and keystore wallets,
+ // we can have both, but it's very confusing for the user to see the same
+ // accounts both externally and locally, plus it's very racy.
+ am.AddBackend(keystore.NewKeyStore(keydir, scryptN, scryptP))
+ if conf.USB {
+ // Start a USB hub for Ledger hardware wallets
+ if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil {
+ log.Warn(fmt.Sprintf("Failed to start Ledger hub, disabling: %v", err))
+ } else {
+ am.AddBackend(ledgerhub)
+ }
+ // Start a USB hub for Trezor hardware wallets (HID version)
+ if trezorhub, err := usbwallet.NewTrezorHubWithHID(); err != nil {
+ log.Warn(fmt.Sprintf("Failed to start HID Trezor hub, disabling: %v", err))
+ } else {
+ am.AddBackend(trezorhub)
+ }
+ // Start a USB hub for Trezor hardware wallets (WebUSB version)
+ if trezorhub, err := usbwallet.NewTrezorHubWithWebUSB(); err != nil {
+ log.Warn(fmt.Sprintf("Failed to start WebUSB Trezor hub, disabling: %v", err))
+ } else {
+ am.AddBackend(trezorhub)
+ }
+ }
+ if len(conf.SmartCardDaemonPath) > 0 {
+ // Start a smart card hub
+ if schub, err := scwallet.NewHub(conf.SmartCardDaemonPath, scwallet.Scheme, keydir); err != nil {
+ log.Warn(fmt.Sprintf("Failed to start smart card hub, disabling: %v", err))
+ } else {
+ am.AddBackend(schub)
+ }
+ }
+
+ return nil
+}
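
The account-manager backends are now assembled in the geth CLI rather than inside `node.Node`. For the external-signer branch, the effective behaviour is roughly the following (a sketch; the IPC path is a placeholder for wherever your Clef instance listens):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/external"
)

func main() {
	// Placeholder endpoint; point this at your running Clef instance.
	backend, err := external.NewExternalBackend("/home/user/.clef/clef.ipc")
	if err != nil {
		fmt.Println("error connecting to external signer:", err)
		return
	}
	for _, wallet := range backend.Wallets() {
		fmt.Println("external wallet:", wallet.URL())
	}
}
```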
diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go
index c3f41b187..845ede2f9 100644
--- a/cmd/geth/consolecmd_test.go
+++ b/cmd/geth/consolecmd_test.go
@@ -75,7 +75,7 @@ at block: 0 ({{niltime}})
datadir: {{.Datadir}}
modules: {{apis}}
-To exit, press ctrl-d
+To exit, press ctrl-d or type exit
> {{.InputLine "exit"}}
`)
geth.ExpectExit()
@@ -149,7 +149,7 @@ at block: 0 ({{niltime}}){{if ipc}}
datadir: {{datadir}}{{end}}
modules: {{apis}}
-To exit, press ctrl-d
+To exit, press ctrl-d or type exit
> {{.InputLine "exit" }}
`)
attach.ExpectExit()
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index d75e98425..221fb2bc8 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -127,7 +127,7 @@ var (
utils.MinerEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerRecommitIntervalFlag,
- utils.MinerNoVerfiyFlag,
+ utils.MinerNoVerifyFlag,
utils.NATFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV5Flag,
@@ -161,12 +161,6 @@ var (
utils.HTTPPortFlag,
utils.HTTPCORSDomainFlag,
utils.HTTPVirtualHostsFlag,
- utils.LegacyRPCEnabledFlag,
- utils.LegacyRPCListenAddrFlag,
- utils.LegacyRPCPortFlag,
- utils.LegacyRPCCORSDomainFlag,
- utils.LegacyRPCVirtualHostsFlag,
- utils.LegacyRPCApiFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag,
diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go
index 35d027fb1..d3903e0af 100644
--- a/cmd/geth/snapshot.go
+++ b/cmd/geth/snapshot.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/pruner"
"github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
@@ -232,7 +233,7 @@ func verifyState(ctx *cli.Context) error {
}
}
if err := snaptree.Verify(root); err != nil {
- log.Error("Failed to verfiy state", "root", root, "err", err)
+ log.Error("Failed to verify state", "root", root, "err", err)
return err
}
log.Info("Verified the state", "root", root)
@@ -287,7 +288,7 @@ func traverseState(ctx *cli.Context) error {
accIter := trie.NewIterator(t.NodeIterator(nil))
for accIter.Next() {
accounts += 1
- var acc state.Account
+ var acc types.StateAccount
if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil {
log.Error("Invalid account encountered during traversal", "err", err)
return err
@@ -393,7 +394,7 @@ func traverseRawState(ctx *cli.Context) error {
// dig into the storage trie further.
if accIter.Leaf() {
accounts += 1
- var acc state.Account
+ var acc types.StateAccount
if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
log.Error("Invalid account encountered during traversal", "err", err)
return errors.New("invalid account")
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 708edcc79..16446f83e 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -185,7 +185,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.MinerEtherbaseFlag,
utils.MinerExtraDataFlag,
utils.MinerRecommitIntervalFlag,
- utils.MinerNoVerfiyFlag,
+ utils.MinerNoVerifyFlag,
},
},
{
@@ -218,13 +218,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
Name: "ALIASED (deprecated)",
Flags: []cli.Flag{
utils.NoUSBFlag,
- utils.LegacyRPCEnabledFlag,
- utils.LegacyRPCListenAddrFlag,
- utils.LegacyRPCPortFlag,
- utils.LegacyRPCCORSDomainFlag,
- utils.LegacyRPCVirtualHostsFlag,
- utils.LegacyRPCApiFlag,
- utils.LegacyMinerGasTargetFlag,
},
},
{
diff --git a/cmd/puppeth/module_explorer.go b/cmd/puppeth/module_explorer.go
index 3ce9d612b..05821dfc5 100644
--- a/cmd/puppeth/module_explorer.go
+++ b/cmd/puppeth/module_explorer.go
@@ -35,8 +35,8 @@ FROM puppeth/blockscout:latest
ADD genesis.json /genesis.json
RUN \
echo 'geth --cache 512 init /genesis.json' > explorer.sh && \
- echo $'geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,shh,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" --exitwhensynced' >> explorer.sh && \
- echo $'exec geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,shh,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" &' >> explorer.sh && \
+ echo $'geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" --exitwhensynced' >> explorer.sh && \
+ echo $'exec geth --networkid {{.NetworkID}} --syncmode "full" --gcmode "archive" --port {{.EthPort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --http --http.api "net,web3,eth,debug" --http.corsdomain "*" --http.vhosts "*" --ws --ws.origins "*" &' >> explorer.sh && \
echo '/usr/local/bin/docker-entrypoint.sh postgres &' >> explorer.sh && \
echo 'sleep 5' >> explorer.sh && \
echo 'mix do ecto.drop --force, ecto.create, ecto.migrate' >> explorer.sh && \
diff --git a/cmd/utils/diskusage.go b/cmd/utils/diskusage.go
index c3d51765f..09844652e 100644
--- a/cmd/utils/diskusage.go
+++ b/cmd/utils/diskusage.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build !windows && !openbsd
// +build !windows,!openbsd
package utils
diff --git a/cmd/utils/diskusage_openbsd.go b/cmd/utils/diskusage_openbsd.go
index 54f759d29..52502d0cf 100644
--- a/cmd/utils/diskusage_openbsd.go
+++ b/cmd/utils/diskusage_openbsd.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build openbsd
// +build openbsd
package utils
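
The `//go:build` lines added in these files are the Go 1.17 build-constraint syntax, which gofmt now emits alongside the legacy `// +build` form so older toolchains keep working. For illustration, a file constrained to two platforms carries both spellings of the same condition:

```go
//go:build linux || darwin
// +build linux darwin

// Both constraint lines above must agree; gofmt (Go 1.17+) keeps them in
// sync. The new //go:build form uses boolean operators (||, &&, !); the
// legacy // +build form uses spaces for OR and commas for AND.
package example
```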
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 8a53f525f..52c15c68e 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -460,7 +460,7 @@ var (
Usage: "Time interval to recreate the block being mined",
Value: ethconfig.Defaults.Miner.Recommit,
}
- MinerNoVerfiyFlag = cli.BoolFlag{
+ MinerNoVerifyFlag = cli.BoolFlag{
Name: "miner.noverify",
Usage: "Disable remote sealing verification",
}
@@ -920,14 +920,6 @@ func SplitAndTrim(input string) (ret []string) {
// setHTTP creates the HTTP RPC listener interface string from the set
// command line flags, returning empty if the HTTP endpoint is disabled.
func setHTTP(ctx *cli.Context, cfg *node.Config) {
- if ctx.GlobalBool(LegacyRPCEnabledFlag.Name) && cfg.HTTPHost == "" {
- log.Warn("The flag --rpc is deprecated and will be removed June 2021, please use --http")
- cfg.HTTPHost = "127.0.0.1"
- if ctx.GlobalIsSet(LegacyRPCListenAddrFlag.Name) {
- cfg.HTTPHost = ctx.GlobalString(LegacyRPCListenAddrFlag.Name)
- log.Warn("The flag --rpcaddr is deprecated and will be removed June 2021, please use --http.addr")
- }
- }
if ctx.GlobalBool(HTTPEnabledFlag.Name) && cfg.HTTPHost == "" {
cfg.HTTPHost = "127.0.0.1"
if ctx.GlobalIsSet(HTTPListenAddrFlag.Name) {
@@ -935,34 +927,18 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
}
}
- if ctx.GlobalIsSet(LegacyRPCPortFlag.Name) {
- cfg.HTTPPort = ctx.GlobalInt(LegacyRPCPortFlag.Name)
- log.Warn("The flag --rpcport is deprecated and will be removed June 2021, please use --http.port")
- }
if ctx.GlobalIsSet(HTTPPortFlag.Name) {
cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
}
- if ctx.GlobalIsSet(LegacyRPCCORSDomainFlag.Name) {
- cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(LegacyRPCCORSDomainFlag.Name))
- log.Warn("The flag --rpccorsdomain is deprecated and will be removed June 2021, please use --http.corsdomain")
- }
if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
}
- if ctx.GlobalIsSet(LegacyRPCApiFlag.Name) {
- cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(LegacyRPCApiFlag.Name))
- log.Warn("The flag --rpcapi is deprecated and will be removed June 2021, please use --http.api")
- }
if ctx.GlobalIsSet(HTTPApiFlag.Name) {
cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(HTTPApiFlag.Name))
}
- if ctx.GlobalIsSet(LegacyRPCVirtualHostsFlag.Name) {
- cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(LegacyRPCVirtualHostsFlag.Name))
- log.Warn("The flag --rpcvhosts is deprecated and will be removed June 2021, please use --http.vhosts")
- }
if ctx.GlobalIsSet(HTTPVirtualHostsFlag.Name) {
cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(HTTPVirtualHostsFlag.Name))
}
@@ -1398,8 +1374,8 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
if ctx.GlobalIsSet(MinerRecommitIntervalFlag.Name) {
cfg.Recommit = ctx.GlobalDuration(MinerRecommitIntervalFlag.Name)
}
- if ctx.GlobalIsSet(MinerNoVerfiyFlag.Name) {
- cfg.Noverify = ctx.GlobalBool(MinerNoVerfiyFlag.Name)
+ if ctx.GlobalIsSet(MinerNoVerifyFlag.Name) {
+ cfg.Noverify = ctx.GlobalBool(MinerNoVerifyFlag.Name)
}
if ctx.GlobalIsSet(LegacyMinerGasTargetFlag.Name) {
log.Warn("The generic --miner.gastarget flag is deprecated and will be removed in the future!")
diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go
index 627f77475..a0f64f609 100644
--- a/cmd/utils/flags_legacy.go
+++ b/cmd/utils/flags_legacy.go
@@ -18,10 +18,8 @@ package utils
import (
"fmt"
- "strings"
"github.com/ethereum/go-ethereum/eth/ethconfig"
- "github.com/ethereum/go-ethereum/node"
"gopkg.in/urfave/cli.v1"
)
@@ -45,35 +43,6 @@ var (
Name: "nousb",
Usage: "Disables monitoring for and managing USB hardware wallets (deprecated)",
}
- LegacyRPCEnabledFlag = cli.BoolFlag{
- Name: "rpc",
- Usage: "Enable the HTTP-RPC server (deprecated and will be removed June 2021, use --http)",
- }
- LegacyRPCListenAddrFlag = cli.StringFlag{
- Name: "rpcaddr",
- Usage: "HTTP-RPC server listening interface (deprecated and will be removed June 2021, use --http.addr)",
- Value: node.DefaultHTTPHost,
- }
- LegacyRPCPortFlag = cli.IntFlag{
- Name: "rpcport",
- Usage: "HTTP-RPC server listening port (deprecated and will be removed June 2021, use --http.port)",
- Value: node.DefaultHTTPPort,
- }
- LegacyRPCCORSDomainFlag = cli.StringFlag{
- Name: "rpccorsdomain",
- Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced) (deprecated and will be removed June 2021, use --http.corsdomain)",
- Value: "",
- }
- LegacyRPCVirtualHostsFlag = cli.StringFlag{
- Name: "rpcvhosts",
- Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (deprecated and will be removed June 2021, use --http.vhosts)",
- Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","),
- }
- LegacyRPCApiFlag = cli.StringFlag{
- Name: "rpcapi",
- Usage: "API's offered over the HTTP-RPC interface (deprecated and will be removed June 2021, use --http.api)",
- Value: "",
- }
// (Deprecated July 2021, shown in aliased flags section)
LegacyMinerGasTargetFlag = cli.Uint64Flag{
Name: "miner.gastarget",
diff --git a/common/fdlimit/fdlimit_bsd.go b/common/fdlimit/fdlimit_bsd.go
index 86181337a..a3a6902c0 100644
--- a/common/fdlimit/fdlimit_bsd.go
+++ b/common/fdlimit/fdlimit_bsd.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build freebsd || dragonfly
// +build freebsd dragonfly
package fdlimit
diff --git a/common/fdlimit/fdlimit_unix.go b/common/fdlimit/fdlimit_unix.go
index e5a575f7a..a1f388ebb 100644
--- a/common/fdlimit/fdlimit_unix.go
+++ b/common/fdlimit/fdlimit_unix.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build linux || netbsd || openbsd || solaris
// +build linux netbsd openbsd solaris
package fdlimit
diff --git a/common/types.go b/common/types.go
index d71535669..2205835cb 100644
--- a/common/types.go
+++ b/common/types.go
@@ -86,7 +86,7 @@ func (h Hash) String() string {
}
// Format implements fmt.Formatter.
-// Hash supports the %v, %s, %v, %x, %X and %d format verbs.
+// Hash supports the %v, %s, %q, %x, %X and %d format verbs.
func (h Hash) Format(s fmt.State, c rune) {
hexb := make([]byte, 2+len(h)*2)
copy(hexb, "0x")
@@ -270,7 +270,7 @@ func (a Address) hex() []byte {
}
// Format implements fmt.Formatter.
-// Address supports the %v, %s, %v, %x, %X and %d format verbs.
+// Address supports the %v, %s, %q, %x, %X and %d format verbs.
func (a Address) Format(s fmt.State, c rune) {
switch c {
case 'v', 's':
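
The docstring fix above matches what `Format` actually implements: `%q` rather than a duplicated `%v`. Roughly, the verbs behave as follows (a sketch, not an exhaustive demonstration):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	h := common.HexToHash("0xabc123")
	fmt.Printf("%v\n", h) // 0x-prefixed hex (same as %s)
	fmt.Printf("%q\n", h) // the %v form wrapped in double quotes
	fmt.Printf("%x\n", h) // bare lowercase hex, no 0x prefix
	fmt.Printf("%X\n", h) // bare uppercase hex
}
```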
diff --git a/console/console.go b/console/console.go
index ae9f28da0..dd39300d0 100644
--- a/console/console.go
+++ b/console/console.go
@@ -324,7 +324,7 @@ func (c *Console) Welcome() {
sort.Strings(modules)
message += " modules: " + strings.Join(modules, " ") + "\n"
}
- message += "\nTo exit, press ctrl-d"
+ message += "\nTo exit, press ctrl-d or type exit"
fmt.Fprintln(c.printer, message)
}
diff --git a/core/blockchain.go b/core/blockchain.go
index 84a84e80f..78614c7c5 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -207,8 +207,7 @@ type BlockChain struct {
processor Processor // Block transaction processor interface
vmConfig vm.Config
- shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
- terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion.
+ shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block.
}
// NewBlockChain returns a fully initialised block chain using information
@@ -1085,38 +1084,6 @@ const (
SideStatTy
)
-// truncateAncient rewinds the blockchain to the specified header and deletes all
-// data in the ancient store that exceeds the specified header.
-func (bc *BlockChain) truncateAncient(head uint64) error {
- frozen, err := bc.db.Ancients()
- if err != nil {
- return err
- }
- // Short circuit if there is no data to truncate in ancient store.
- if frozen <= head+1 {
- return nil
- }
- // Truncate all the data in the freezer beyond the specified head
- if err := bc.db.TruncateAncients(head + 1); err != nil {
- return err
- }
- // Clear out any stale content from the caches
- bc.hc.headerCache.Purge()
- bc.hc.tdCache.Purge()
- bc.hc.numberCache.Purge()
-
- // Clear out any stale content from the caches
- bc.bodyCache.Purge()
- bc.bodyRLPCache.Purge()
- bc.receiptsCache.Purge()
- bc.blockCache.Purge()
- bc.txLookupCache.Purge()
- bc.futureBlocks.Purge()
-
- log.Info("Rewind ancient data", "number", head)
- return nil
-}
-
// numberHash is just a container for a number and a hash, to represent a block
type numberHash struct {
number uint64
@@ -1155,12 +1122,14 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
var (
stats = struct{ processed, ignored int32 }{}
start = time.Now()
- size = 0
+ size = int64(0)
)
+
// updateHead updates the head fast sync block if the inserted blocks are better
// and returns an indicator whether the inserted blocks are canonical.
updateHead := func(head *types.Block) bool {
bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
// Rewind may have occurred, skip in that case.
if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
@@ -1169,68 +1138,63 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
bc.currentFastBlock.Store(head)
headFastBlockGauge.Update(int64(head.NumberU64()))
- bc.chainmu.Unlock()
return true
}
}
- bc.chainmu.Unlock()
return false
}
+
// writeAncient writes blockchain and corresponding receipt chain into ancient store.
//
// this function only accepts canonical chain data. All side chain will be reverted
// eventually.
writeAncient := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
- var (
- previous = bc.CurrentFastBlock()
- batch = bc.db.NewBatch()
- )
- // If any error occurs before updating the head or we are inserting a side chain,
- // all the data written this time wll be rolled back.
- defer func() {
- if previous != nil {
- if err := bc.truncateAncient(previous.NumberU64()); err != nil {
- log.Crit("Truncate ancient store failed", "err", err)
- }
- }
- }()
- var deleted []*numberHash
- for i, block := range blockChain {
- // Short circuit insertion if shutting down or processing failed
- if bc.insertStopped() {
- return 0, errInsertionInterrupted
- }
- // Short circuit insertion if it is required(used in testing only)
- if bc.terminateInsert != nil && bc.terminateInsert(block.Hash(), block.NumberU64()) {
- return i, errors.New("insertion is terminated for testing purpose")
- }
- // Short circuit if the owner header is unknown
- if !bc.HasHeader(block.Hash(), block.NumberU64()) {
- return i, fmt.Errorf("containing header #%d [%x..] unknown", block.Number(), block.Hash().Bytes()[:4])
- }
- if block.NumberU64() == 1 {
- // Make sure to write the genesis into the freezer
- if frozen, _ := bc.db.Ancients(); frozen == 0 {
- h := rawdb.ReadCanonicalHash(bc.db, 0)
- b := rawdb.ReadBlock(bc.db, h, 0)
- size += rawdb.WriteAncientBlock(bc.db, b, rawdb.ReadReceipts(bc.db, h, 0, bc.chainConfig), rawdb.ReadTd(bc.db, h, 0))
- log.Info("Wrote genesis to ancients")
- }
- }
- // Flush data into ancient database.
- size += rawdb.WriteAncientBlock(bc.db, block, receiptChain[i], bc.GetTd(block.Hash(), block.NumberU64()))
+ first := blockChain[0]
+ last := blockChain[len(blockChain)-1]
- // Write tx indices if any condition is satisfied:
- // * If user requires to reserve all tx indices(txlookuplimit=0)
- // * If all ancient tx indices are required to be reserved(txlookuplimit is even higher than ancientlimit)
- // * If block number is large enough to be regarded as a recent block
- // It means blocks below the ancientLimit-txlookupLimit won't be indexed.
- //
- // But if the `TxIndexTail` is not nil, e.g. Geth is initialized with
- // an external ancient database, during the setup, blockchain will start
- // a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients)
- // range. In this case, all tx indices of newly imported blocks should be
- // generated.
+ // Ensure genesis is in ancients.
+ if first.NumberU64() == 1 {
+ if frozen, _ := bc.db.Ancients(); frozen == 0 {
+ b := bc.genesisBlock
+ td := bc.genesisBlock.Difficulty()
+ writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{b}, []types.Receipts{nil}, td)
+ size += writeSize
+ if err != nil {
+ log.Error("Error writing genesis to ancients", "err", err)
+ return 0, err
+ }
+ log.Info("Wrote genesis to ancients")
+ }
+ }
+ // Before writing the blocks to the ancients, we need to ensure that
+ // they correspond to what the headerchain 'expects'.
+ // We only check the last block/header, since it's a contiguous chain.
+ if !bc.HasHeader(last.Hash(), last.NumberU64()) {
+ return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4])
+ }
+
+ // Write all chain data to ancients.
+ td := bc.GetTd(first.Hash(), first.NumberU64())
+ writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
+ size += writeSize
+ if err != nil {
+ log.Error("Error importing chain data to ancients", "err", err)
+ return 0, err
+ }
+
+ // Write tx indices if any of these conditions holds:
+ // * the user requires all tx indices to be preserved (txlookuplimit=0)
+ // * all ancient tx indices are required to be preserved (txlookuplimit is even higher than ancientlimit)
+ // * the block number is large enough to be regarded as a recent block
+ // This means blocks below ancientLimit-txlookupLimit won't be indexed.
+ //
+ // But if `TxIndexTail` is not nil, e.g. when Geth is initialized with an
+ // external ancient database, the blockchain will start a background routine
+ // during setup to re-index everything in the [ancients - txlookupLimit, ancients)
+ // range. In that case, tx indices should be generated for all newly
+ // imported blocks.
+ var batch = bc.db.NewBatch()
+ for _, block := range blockChain {
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
rawdb.WriteTxLookupEntriesByBlock(batch, block)
} else if rawdb.ReadTxIndexTail(bc.db) != nil {
@@ -1238,51 +1202,50 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
stats.processed++
}
+
// Flush all tx-lookup index data.
- size += batch.ValueSize()
+ size += int64(batch.ValueSize())
if err := batch.Write(); err != nil {
+ // The tx index data could not be written.
+ // Roll back the ancient store update.
+ fastBlock := bc.CurrentFastBlock().NumberU64()
+ if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
+ log.Error("Can't truncate ancient store after failed insert", "err", err)
+ }
return 0, err
}
- batch.Reset()
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
if err := bc.db.Sync(); err != nil {
return 0, err
}
+
+ // Update the current fast block because all block data is now present in DB.
+ previousFastBlock := bc.CurrentFastBlock().NumberU64()
if !updateHead(blockChain[len(blockChain)-1]) {
- return 0, errors.New("side blocks can't be accepted as the ancient chain data")
- }
- previous = nil // disable rollback explicitly
-
- // Wipe out canonical block data.
- for _, nh := range deleted {
- rawdb.DeleteBlockWithoutNumber(batch, nh.hash, nh.number)
- rawdb.DeleteCanonicalHash(batch, nh.number)
- }
- for _, block := range blockChain {
- // Always keep genesis block in active database.
- if block.NumberU64() != 0 {
- rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
- rawdb.DeleteCanonicalHash(batch, block.NumberU64())
+ // We end up here if the header chain has reorg'ed, and the blocks/receipts
+ // don't match the canonical chain.
+ if err := bc.db.TruncateAncients(previousFastBlock + 1); err != nil {
+ log.Error("Can't truncate ancient store after failed insert", "err", err)
}
+ return 0, errSideChainReceipts
}
- if err := batch.Write(); err != nil {
- return 0, err
- }
+
+ // Delete block data from the main database.
batch.Reset()
-
- // Wipe out side chain too.
- for _, nh := range deleted {
- for _, hash := range rawdb.ReadAllHashes(bc.db, nh.number) {
- rawdb.DeleteBlock(batch, hash, nh.number)
- }
- }
+ canonHashes := make(map[common.Hash]struct{})
for _, block := range blockChain {
- // Always keep genesis block in active database.
- if block.NumberU64() != 0 {
- for _, hash := range rawdb.ReadAllHashes(bc.db, block.NumberU64()) {
- rawdb.DeleteBlock(batch, hash, block.NumberU64())
- }
+ canonHashes[block.Hash()] = struct{}{}
+ if block.NumberU64() == 0 {
+ continue
+ }
+ rawdb.DeleteCanonicalHash(batch, block.NumberU64())
+ rawdb.DeleteBlockWithoutNumber(batch, block.Hash(), block.NumberU64())
+ }
+ // Delete side chain hash-to-number mappings.
+ for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) {
+ if _, canon := canonHashes[nh.Hash]; !canon {
+ rawdb.DeleteHeader(batch, nh.Hash, nh.Number)
}
}
if err := batch.Write(); err != nil {
@@ -1290,6 +1253,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
return 0, nil
}
+
// writeLive writes blockchain and corresponding receipt chain into active store.
writeLive := func(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
skipPresenceCheck := false
@@ -1327,7 +1291,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if err := batch.Write(); err != nil {
return 0, err
}
- size += batch.ValueSize()
+ size += int64(batch.ValueSize())
batch.Reset()
}
stats.processed++
@@ -1336,7 +1300,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// we can ensure all components of body is completed(body, receipts,
// tx indexes)
if batch.ValueSize() > 0 {
- size += batch.ValueSize()
+ size += int64(batch.ValueSize())
if err := batch.Write(); err != nil {
return 0, err
}
@@ -1344,6 +1308,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
updateHead(blockChain[len(blockChain)-1])
return 0, nil
}
+
// Write downloaded chain data and corresponding receipt chain data
if len(ancientBlocks) > 0 {
if n, err := writeAncient(ancientBlocks, ancientReceipts); err != nil {
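
The rewrite replaces the deferred `truncateAncient` rollback with explicit truncation at each failure point, after the batch write has been attempted. Schematically (a hypothetical helper against the `ethdb` interfaces as of this change, not code from the patch):

```go
package main

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
)

// writeWithRollback sketches the new strategy: write optimistically, and
// truncate the freezer back to the pre-insert head only if a later step
// fails.
func writeWithRollback(db ethdb.Database, prevHead uint64, write func() error) error {
	if err := write(); err != nil {
		if terr := db.TruncateAncients(prevHead + 1); terr != nil {
			log.Error("Can't truncate ancient store after failed insert", "err", terr)
		}
		return err
	}
	return nil
}

func main() {}
```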
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 4e5df633b..8d94f17aa 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -670,6 +670,7 @@ func TestFastVsFullChains(t *testing.T) {
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(len(blocks)/2)); err != nil {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
+
// Iterate over all chain data components, and cross reference
for i := 0; i < len(blocks); i++ {
num, hash := blocks[i].NumberU64(), blocks[i].Hash()
@@ -693,10 +694,27 @@ func TestFastVsFullChains(t *testing.T) {
} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) {
t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles())
}
- if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) {
+
+ // Check receipts.
+ freceipts := rawdb.ReadReceipts(fastDb, hash, num, fast.Config())
+ anreceipts := rawdb.ReadReceipts(ancientDb, hash, num, fast.Config())
+ areceipts := rawdb.ReadReceipts(archiveDb, hash, num, fast.Config())
+ if types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) {
t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts)
}
+
+ // Check that hash-to-number mappings are present in all databases.
+ if m := rawdb.ReadHeaderNumber(fastDb, hash); m == nil || *m != num {
+ t.Errorf("block #%d [%x]: wrong hash-to-number mapping in fastdb: %v", num, hash, m)
+ }
+ if m := rawdb.ReadHeaderNumber(ancientDb, hash); m == nil || *m != num {
+ t.Errorf("block #%d [%x]: wrong hash-to-number mapping in ancientdb: %v", num, hash, m)
+ }
+ if m := rawdb.ReadHeaderNumber(archiveDb, hash); m == nil || *m != num {
+ t.Errorf("block #%d [%x]: wrong hash-to-number mapping in archivedb: %v", num, hash, m)
+ }
}
+
// Check that the canonical chains are the same between the databases
for i := 0; i < len(blocks)+1; i++ {
if fhash, ahash := rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
@@ -1639,20 +1657,34 @@ func TestBlockchainRecovery(t *testing.T) {
}
}
-func TestIncompleteAncientReceiptChainInsertion(t *testing.T) {
- // Configure and generate a sample block chain
- var (
- gendb = rawdb.NewMemoryDatabase()
- key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- address = crypto.PubkeyToAddress(key.PublicKey)
- funds = big.NewInt(1000000000)
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
- genesis = gspec.MustCommit(gendb)
- )
- height := uint64(1024)
- blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil)
+// This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain.
+func TestInsertReceiptChainRollback(t *testing.T) {
+ // Generate forked chain. The returned BlockChain object is used to process the side chain blocks.
+ tmpChain, sideblocks, canonblocks, err := getLongAndShortChains()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer tmpChain.Stop()
+ // Get the side chain receipts.
+ if _, err := tmpChain.InsertChain(sideblocks); err != nil {
+ t.Fatal("processing side chain failed:", err)
+ }
+ t.Log("sidechain head:", tmpChain.CurrentBlock().Number(), tmpChain.CurrentBlock().Hash())
+ sidechainReceipts := make([]types.Receipts, len(sideblocks))
+ for i, block := range sideblocks {
+ sidechainReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
+ }
+ // Get the canon chain receipts.
+ if _, err := tmpChain.InsertChain(canonblocks); err != nil {
+ t.Fatal("processing canon chain failed:", err)
+ }
+ t.Log("canon head:", tmpChain.CurrentBlock().Number(), tmpChain.CurrentBlock().Hash())
+ canonReceipts := make([]types.Receipts, len(canonblocks))
+ for i, block := range canonblocks {
+ canonReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
+ }
- // Import the chain as a ancient-first node and ensure all pointers are updated
+ // Set up a BlockChain that uses the ancient store.
frdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
@@ -1662,38 +1694,43 @@ func TestIncompleteAncientReceiptChainInsertion(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
+ gspec := Genesis{Config: params.AllEthashProtocolChanges}
gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer ancient.Stop()
+ ancientChain, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ defer ancientChain.Stop()
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
+ // Import the canonical header chain.
+ canonHeaders := make([]*types.Header, len(canonblocks))
+ for i, block := range canonblocks {
+ canonHeaders[i] = block.Header()
}
- if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
- t.Fatalf("failed to insert header %d: %v", n, err)
+ if _, err = ancientChain.InsertHeaderChain(canonHeaders, 1); err != nil {
+ t.Fatal("can't import canon headers:", err)
}
- // Abort ancient receipt chain insertion deliberately
- ancient.terminateInsert = func(hash common.Hash, number uint64) bool {
- return number == blocks[len(blocks)/2].NumberU64()
+
+ // Try to insert blocks/receipts of the side chain.
+ _, err = ancientChain.InsertReceiptChain(sideblocks, sidechainReceipts, uint64(len(sideblocks)))
+ if err == nil {
+ t.Fatal("expected error from InsertReceiptChain.")
}
- previousFastBlock := ancient.CurrentFastBlock()
- if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err == nil {
- t.Fatalf("failed to insert receipt %d: %v", n, err)
+ if ancientChain.CurrentFastBlock().NumberU64() != 0 {
+ t.Fatalf("failed to rollback ancient data, want %d, have %d", 0, ancientChain.CurrentFastBlock().NumberU64())
}
- if ancient.CurrentFastBlock().NumberU64() != previousFastBlock.NumberU64() {
- t.Fatalf("failed to rollback ancient data, want %d, have %d", previousFastBlock.NumberU64(), ancient.CurrentFastBlock().NumberU64())
+ if frozen, err := ancientChain.db.Ancients(); err != nil || frozen != 1 {
+ t.Fatalf("failed to truncate ancient data, frozen index is %d", frozen)
}
- if frozen, err := ancient.db.Ancients(); err != nil || frozen != 1 {
- t.Fatalf("failed to truncate ancient data")
+
+ // Insert blocks/receipts of the canonical chain.
+ _, err = ancientChain.InsertReceiptChain(canonblocks, canonReceipts, uint64(len(canonblocks)))
+ if err != nil {
+ t.Fatalf("can't import canon chain receipts: %v", err)
}
- ancient.terminateInsert = nil
- if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
- t.Fatalf("failed to insert receipt %d: %v", n, err)
- }
- if ancient.CurrentFastBlock().NumberU64() != blocks[len(blocks)-1].NumberU64() {
+ if ancientChain.CurrentFastBlock().NumberU64() != canonblocks[len(canonblocks)-1].NumberU64() {
t.Fatalf("failed to insert ancient recept chain after rollback")
}
+ if frozen, _ := ancientChain.db.Ancients(); frozen != uint64(len(canonblocks))+1 {
+ t.Fatalf("wrong ancients count %d", frozen)
+ }
}
// Tests that importing a very large side fork, which is larger than the canon chain,
@@ -1958,9 +1995,8 @@ func testInsertKnownChainData(t *testing.T, typ string) {
asserter(t, blocks2[len(blocks2)-1])
}
-// getLongAndShortChains returns two chains,
-// A is longer, B is heavier
-func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, error) {
+// getLongAndShortChains returns two chains: A is longer, B is heavier.
+func getLongAndShortChains() (bc *BlockChain, longChain []*types.Block, heavyChain []*types.Block, err error) {
// Generate a canonical chain to act as the main dataset
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
@@ -1968,7 +2004,7 @@ func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, error
// Generate and import the canonical chain,
// Offset the time, to keep the difficulty low
- longChain, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 80, func(i int, b *BlockGen) {
+ longChain, _ = GenerateChain(params.TestChainConfig, genesis, engine, db, 80, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{1})
})
diskdb := rawdb.NewMemoryDatabase()
@@ -1982,10 +2018,13 @@ func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, error
// Generate fork chain, make it shorter than canon, with common ancestor pretty early
parentIndex := 3
parent := longChain[parentIndex]
- heavyChain, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 75, func(i int, b *BlockGen) {
+ heavyChainExt, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 75, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{2})
b.OffsetTime(-9)
})
+ heavyChain = append(heavyChain, longChain[:parentIndex+1]...)
+ heavyChain = append(heavyChain, heavyChainExt...)
+
// Verify that the test is sane
var (
longerTd = new(big.Int)
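The sanity check that follows compares accumulated difficulty: `OffsetTime(-9)` shortens block times, which raises per-block difficulty, so the shorter fork can still be heavier. A sketch of that comparison — `totalDifficulty` is a hypothetical helper, while `Difficulty()` is the real accessor on `types.Block`:

```go
package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// totalDifficulty sums per-block difficulty. The "heavy" fork generated
// with OffsetTime(-9) has fewer blocks, but each carries a higher
// difficulty, so its sum can exceed the longer chain's.
func totalDifficulty(blocks []*types.Block) *big.Int {
	td := new(big.Int)
	for _, b := range blocks {
		td.Add(td, b.Difficulty())
	}
	return td
}
```

A test would then assert `totalDifficulty(heavyChain).Cmp(totalDifficulty(longChain)) > 0` before relying on reorg behaviour.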
diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go
index 927232be0..f2a8bda17 100644
--- a/core/bloombits/matcher.go
+++ b/core/bloombits/matcher.go
@@ -510,8 +510,9 @@ type MatcherSession struct {
closer sync.Once // Sync object to ensure we only ever close once
quit chan struct{} // Quit channel to request pipeline termination
- ctx context.Context // Context used by the light client to abort filtering
- err atomic.Value // Global error to track retrieval failures deep in the chain
+ ctx context.Context // Context used by the light client to abort filtering
+ err error // Global error to track retrieval failures deep in the chain
+ errLock sync.Mutex
pend sync.WaitGroup
}
@@ -529,10 +530,10 @@ func (s *MatcherSession) Close() {
// Error returns any failure encountered during the matching session.
func (s *MatcherSession) Error() error {
- if err := s.err.Load(); err != nil {
- return err.(error)
- }
- return nil
+ s.errLock.Lock()
+ defer s.errLock.Unlock()
+
+ return s.err
}
// allocateRetrieval assigns a bloom bit index to a client process that can either
@@ -630,7 +631,9 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
result := <-request
if result.Error != nil {
- s.err.Store(result.Error)
+ s.errLock.Lock()
+ s.err = result.Error
+ s.errLock.Unlock()
s.Close()
}
s.deliverSections(result.Bit, result.Sections, result.Bitsets)
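A note on the matcher change above: `atomic.Value` requires every store to use the same concrete type and cannot hold a nil interface value, which makes it awkward for error plumbing; the diff swaps it for a mutex-guarded field. A standalone sketch of the pattern (names are illustrative):

```go
package example

import "sync"

// guardedError mirrors the MatcherSession change: a plain error field
// behind a mutex. Unlike atomic.Value, it tolerates nil and differently
// typed errors without panicking.
type guardedError struct {
	mu  sync.Mutex
	err error
}

func (g *guardedError) set(err error) {
	g.mu.Lock()
	g.err = err
	g.mu.Unlock()
}

func (g *guardedError) Err() error {
	g.mu.Lock()
	defer g.mu.Unlock()
	return g.err
}
```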
diff --git a/core/error.go b/core/error.go
index 33cf874d1..594f3a26e 100644
--- a/core/error.go
+++ b/core/error.go
@@ -31,6 +31,8 @@ var (
// ErrNoGenesis is returned when there is no Genesis Block.
ErrNoGenesis = errors.New("genesis not found in chain")
+
+ errSideChainReceipts = errors.New("side blocks can't be accepted as ancient chain data")
)
// List of evm-call-message pre-checking errors. All state transition messages will
diff --git a/core/genesis.go b/core/genesis.go
index c1f226c34..38ace4920 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -310,7 +310,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
block := g.ToBlock(db)
if block.Number().Sign() != 0 {
- return nil, fmt.Errorf("can't commit genesis block with number > 0")
+ return nil, errors.New("can't commit genesis block with number > 0")
}
config := g.Config
if config == nil {
@@ -319,6 +319,9 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if err := config.CheckConfigForkOrder(); err != nil {
return nil, err
}
+ if config.Clique != nil && len(block.Extra()) == 0 {
+ return nil, errors.New("can't start clique chain without signers")
+ }
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty)
rawdb.WriteBlock(db, block)
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 52c4384ab..055be2796 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -39,6 +39,22 @@ func TestDefaultGenesisBlock(t *testing.T) {
if block.Hash() != params.RopstenGenesisHash {
t.Errorf("wrong ropsten genesis hash, got %v, want %v", block.Hash(), params.RopstenGenesisHash)
}
+ block = DefaultRinkebyGenesisBlock().ToBlock(nil)
+ if block.Hash() != params.RinkebyGenesisHash {
+ t.Errorf("wrong rinkeby genesis hash, got %v, want %v", block.Hash(), params.RinkebyGenesisHash)
+ }
+ block = DefaultGoerliGenesisBlock().ToBlock(nil)
+ if block.Hash() != params.GoerliGenesisHash {
+ t.Errorf("wrong goerli genesis hash, got %v, want %v", block.Hash(), params.GoerliGenesisHash)
+ }
+}
+
+func TestInvalidCliqueConfig(t *testing.T) {
+ block := DefaultGoerliGenesisBlock()
+ block.ExtraData = []byte{}
+ if _, err := block.Commit(nil); err == nil {
+ t.Fatal("Expected error on invalid clique config")
+ }
}
func TestSetupGenesis(t *testing.T) {
diff --git a/core/mkalloc.go b/core/mkalloc.go
index 5118a4fcb..df167d708 100644
--- a/core/mkalloc.go
+++ b/core/mkalloc.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build none
// +build none
/*
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 76132bf37..ed1c71e20 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -19,6 +19,8 @@ package rawdb
import (
"bytes"
"encoding/binary"
+ "errors"
+ "fmt"
"math/big"
"sort"
@@ -81,6 +83,37 @@ func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
return hashes
}
+type NumberHash struct {
+ Number uint64
+ Hash common.Hash
+}
+
+// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
+// heights, both canonical and reorged forks included.
+// This method considers both limits to be _inclusive_.
+func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
+ var (
+ start = encodeBlockNumber(first)
+ keyLength = len(headerPrefix) + 8 + 32
+ hashes = make([]*NumberHash, 0, 1+last-first)
+ it = db.NewIterator(headerPrefix, start)
+ )
+ defer it.Release()
+ for it.Next() {
+ key := it.Key()
+ if len(key) != keyLength {
+ continue
+ }
+ num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
+ if num > last {
+ break
+ }
+ hash := common.BytesToHash(key[len(key)-32:])
+ hashes = append(hashes, &NumberHash{num, hash})
+ }
+ return hashes
+}
+
// ReadAllCanonicalHashes retrieves all canonical number and hash mappings at the
// certain chain range. If the accumulated entries reaches the given threshold,
// abort the iteration and return the semi-finish result.
@@ -631,6 +664,86 @@ func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
}
}
+// storedReceiptRLP is the storage encoding of a receipt.
+// Re-definition in core/types/receipt.go.
+type storedReceiptRLP struct {
+ PostStateOrStatus []byte
+ CumulativeGasUsed uint64
+ Logs []*types.LogForStorage
+}
+
+// receiptLogs is a barebones version of ReceiptForStorage which only keeps
+// the list of logs. When decoding a stored receipt into this object we
+// avoid creating the bloom filter.
+type receiptLogs struct {
+ Logs []*types.Log
+}
+
+// DecodeRLP implements rlp.Decoder.
+func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
+ var stored storedReceiptRLP
+ if err := s.Decode(&stored); err != nil {
+ return err
+ }
+ r.Logs = make([]*types.Log, len(stored.Logs))
+ for i, log := range stored.Logs {
+ r.Logs[i] = (*types.Log)(log)
+ }
+ return nil
+}
+
+// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
+func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
+ logIndex := uint(0)
+ if len(txs) != len(receipts) {
+ return errors.New("transaction and receipt count mismatch")
+ }
+ for i := 0; i < len(receipts); i++ {
+ txHash := txs[i].Hash()
+ // The derived log fields can simply be set from the block and transaction
+ for j := 0; j < len(receipts[i].Logs); j++ {
+ receipts[i].Logs[j].BlockNumber = number
+ receipts[i].Logs[j].BlockHash = hash
+ receipts[i].Logs[j].TxHash = txHash
+ receipts[i].Logs[j].TxIndex = uint(i)
+ receipts[i].Logs[j].Index = logIndex
+ logIndex++
+ }
+ }
+ return nil
+}
+
+// ReadLogs retrieves the logs for all transactions in a block. The log fields
+// are populated with metadata. If the receipts or the block body are not
+// found, nil is returned.
+func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
+ // Retrieve the flattened receipt slice
+ data := ReadReceiptsRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ receipts := []*receiptLogs{}
+ if err := rlp.DecodeBytes(data, &receipts); err != nil {
+ log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
+ return nil
+ }
+
+ body := ReadBody(db, hash, number)
+ if body == nil {
+ log.Error("Missing body but have receipt", "hash", hash, "number", number)
+ return nil
+ }
+ if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil {
+ log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
+ return nil
+ }
+ logs := make([][]*types.Log, len(receipts))
+ for i, receipt := range receipts {
+ logs[i] = receipt.Logs
+ }
+ return logs
+}
+
// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved nil is returned.
@@ -656,34 +769,48 @@ func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
}
-// WriteAncientBlock writes entire block data into ancient store and returns the total written size.
+// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
-func WriteAncientBlock(db ethdb.AncientWriter, block *types.Block, receipts types.Receipts, td *big.Int) int {
- // Encode all block components to RLP format.
- headerBlob, err := rlp.EncodeToBytes(block.Header())
- if err != nil {
- log.Crit("Failed to RLP encode block header", "err", err)
+func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
+ var (
+ tdSum = new(big.Int).Set(td)
+ stReceipts []*types.ReceiptForStorage
+ )
+ return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for i, block := range blocks {
+ // Convert receipts to storage format and sum up total difficulty.
+ stReceipts = stReceipts[:0]
+ for _, receipt := range receipts[i] {
+ stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
+ }
+ header := block.Header()
+ if i > 0 {
+ tdSum.Add(tdSum, header.Difficulty)
+ }
+ if err := writeAncientBlock(op, block, header, stReceipts, tdSum); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+}
+
+func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
+ num := block.NumberU64()
+ if err := op.AppendRaw(freezerHashTable, num, block.Hash().Bytes()); err != nil {
+ return fmt.Errorf("can't add block %d hash: %v", num, err)
}
- bodyBlob, err := rlp.EncodeToBytes(block.Body())
- if err != nil {
- log.Crit("Failed to RLP encode body", "err", err)
+ if err := op.Append(freezerHeaderTable, num, header); err != nil {
+ return fmt.Errorf("can't append block header %d: %v", num, err)
}
- storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
- for i, receipt := range receipts {
- storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
+ if err := op.Append(freezerBodiesTable, num, block.Body()); err != nil {
+ return fmt.Errorf("can't append block body %d: %v", num, err)
}
- receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
- if err != nil {
- log.Crit("Failed to RLP encode block receipts", "err", err)
+ if err := op.Append(freezerReceiptTable, num, receipts); err != nil {
+ return fmt.Errorf("can't append block %d receipts: %v", num, err)
}
- tdBlob, err := rlp.EncodeToBytes(td)
- if err != nil {
- log.Crit("Failed to RLP encode block total difficulty", "err", err)
+ if err := op.Append(freezerDifficultyTable, num, td); err != nil {
+ return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
}
- // Write all blob to flatten files.
- err = db.AppendAncient(block.NumberU64(), block.Hash().Bytes(), headerBlob, bodyBlob, receiptBlob, tdBlob)
- if err != nil {
- log.Crit("Failed to write block data to ancient store", "err", err)
- }
- return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
+ return nil
}
// DeleteBlock removes all block data associated with a hash.
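The new `ReadLogs` accessor exists so log filtering can skip full receipt decoding: the `receiptLogs` shim above never reconstructs bloom filters or gas counters. An illustrative caller, using the signature introduced in this diff; the db handle and block identity are assumed to come from elsewhere:

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// printBlockLogs fetches all logs of one block without decoding the full
// receipts. ReadLogs returns one slice of logs per transaction.
func printBlockLogs(db ethdb.Reader, hash common.Hash, number uint64) {
	for txIndex, txLogs := range rawdb.ReadLogs(db, hash, number) {
		for _, l := range txLogs {
			fmt.Printf("tx %d: log %d emitted by %s\n", txIndex, l.Index, l.Address.Hex())
		}
	}
}
```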
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index f20e8b1ff..4b173c55e 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
@@ -438,7 +439,7 @@ func TestAncientStorage(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
- defer os.Remove(frdir)
+ defer os.RemoveAll(frdir)
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
if err != nil {
@@ -467,8 +468,10 @@ func TestAncientStorage(t *testing.T) {
if blob := ReadTdRLP(db, hash, number); len(blob) > 0 {
t.Fatalf("non existent td returned")
}
+
// Write and verify the header in the database
- WriteAncientBlock(db, block, nil, big.NewInt(100))
+ WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil}, big.NewInt(100))
+
if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 {
t.Fatalf("no header returned")
}
@@ -481,6 +484,7 @@ func TestAncientStorage(t *testing.T) {
if blob := ReadTdRLP(db, hash, number); len(blob) == 0 {
t.Fatalf("no td returned")
}
+
// Use a fake hash for data retrieval, nothing should be returned.
fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03})
if blob := ReadHeaderRLP(db, fakeHash, number); len(blob) != 0 {
@@ -528,3 +532,354 @@ func TestCanonicalHashIteration(t *testing.T) {
}
}
}
+
+func TestHashesInRange(t *testing.T) {
+ mkHeader := func(number, seq int) *types.Header {
+ h := types.Header{
+ Difficulty: new(big.Int),
+ Number: big.NewInt(int64(number)),
+ GasLimit: uint64(seq),
+ }
+ return &h
+ }
+ db := NewMemoryDatabase()
+ // For each number, write N versions of that particular number
+ total := 0
+ for i := 0; i < 15; i++ {
+ for ii := 0; ii < i; ii++ {
+ WriteHeader(db, mkHeader(i, ii))
+ total++
+ }
+ }
+ if have, want := len(ReadAllHashesInRange(db, 10, 10)), 10; have != want {
+ t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+ }
+ if have, want := len(ReadAllHashesInRange(db, 10, 9)), 0; have != want {
+ t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+ }
+ if have, want := len(ReadAllHashesInRange(db, 0, 100)), total; have != want {
+ t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+ }
+ if have, want := len(ReadAllHashesInRange(db, 9, 10)), 9+10; have != want {
+ t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+ }
+ if have, want := len(ReadAllHashes(db, 10)), 10; have != want {
+ t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+ }
+ if have, want := len(ReadAllHashes(db, 16)), 0; have != want {
+ t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+ }
+ if have, want := len(ReadAllHashes(db, 1)), 1; have != want {
+ t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
+ }
+}
+
+// This measures the write speed of the WriteAncientBlocks operation.
+func BenchmarkWriteAncientBlocks(b *testing.B) {
+ // Open freezer database.
+ frdir, err := ioutil.TempDir("", "")
+ if err != nil {
+ b.Fatalf("failed to create temp freezer dir: %v", err)
+ }
+ defer os.RemoveAll(frdir)
+ db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
+ if err != nil {
+ b.Fatalf("failed to create database with ancient backend")
+ }
+
+ // Create the data to insert. The blocks must have consecutive numbers, so we create
+ // all of them ahead of time. However, there is no need to create receipts
+ // individually for each block, just make one batch here and reuse it for all writes.
+ const batchSize = 128
+ const blockTxs = 20
+ allBlocks := makeTestBlocks(b.N, blockTxs)
+ batchReceipts := makeTestReceipts(batchSize, blockTxs)
+ b.ResetTimer()
+
+ // The benchmark loop writes batches of blocks, but note that the total block count is
+ // b.N. This means the resulting ns/op measurement is the time it takes to write a
+ // single block and its associated data.
+ var td = big.NewInt(55)
+ var totalSize int64
+ for i := 0; i < b.N; i += batchSize {
+ length := batchSize
+ if i+batchSize > b.N {
+ length = b.N - i
+ }
+
+ blocks := allBlocks[i : i+length]
+ receipts := batchReceipts[:length]
+ writeSize, err := WriteAncientBlocks(db, blocks, receipts, td)
+ if err != nil {
+ b.Fatal(err)
+ }
+ totalSize += writeSize
+ }
+
+ // Enable MB/s reporting.
+ b.SetBytes(totalSize / int64(b.N))
+}
+
+// makeTestBlocks creates fake blocks for the ancient write benchmark.
+func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
+ key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ signer := types.LatestSignerForChainID(big.NewInt(8))
+
+ // Create transactions.
+ txs := make([]*types.Transaction, txsPerBlock)
+ for i := 0; i < len(txs); i++ {
+ var err error
+ to := common.Address{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+ txs[i], err = types.SignNewTx(key, signer, &types.LegacyTx{
+ Nonce: 2,
+ GasPrice: big.NewInt(30000),
+ Gas: 0x45454545,
+ To: &to,
+ })
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ // Create the blocks.
+ blocks := make([]*types.Block, nblock)
+ for i := 0; i < nblock; i++ {
+ header := &types.Header{
+ Number: big.NewInt(int64(i)),
+ Extra: []byte("test block"),
+ }
+ blocks[i] = types.NewBlockWithHeader(header).WithBody(txs, nil)
+ blocks[i].Hash() // pre-cache the block hash
+ }
+ return blocks
+}
+
+// makeTestReceipts creates fake receipts for the ancient write benchmark.
+func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
+ receipts := make([]*types.Receipt, nPerBlock)
+ for i := 0; i < len(receipts); i++ {
+ receipts[i] = &types.Receipt{
+ Status: types.ReceiptStatusSuccessful,
+ CumulativeGasUsed: 0x888888888,
+ Logs: make([]*types.Log, 5),
+ }
+ }
+ allReceipts := make([]types.Receipts, n)
+ for i := 0; i < n; i++ {
+ allReceipts[i] = receipts
+ }
+ return allReceipts
+}
+
+type fullLogRLP struct {
+ Address common.Address
+ Topics []common.Hash
+ Data []byte
+ BlockNumber uint64
+ TxHash common.Hash
+ TxIndex uint
+ BlockHash common.Hash
+ Index uint
+}
+
+func newFullLogRLP(l *types.Log) *fullLogRLP {
+ return &fullLogRLP{
+ Address: l.Address,
+ Topics: l.Topics,
+ Data: l.Data,
+ BlockNumber: l.BlockNumber,
+ TxHash: l.TxHash,
+ TxIndex: l.TxIndex,
+ BlockHash: l.BlockHash,
+ Index: l.Index,
+ }
+}
+
+// Tests that logs associated with a single block can be retrieved.
+func TestReadLogs(t *testing.T) {
+ db := NewMemoryDatabase()
+
+ // Create a live block since we need metadata to reconstruct the receipt
+ tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
+ tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)
+
+ body := &types.Body{Transactions: types.Transactions{tx1, tx2}}
+
+ // Create the two receipts to manage afterwards
+ receipt1 := &types.Receipt{
+ Status: types.ReceiptStatusFailed,
+ CumulativeGasUsed: 1,
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x11})},
+ {Address: common.BytesToAddress([]byte{0x01, 0x11})},
+ },
+ TxHash: tx1.Hash(),
+ ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
+ GasUsed: 111111,
+ }
+ receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})
+
+ receipt2 := &types.Receipt{
+ PostState: common.Hash{2}.Bytes(),
+ CumulativeGasUsed: 2,
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x22})},
+ {Address: common.BytesToAddress([]byte{0x02, 0x22})},
+ },
+ TxHash: tx2.Hash(),
+ ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
+ GasUsed: 222222,
+ }
+ receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
+ receipts := []*types.Receipt{receipt1, receipt2}
+
+ hash := common.BytesToHash([]byte{0x03, 0x14})
+ // Check that no receipt entries are in a pristine database
+ if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
+ t.Fatalf("non existent receipts returned: %v", rs)
+ }
+ // Insert the body that corresponds to the receipts
+ WriteBody(db, hash, 0, body)
+
+ // Insert the receipt slice into the database and check presence
+ WriteReceipts(db, hash, 0, receipts)
+
+ logs := ReadLogs(db, hash, 0)
+ if len(logs) == 0 {
+ t.Fatalf("no logs returned")
+ }
+ if have, want := len(logs), 2; have != want {
+ t.Fatalf("unexpected number of logs returned, have %d want %d", have, want)
+ }
+ if have, want := len(logs[0]), 2; have != want {
+ t.Fatalf("unexpected number of logs[0] returned, have %d want %d", have, want)
+ }
+ if have, want := len(logs[1]), 2; have != want {
+ t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want)
+ }
+
+ // Fill in log fields so we can compare their rlp encoding
+ if err := types.Receipts(receipts).DeriveFields(params.TestChainConfig, hash, 0, body.Transactions); err != nil {
+ t.Fatal(err)
+ }
+ for i, pr := range receipts {
+ for j, pl := range pr.Logs {
+ rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j]))
+ if err != nil {
+ t.Fatal(err)
+ }
+ rlpWant, err := rlp.EncodeToBytes(newFullLogRLP(pl))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(rlpHave, rlpWant) {
+ t.Fatalf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
+ }
+ }
+ }
+}
+
+func TestDeriveLogFields(t *testing.T) {
+ // Create a few transactions to have receipts for
+ to2 := common.HexToAddress("0x2")
+ to3 := common.HexToAddress("0x3")
+ txs := types.Transactions{
+ types.NewTx(&types.LegacyTx{
+ Nonce: 1,
+ Value: big.NewInt(1),
+ Gas: 1,
+ GasPrice: big.NewInt(1),
+ }),
+ types.NewTx(&types.LegacyTx{
+ To: &to2,
+ Nonce: 2,
+ Value: big.NewInt(2),
+ Gas: 2,
+ GasPrice: big.NewInt(2),
+ }),
+ types.NewTx(&types.AccessListTx{
+ To: &to3,
+ Nonce: 3,
+ Value: big.NewInt(3),
+ Gas: 3,
+ GasPrice: big.NewInt(3),
+ }),
+ }
+ // Create the corresponding receipts
+ receipts := []*receiptLogs{
+ {
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x11})},
+ {Address: common.BytesToAddress([]byte{0x01, 0x11})},
+ },
+ },
+ {
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x22})},
+ {Address: common.BytesToAddress([]byte{0x02, 0x22})},
+ },
+ },
+ {
+ Logs: []*types.Log{
+ {Address: common.BytesToAddress([]byte{0x33})},
+ {Address: common.BytesToAddress([]byte{0x03, 0x33})},
+ },
+ },
+ }
+
+ // Derive log metadata fields
+ number := big.NewInt(1)
+ hash := common.BytesToHash([]byte{0x03, 0x14})
+ if err := deriveLogFields(receipts, hash, number.Uint64(), txs); err != nil {
+ t.Fatal(err)
+ }
+
+ // Iterate over all the computed fields and check that they're correct
+ logIndex := uint(0)
+ for i := range receipts {
+ for j := range receipts[i].Logs {
+ if receipts[i].Logs[j].BlockNumber != number.Uint64() {
+ t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64())
+ }
+ if receipts[i].Logs[j].BlockHash != hash {
+ t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
+ }
+ if receipts[i].Logs[j].TxHash != txs[i].Hash() {
+ t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
+ }
+ if receipts[i].Logs[j].TxIndex != uint(i) {
+ t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
+ }
+ if receipts[i].Logs[j].Index != logIndex {
+ t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
+ }
+ logIndex++
+ }
+ }
+}
+
+func BenchmarkDecodeRLPLogs(b *testing.B) {
+ // Encoded receipts from block 0x14ee094309fbe8f70b65f45ebcc08fb33f126942d97464aad5eb91cfd1e2d269
+ buf, err := ioutil.ReadFile("testdata/stored_receipts.bin")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.Run("ReceiptForStorage", func(b *testing.B) {
+ b.ReportAllocs()
+ var r []*types.ReceiptForStorage
+ for i := 0; i < b.N; i++ {
+ if err := rlp.DecodeBytes(buf, &r); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ b.Run("rlpLogs", func(b *testing.B) {
+ b.ReportAllocs()
+ var r []*receiptLogs
+ for i := 0; i < b.N; i++ {
+ if err := rlp.DecodeBytes(buf, &r); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 90619169a..0e116ef99 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -104,9 +104,9 @@ func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
return 0, errNotSupported
}
-// AppendAncient returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
- return errNotSupported
+// ModifyAncients is not supported.
+func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
+ return 0, errNotSupported
}
// TruncateAncients returns an error as we don't have a backing chain freezer.
@@ -122,9 +122,7 @@ func (db *nofreezedb) Sync() error {
// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
- return &nofreezedb{
- KeyValueStore: db,
- }
+ return &nofreezedb{KeyValueStore: db}
}
// NewDatabaseWithFreezer creates a high level database on top of a given key-
@@ -132,7 +130,7 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
// storage.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
// Create the idle freezer instance
- frdb, err := newFreezer(freezer, namespace, readonly)
+ frdb, err := newFreezer(freezer, namespace, readonly, freezerTableSize, FreezerNoSnappy)
if err != nil {
return nil, err
}
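For context on the `ModifyAncients` call wired in here, this is the shape of the `ethdb` interfaces implied by the usage in this diff, reproduced only as a reading aid (the authoritative definitions live in the `ethdb` package):

```go
package example

// AncientWriteOp is handed to the ModifyAncients callback.
type AncientWriteOp interface {
	// Append adds an RLP-encoded item to the given table.
	Append(kind string, number uint64, item interface{}) error
	// AppendRaw adds a pre-encoded blob to the given table.
	AppendRaw(kind string, number uint64, item []byte) error
}

// AncientWriter is the write side of the ancient store.
type AncientWriter interface {
	// ModifyAncients runs fn as one atomic write: either every append in
	// fn becomes visible, or the store rolls back to its prior length.
	ModifyAncients(fn func(AncientWriteOp) error) (int64, error)
	TruncateAncients(n uint64) error
	Sync() error
}
```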
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 12130cf33..4cecbc50f 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -61,6 +61,9 @@ const (
// freezerBatchLimit is the maximum number of blocks to freeze in one batch
// before doing an fsync and deleting it from the key-value store.
freezerBatchLimit = 30000
+
+ // freezerTableSize defines the maximum size of freezer data files.
+ freezerTableSize = 2 * 1000 * 1000 * 1000
)
// freezer is an memory mapped append-only database to store immutable chain data
@@ -77,6 +80,10 @@ type freezer struct {
frozen uint64 // Number of blocks already frozen
threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
+ // This lock synchronizes writers and the truncate operation.
+ writeLock sync.Mutex
+ writeBatch *freezerBatch
+
readonly bool
tables map[string]*freezerTable // Data tables for storing everything
instanceLock fileutil.Releaser // File-system lock to prevent double opens
@@ -90,7 +97,10 @@ type freezer struct {
// newFreezer creates a chain freezer that moves ancient chain data into
// append-only flat file containers.
-func newFreezer(datadir string, namespace string, readonly bool) (*freezer, error) {
+//
+// The 'tables' argument defines the data tables. If the value of a map
+// entry is true, snappy compression is disabled for the table.
+func newFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*freezer, error) {
// Create the initial freezer object
var (
readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
@@ -119,8 +129,10 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro
trigger: make(chan chan struct{}),
quit: make(chan struct{}),
}
- for name, disableSnappy := range FreezerNoSnappy {
- table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, disableSnappy)
+
+ // Create the tables.
+ for name, disableSnappy := range tables {
+ table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy)
if err != nil {
for _, table := range freezer.tables {
table.Close()
@@ -130,6 +142,8 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro
}
freezer.tables[name] = table
}
+
+ // Truncate all tables to common length.
if err := freezer.repair(); err != nil {
for _, table := range freezer.tables {
table.Close()
@@ -137,12 +151,19 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro
lock.Release()
return nil, err
}
+
+ // Create the write batch.
+ freezer.writeBatch = newFreezerBatch(freezer)
+
log.Info("Opened ancient database", "database", datadir, "readonly", readonly)
return freezer, nil
}
// Close terminates the chain freezer, unmapping all the data files.
func (f *freezer) Close() error {
+ f.writeLock.Lock()
+ defer f.writeLock.Unlock()
+
var errs []error
f.closeOnce.Do(func() {
close(f.quit)
@@ -199,61 +220,49 @@ func (f *freezer) Ancients() (uint64, error) {
// AncientSize returns the ancient size of the specified category.
func (f *freezer) AncientSize(kind string) (uint64, error) {
+ // This needs the write lock to avoid data races on table fields.
+ // Speed doesn't matter here, AncientSize is for debugging.
+ f.writeLock.Lock()
+ defer f.writeLock.Unlock()
+
if table := f.tables[kind]; table != nil {
return table.size()
}
return 0, errUnknownTable
}
-// AppendAncient injects all binary blobs belong to block at the end of the
-// append-only immutable table files.
-//
-// Notably, this function is lock free but kind of thread-safe. All out-of-order
-// injection will be rejected. But if two injections with same number happen at
-// the same time, we can get into the trouble.
-func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, td []byte) (err error) {
+// ModifyAncients runs the given write operation.
+func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
if f.readonly {
- return errReadOnly
+ return 0, errReadOnly
}
- // Ensure the binary blobs we are appending is continuous with freezer.
- if atomic.LoadUint64(&f.frozen) != number {
- return errOutOrderInsertion
- }
- // Rollback all inserted data if any insertion below failed to ensure
- // the tables won't out of sync.
+ f.writeLock.Lock()
+ defer f.writeLock.Unlock()
+
+ // Roll back all tables to the starting position in case of error.
+ prevItem := f.frozen
defer func() {
if err != nil {
- rerr := f.repair()
- if rerr != nil {
- log.Crit("Failed to repair freezer", "err", rerr)
+ // The write operation has failed. Go back to the previous item position.
+ for name, table := range f.tables {
+ err := table.truncate(prevItem)
+ if err != nil {
+ log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
+ }
}
- log.Info("Append ancient failed", "number", number, "err", err)
}
}()
- pluginAppendAncient(number, hash, header, body, receipts, td)
- // Inject all the components into the relevant data tables
- if err := f.tables[freezerHashTable].Append(f.frozen, hash[:]); err != nil {
- log.Error("Failed to append ancient hash", "number", f.frozen, "hash", hash, "err", err)
- return err
+
+ f.writeBatch.reset()
+ if err := fn(f.writeBatch); err != nil {
+ return 0, err
}
- if err := f.tables[freezerHeaderTable].Append(f.frozen, header); err != nil {
- log.Error("Failed to append ancient header", "number", f.frozen, "hash", hash, "err", err)
- return err
+ item, writeSize, err := f.writeBatch.commit()
+ if err != nil {
+ return 0, err
}
- if err := f.tables[freezerBodiesTable].Append(f.frozen, body); err != nil {
- log.Error("Failed to append ancient body", "number", f.frozen, "hash", hash, "err", err)
- return err
- }
- if err := f.tables[freezerReceiptTable].Append(f.frozen, receipts); err != nil {
- log.Error("Failed to append ancient receipts", "number", f.frozen, "hash", hash, "err", err)
- return err
- }
- if err := f.tables[freezerDifficultyTable].Append(f.frozen, td); err != nil {
- log.Error("Failed to append ancient difficulty", "number", f.frozen, "hash", hash, "err", err)
- return err
- }
- atomic.AddUint64(&f.frozen, 1) // Only modify atomically
- return nil
+ atomic.StoreUint64(&f.frozen, item)
+ return writeSize, nil
}
// TruncateAncients discards any recent data above the provided threshold number.
@@ -261,6 +270,9 @@ func (f *freezer) TruncateAncients(items uint64) error {
if f.readonly {
return errReadOnly
}
+ f.writeLock.Lock()
+ defer f.writeLock.Unlock()
+
if atomic.LoadUint64(&f.frozen) <= items {
return nil
}
@@ -287,6 +299,24 @@ func (f *freezer) Sync() error {
return nil
}
+// repair truncates all data tables to the same length.
+func (f *freezer) repair() error {
+ min := uint64(math.MaxUint64)
+ for _, table := range f.tables {
+ items := atomic.LoadUint64(&table.items)
+ if min > items {
+ min = items
+ }
+ }
+ for _, table := range f.tables {
+ if err := table.truncate(min); err != nil {
+ return err
+ }
+ }
+ atomic.StoreUint64(&f.frozen, min)
+ return nil
+}
+
// freeze is a background thread that periodically checks the blockchain for any
// import progress and moves ancient data from the fast database into the freezer.
//
@@ -353,54 +383,28 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
backoff = true
continue
}
+
// Seems we have data ready to be frozen, process in usable batches
- limit := *number - threshold
- if limit-f.frozen > freezerBatchLimit {
- limit = f.frozen + freezerBatchLimit
- }
var (
start = time.Now()
- first = f.frozen
- ancients = make([]common.Hash, 0, limit-f.frozen)
+ first, _ = f.Ancients()
+ limit = *number - threshold
)
- for f.frozen <= limit {
- // Retrieves all the components of the canonical block
- hash := ReadCanonicalHash(nfdb, f.frozen)
- if hash == (common.Hash{}) {
- log.Error("Canonical hash missing, can't freeze", "number", f.frozen)
- break
- }
- header := ReadHeaderRLP(nfdb, hash, f.frozen)
- if len(header) == 0 {
- log.Error("Block header missing, can't freeze", "number", f.frozen, "hash", hash)
- break
- }
- body := ReadBodyRLP(nfdb, hash, f.frozen)
- if len(body) == 0 {
- log.Error("Block body missing, can't freeze", "number", f.frozen, "hash", hash)
- break
- }
- receipts := ReadReceiptsRLP(nfdb, hash, f.frozen)
- if len(receipts) == 0 {
- log.Error("Block receipts missing, can't freeze", "number", f.frozen, "hash", hash)
- break
- }
- td := ReadTdRLP(nfdb, hash, f.frozen)
- if len(td) == 0 {
- log.Error("Total difficulty missing, can't freeze", "number", f.frozen, "hash", hash)
- break
- }
- log.Trace("Deep froze ancient block", "number", f.frozen, "hash", hash)
- // Inject all the components into the relevant data tables
- if err := f.AppendAncient(f.frozen, hash[:], header, body, receipts, td); err != nil {
- break
- }
- ancients = append(ancients, hash)
+ if limit-first > freezerBatchLimit {
+ limit = first + freezerBatchLimit
}
+ ancients, err := f.freezeRange(nfdb, first, limit)
+ if err != nil {
+ log.Error("Error in block freeze operation", "err", err)
+ backoff = true
+ continue
+ }
+
// Batch of blocks have been frozen, flush them before wiping from leveldb
if err := f.Sync(); err != nil {
log.Crit("Failed to flush frozen tables", "err", err)
}
+
// Wipe out all data from the active database
batch := db.NewBatch()
for i := 0; i < len(ancients); i++ {
@@ -465,6 +469,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
log.Crit("Failed to delete dangling side blocks", "err", err)
}
}
+
// Log something friendly for the user
context := []interface{}{
"blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1,
@@ -481,20 +486,54 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
}
}
-// repair truncates all data tables to the same length.
-func (f *freezer) repair() error {
- min := uint64(math.MaxUint64)
- for _, table := range f.tables {
- items := atomic.LoadUint64(&table.items)
- if min > items {
- min = items
+func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []common.Hash, err error) {
+ hashes = make([]common.Hash, 0, limit-number)
+
+ _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for ; number <= limit; number++ {
+ // Retrieve all the components of the canonical block.
+ hash := ReadCanonicalHash(nfdb, number)
+ if hash == (common.Hash{}) {
+ return fmt.Errorf("canonical hash missing, can't freeze block %d", number)
+ }
+ header := ReadHeaderRLP(nfdb, hash, number)
+ if len(header) == 0 {
+ return fmt.Errorf("block header missing, can't freeze block %d", number)
+ }
+ body := ReadBodyRLP(nfdb, hash, number)
+ if len(body) == 0 {
+ return fmt.Errorf("block body missing, can't freeze block %d", number)
+ }
+ receipts := ReadReceiptsRLP(nfdb, hash, number)
+ if len(receipts) == 0 {
+ return fmt.Errorf("block receipts missing, can't freeze block %d", number)
+ }
+ td := ReadTdRLP(nfdb, hash, number)
+ if len(td) == 0 {
+ return fmt.Errorf("total difficulty missing, can't freeze block %d", number)
+ }
+
+ // Write to the batch.
+ if err := op.AppendRaw(freezerHashTable, number, hash[:]); err != nil {
+ return fmt.Errorf("can't write hash to freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerHeaderTable, number, header); err != nil {
+ return fmt.Errorf("can't write header to freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerBodiesTable, number, body); err != nil {
+ return fmt.Errorf("can't write body to freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerReceiptTable, number, receipts); err != nil {
+ return fmt.Errorf("can't write receipts to freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerDifficultyTable, number, td); err != nil {
+ return fmt.Errorf("can't write td to freezer: %v", err)
+ }
+
+ hashes = append(hashes, hash)
}
- }
- for _, table := range f.tables {
- if err := table.truncate(min); err != nil {
- return err
- }
- }
- atomic.StoreUint64(&f.frozen, min)
- return nil
+ return nil
+ })
+
+ return hashes, err
}
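The all-or-nothing behaviour of `ModifyAncients` above comes from a snapshot-and-truncate rollback: record `f.frozen` before running the batch, and on failure truncate every table back to it so no table ends up ahead of another. A minimal sketch of the structure, with `appendTable` as a hypothetical stand-in for `freezerTable`:

```go
package example

import "log"

// appendTable is a stand-in for freezerTable: it reports its item count
// and can be truncated back to an earlier count.
type appendTable interface {
	Items() uint64
	Truncate(items uint64) error
}

// modifyAtomically snapshots the frozen item count, runs the write, and
// on error truncates every table back to the snapshot.
func modifyAtomically(frozen uint64, tables map[string]appendTable, write func() error) (err error) {
	prev := frozen
	defer func() {
		if err != nil {
			for name, t := range tables {
				if terr := t.Truncate(prev); terr != nil {
					log.Printf("rollback of table %s failed: %v", name, terr)
				}
			}
		}
	}()
	return write()
}
```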
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
new file mode 100644
index 000000000..8297c0ac1
--- /dev/null
+++ b/core/rawdb/freezer_batch.go
@@ -0,0 +1,248 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/golang/snappy"
+)
+
+// This is the maximum amount of data that will be buffered in memory
+// for a single freezer table batch.
+const freezerBatchBufferLimit = 2 * 1024 * 1024
+
+// freezerBatch is a write operation of multiple items on a freezer.
+type freezerBatch struct {
+ tables map[string]*freezerTableBatch
+}
+
+func newFreezerBatch(f *freezer) *freezerBatch {
+ batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))}
+ for kind, table := range f.tables {
+ batch.tables[kind] = table.newBatch()
+ }
+ return batch
+}
+
+// Append adds an RLP-encoded item of the given kind.
+func (batch *freezerBatch) Append(kind string, num uint64, item interface{}) error {
+ return batch.tables[kind].Append(num, item)
+}
+
+// AppendRaw adds an item of the given kind.
+func (batch *freezerBatch) AppendRaw(kind string, num uint64, item []byte) error {
+ return batch.tables[kind].AppendRaw(num, item)
+}
+
+// reset initializes the batch.
+func (batch *freezerBatch) reset() {
+ for _, tb := range batch.tables {
+ tb.reset()
+ }
+}
+
+// commit is called at the end of a write operation and
+// writes all remaining data to tables.
+func (batch *freezerBatch) commit() (item uint64, writeSize int64, err error) {
+ // Check that count agrees on all batches.
+ item = uint64(math.MaxUint64)
+ for name, tb := range batch.tables {
+ if item < math.MaxUint64 && tb.curItem != item {
+ return 0, 0, fmt.Errorf("table %s is at item %d, want %d", name, tb.curItem, item)
+ }
+ item = tb.curItem
+ }
+
+ // Commit all table batches.
+ for _, tb := range batch.tables {
+ if err := tb.commit(); err != nil {
+ return 0, 0, err
+ }
+ writeSize += tb.totalBytes
+ }
+ return item, writeSize, nil
+}
+
+// freezerTableBatch is a batch for a freezer table.
+type freezerTableBatch struct {
+ t *freezerTable
+
+ sb *snappyBuffer
+ encBuffer writeBuffer
+ dataBuffer []byte
+ indexBuffer []byte
+ curItem uint64 // expected index of next append
+ totalBytes int64 // counts written bytes since reset
+}
+
+// newBatch creates a new batch for the freezer table.
+func (t *freezerTable) newBatch() *freezerTableBatch {
+ batch := &freezerTableBatch{t: t}
+ if !t.noCompression {
+ batch.sb = new(snappyBuffer)
+ }
+ batch.reset()
+ return batch
+}
+
+// reset clears the batch for reuse.
+func (batch *freezerTableBatch) reset() {
+ batch.dataBuffer = batch.dataBuffer[:0]
+ batch.indexBuffer = batch.indexBuffer[:0]
+ batch.curItem = atomic.LoadUint64(&batch.t.items)
+ batch.totalBytes = 0
+}
+
+// Append rlp-encodes and adds data at the end of the freezer table. The item number is a
+// precautionary parameter to ensure data correctness, but the table will reject already
+// existing data.
+func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
+ if item != batch.curItem {
+ return errOutOrderInsertion
+ }
+
+ // Encode the item.
+ batch.encBuffer.Reset()
+ if err := rlp.Encode(&batch.encBuffer, data); err != nil {
+ return err
+ }
+ encItem := batch.encBuffer.data
+ if batch.sb != nil {
+ encItem = batch.sb.compress(encItem)
+ }
+ return batch.appendItem(encItem)
+}
+
+// AppendRaw injects a binary blob at the end of the freezer table. The item number is a
+// precautionary parameter to ensure data correctness, but the table will reject already
+// existing data.
+func (batch *freezerTableBatch) AppendRaw(item uint64, blob []byte) error {
+ if item != batch.curItem {
+ return errOutOrderInsertion
+ }
+
+ encItem := blob
+ if batch.sb != nil {
+ encItem = batch.sb.compress(blob)
+ }
+ return batch.appendItem(encItem)
+}
+
+func (batch *freezerTableBatch) appendItem(data []byte) error {
+ // Check if item fits into current data file.
+ itemSize := int64(len(data))
+ itemOffset := batch.t.headBytes + int64(len(batch.dataBuffer))
+ if itemOffset+itemSize > int64(batch.t.maxFileSize) {
+ // It doesn't fit, go to next file first.
+ if err := batch.commit(); err != nil {
+ return err
+ }
+ if err := batch.t.advanceHead(); err != nil {
+ return err
+ }
+ itemOffset = 0
+ }
+
+ // Put data to buffer.
+ batch.dataBuffer = append(batch.dataBuffer, data...)
+ batch.totalBytes += itemSize
+
+ // Put index entry to buffer.
+ entry := indexEntry{filenum: batch.t.headId, offset: uint32(itemOffset + itemSize)}
+ batch.indexBuffer = entry.append(batch.indexBuffer)
+ batch.curItem++
+
+ return batch.maybeCommit()
+}
+
+// maybeCommit writes the buffered data if the buffer is full enough.
+func (batch *freezerTableBatch) maybeCommit() error {
+ if len(batch.dataBuffer) > freezerBatchBufferLimit {
+ return batch.commit()
+ }
+ return nil
+}
+
+// commit writes the batched items to the backing freezerTable.
+func (batch *freezerTableBatch) commit() error {
+ // Write data.
+ _, err := batch.t.head.Write(batch.dataBuffer)
+ if err != nil {
+ return err
+ }
+ dataSize := int64(len(batch.dataBuffer))
+ batch.dataBuffer = batch.dataBuffer[:0]
+
+ // Write index.
+ _, err = batch.t.index.Write(batch.indexBuffer)
+ if err != nil {
+ return err
+ }
+ indexSize := int64(len(batch.indexBuffer))
+ batch.indexBuffer = batch.indexBuffer[:0]
+
+ // Update headBytes of table.
+ batch.t.headBytes += dataSize
+ atomic.StoreUint64(&batch.t.items, batch.curItem)
+
+ // Update metrics.
+ batch.t.sizeGauge.Inc(dataSize + indexSize)
+ batch.t.writeMeter.Mark(dataSize + indexSize)
+ return nil
+}
+
+// snappyBuffer writes snappy in block format, and can be reused. The
+// backing buffer is grown as needed and reused across compress calls.
+type snappyBuffer struct {
+ dst []byte
+}
+
+// compress snappy-compresses the data.
+func (s *snappyBuffer) compress(data []byte) []byte {
+ // The snappy library does not care what the capacity of the buffer is,
+ // but only checks the length. If the length is too small, it will
+ // allocate a brand new buffer.
+ // To avoid that, we check the required size here, and grow the size of the
+ // buffer to utilize the full capacity.
+ if n := snappy.MaxEncodedLen(len(data)); len(s.dst) < n {
+ if cap(s.dst) < n {
+ s.dst = make([]byte, n)
+ }
+ s.dst = s.dst[:n]
+ }
+
+ s.dst = snappy.Encode(s.dst, data)
+ return s.dst
+}
+
+// writeBuffer implements io.Writer for a byte slice.
+type writeBuffer struct {
+ data []byte
+}
+
+func (wb *writeBuffer) Write(data []byte) (int, error) {
+ wb.data = append(wb.data, data...)
+ return len(data), nil
+}
+
+func (wb *writeBuffer) Reset() {
+ wb.data = wb.data[:0]
+}
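To see why `compress` grows `dst` through `snappy.MaxEncodedLen` before encoding: `snappy.Encode` reuses `dst` only when its length — not merely its capacity — is large enough, and otherwise allocates a fresh slice on every call. A runnable sketch of the reuse:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	var dst []byte
	for _, payload := range [][]byte{[]byte("hello"), []byte("freezer batch data")} {
		// Grow once to the worst-case encoded size, then keep reusing.
		if n := snappy.MaxEncodedLen(len(payload)); len(dst) < n {
			dst = make([]byte, n)
		}
		out := snappy.Encode(dst, payload)
		fmt.Printf("compressed %d -> %d bytes\n", len(payload), len(out))
	}
}
```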
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index 9d052f7cd..22405cf9b 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -17,6 +17,7 @@
package rawdb
import (
+ "bytes"
"encoding/binary"
"errors"
"fmt"
@@ -55,19 +56,20 @@ type indexEntry struct {
const indexEntrySize = 6
-// unmarshallBinary deserializes binary b into the rawIndex entry.
+// unmarshalBinary deserializes binary b into the rawIndex entry.
func (i *indexEntry) unmarshalBinary(b []byte) error {
i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
i.offset = binary.BigEndian.Uint32(b[2:6])
return nil
}
-// marshallBinary serializes the rawIndex entry into binary.
-func (i *indexEntry) marshallBinary() []byte {
- b := make([]byte, indexEntrySize)
- binary.BigEndian.PutUint16(b[:2], uint16(i.filenum))
- binary.BigEndian.PutUint32(b[2:6], i.offset)
- return b
+// append adds the encoded entry to the end of b.
+func (i *indexEntry) append(b []byte) []byte {
+ offset := len(b)
+ out := append(b, make([]byte, indexEntrySize)...)
+ binary.BigEndian.PutUint16(out[offset:], uint16(i.filenum))
+ binary.BigEndian.PutUint32(out[offset+2:], i.offset)
+ return out
}
// bounds returns the start- and end- offsets, and the file number of where to
@@ -107,7 +109,7 @@ type freezerTable struct {
// to count how many historic items have gone missing.
itemOffset uint32 // Offset (number of discarded items)
- headBytes uint32 // Number of bytes written to the head file
+ headBytes int64 // Number of bytes written to the head file
readMeter metrics.Meter // Meter for measuring the effective amount of data read
writeMeter metrics.Meter // Meter for measuring the effective amount of data written
sizeGauge metrics.Gauge // Gauge for tracking the combined size of all freezer tables
@@ -118,12 +120,7 @@ type freezerTable struct {
// NewFreezerTable opens the given path as a freezer table.
func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
- return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, disableSnappy)
-}
-
-// newTable opens a freezer table with default settings - 2G files
-func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) {
- return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy)
+ return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
}
// openFreezerFileForAppend opens a freezer table file and seeks to the end
@@ -164,10 +161,10 @@ func truncateFreezerFile(file *os.File, size int64) error {
return nil
}
-// newCustomTable opens a freezer table, creating the data and index files if they are
+// newTable opens a freezer table, creating the data and index files if they are
// non existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
-func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
+func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
// Ensure the containing directory exists and open the indexEntry file
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
@@ -313,7 +310,7 @@ func (t *freezerTable) repair() error {
}
// Update the item and byte counters and return
t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
- t.headBytes = uint32(contentSize)
+ t.headBytes = contentSize
t.headId = lastIndex.filenum
// Close opened files and preopen all files
@@ -387,14 +384,14 @@ func (t *freezerTable) truncate(items uint64) error {
t.releaseFilesAfter(expected.filenum, true)
// Set back the historic head
t.head = newHead
- atomic.StoreUint32(&t.headId, expected.filenum)
+ t.headId = expected.filenum
}
if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
return err
}
// All data files truncated, set internal counters and return
+ t.headBytes = int64(expected.offset)
atomic.StoreUint64(&t.items, items)
- atomic.StoreUint32(&t.headBytes, expected.offset)
// Retrieve the new size and update the total size counter
newSize, err := t.sizeNolock()
@@ -471,94 +468,6 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
}
}
-// Append injects a binary blob at the end of the freezer table. The item number
-// is a precautionary parameter to ensure data correctness, but the table will
-// reject already existing data.
-//
-// Note, this method will *not* flush any data to disk so be sure to explicitly
-// fsync before irreversibly deleting data from the database.
-func (t *freezerTable) Append(item uint64, blob []byte) error {
- // Encode the blob before the lock portion
- if !t.noCompression {
- blob = snappy.Encode(nil, blob)
- }
- // Read lock prevents competition with truncate
- retry, err := t.append(item, blob, false)
- if err != nil {
- return err
- }
- if retry {
- // Read lock was insufficient, retry with a writelock
- _, err = t.append(item, blob, true)
- }
- return err
-}
-
-// append injects a binary blob at the end of the freezer table.
-// Normally, inserts do not require holding the write-lock, so it should be invoked with 'wlock' set to
-// false.
-// However, if the data will grown the current file out of bounds, then this
-// method will return 'true, nil', indicating that the caller should retry, this time
-// with 'wlock' set to true.
-func (t *freezerTable) append(item uint64, encodedBlob []byte, wlock bool) (bool, error) {
- if wlock {
- t.lock.Lock()
- defer t.lock.Unlock()
- } else {
- t.lock.RLock()
- defer t.lock.RUnlock()
- }
- // Ensure the table is still accessible
- if t.index == nil || t.head == nil {
- return false, errClosed
- }
- // Ensure only the next item can be written, nothing else
- if atomic.LoadUint64(&t.items) != item {
- return false, fmt.Errorf("appending unexpected item: want %d, have %d", t.items, item)
- }
- bLen := uint32(len(encodedBlob))
- if t.headBytes+bLen < bLen ||
- t.headBytes+bLen > t.maxFileSize {
- // Writing would overflow, so we need to open a new data file.
- // If we don't already hold the writelock, abort and let the caller
- // invoke this method a second time.
- if !wlock {
- return true, nil
- }
- nextID := atomic.LoadUint32(&t.headId) + 1
- // We open the next file in truncated mode -- if this file already
- // exists, we need to start over from scratch on it
- newHead, err := t.openFile(nextID, openFreezerFileTruncated)
- if err != nil {
- return false, err
- }
- // Close old file, and reopen in RDONLY mode
- t.releaseFile(t.headId)
- t.openFile(t.headId, openFreezerFileForReadOnly)
-
- // Swap out the current head
- t.head = newHead
- atomic.StoreUint32(&t.headBytes, 0)
- atomic.StoreUint32(&t.headId, nextID)
- }
- if _, err := t.head.Write(encodedBlob); err != nil {
- return false, err
- }
- newOffset := atomic.AddUint32(&t.headBytes, bLen)
- idx := indexEntry{
- filenum: atomic.LoadUint32(&t.headId),
- offset: newOffset,
- }
- // Write indexEntry
- t.index.Write(idx.marshallBinary())
-
- t.writeMeter.Mark(int64(bLen + indexEntrySize))
- t.sizeGauge.Inc(int64(bLen + indexEntrySize))
-
- atomic.AddUint64(&t.items, 1)
- return false, nil
-}
-
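
The deleted append implemented an optimistic locking scheme worth spelling out: writes normally proceed under the shared read lock, and only head-file rotation forces a retry under the exclusive write lock. A minimal sketch of that pattern under illustrative names (writeWithRetry and the tryWrite closure are not geth API):

// writeWithRetry runs a write attempt under the read lock first; only if
// the attempt reports that shared state must change (a new head file is
// needed) does it repeat the attempt under the write lock.
func writeWithRetry(tryWrite func(wlock bool) (retry bool, err error)) error {
	// First attempt: shared read lock only.
	retry, err := tryWrite(false)
	if err != nil {
		return err
	}
	if retry {
		// Rotating the head file mutates shared state; redo the write
		// while holding the exclusive lock.
		_, err = tryWrite(true)
	}
	return err
}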
// getIndices returns the index entries for the given from-item, covering 'count' items.
// N.B: The actual number of returned indices for N items will always be N+1 (unless an
// error is returned).
@@ -651,6 +560,7 @@ func (t *freezerTable) RetrieveItems(start, count, maxBytes uint64) ([][]byte, e
func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []int, error) {
t.lock.RLock()
defer t.lock.RUnlock()
+
// Ensure the table and the item is accessible
if t.index == nil || t.head == nil {
return nil, nil, errClosed
@@ -763,6 +673,32 @@ func (t *freezerTable) sizeNolock() (uint64, error) {
return total, nil
}
+// advanceHead should be called when the current head file would outgrow the file limits,
+// and a new file must be opened. The caller of this method must hold the write-lock
+// before calling this method.
+func (t *freezerTable) advanceHead() error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ // We open the next file in truncated mode -- if this file already
+ // exists, we need to start over from scratch on it.
+ nextID := t.headId + 1
+ newHead, err := t.openFile(nextID, openFreezerFileTruncated)
+ if err != nil {
+ return err
+ }
+
+ // Close old file, and reopen in RDONLY mode.
+ t.releaseFile(t.headId)
+ t.openFile(t.headId, openFreezerFileForReadOnly)
+
+ // Swap out the current head.
+ t.head = newHead
+ t.headBytes = 0
+ t.headId = nextID
+ return nil
+}
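
A hedged sketch of how a writer is expected to consult the size limit before appending (the surrounding batch code lives outside this hunk; ensureCapacity is an illustrative helper, not part of the diff):

// ensureCapacity rotates to a fresh head file if appending n more bytes
// would overflow the current one. advanceHead acquires the table lock
// itself, so this helper must not already hold it.
func ensureCapacity(t *freezerTable, n int) error {
	if t.headBytes+int64(n) > int64(t.maxFileSize) {
		return t.advanceHead()
	}
	return nil
}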
+
// Sync pushes any pending data from memory out to disk. This is an expensive
// operation, so use it with care.
func (t *freezerTable) Sync() error {
@@ -775,10 +711,21 @@ func (t *freezerTable) Sync() error {
// DumpIndex is a debug print utility function, mainly for testing. It can also
// be used to analyse a live freezer table index.
func (t *freezerTable) DumpIndex(start, stop int64) {
+ t.dumpIndex(os.Stdout, start, stop)
+}
+
+func (t *freezerTable) dumpIndexString(start, stop int64) string {
+ var out bytes.Buffer
+ out.WriteString("\n")
+ t.dumpIndex(&out, start, stop)
+ return out.String()
+}
+
+func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
buf := make([]byte, indexEntrySize)
- fmt.Printf("| number | fileno | offset |\n")
- fmt.Printf("|--------|--------|--------|\n")
+ fmt.Fprintf(w, "| number | fileno | offset |\n")
+ fmt.Fprintf(w, "|--------|--------|--------|\n")
for i := uint64(start); ; i++ {
if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
@@ -786,10 +733,10 @@ func (t *freezerTable) DumpIndex(start, stop int64) {
}
var entry indexEntry
entry.unmarshalBinary(buf)
- fmt.Printf("| %03d | %03d | %03d | \n", i, entry.filenum, entry.offset)
+ fmt.Fprintf(w, "| %03d | %03d | %03d | \n", i, entry.filenum, entry.offset)
if stop > 0 && i >= uint64(stop) {
break
}
}
- fmt.Printf("|--------------------------|\n")
+ fmt.Fprintf(w, "|--------------------------|\n")
}
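
Routing dumpIndex through an io.Writer lets tests capture the dump rather than printing to stdout; a sketch mirroring how the tests below use it:

// logIndex renders the index table into the test log, which only shows
// on failure or under -v.
func logIndex(t *testing.T, f *freezerTable) {
	t.Helper()
	t.Log(f.dumpIndexString(0, 100))
}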
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index e8a8b5c46..803809b52 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -18,49 +18,36 @@ package rawdb
import (
"bytes"
- "encoding/binary"
"fmt"
- "io/ioutil"
"math/rand"
"os"
"path/filepath"
- "sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/metrics"
+ "github.com/stretchr/testify/require"
)
func init() {
rand.Seed(time.Now().Unix())
}
-// Gets a chunk of data, filled with 'b'
-func getChunk(size int, b int) []byte {
- data := make([]byte, size)
- for i := range data {
- data[i] = byte(b)
- }
- return data
-}
-
// TestFreezerBasics tests initializing a freezer table from scratch, writing to the table,
// and reading it back.
func TestFreezerBasics(t *testing.T) {
t.Parallel()
// set cutoff at 50 bytes
- f, err := newCustomTable(os.TempDir(),
+ f, err := newTable(os.TempDir(),
fmt.Sprintf("unittest-%d", rand.Uint64()),
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
if err != nil {
t.Fatal(err)
}
defer f.Close()
+
// Write 15 bytes 255 times, results in 85 files
- for x := 0; x < 255; x++ {
- data := getChunk(15, x)
- f.Append(uint64(x), data)
- }
+ writeChunks(t, f, 255, 15)
//print(t, f, 0)
//print(t, f, 1)
@@ -98,16 +85,21 @@ func TestFreezerBasicsClosing(t *testing.T) {
f *freezerTable
err error
)
- f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
- // Write 15 bytes 255 times, results in 85 files
+
+ // Write 15 bytes 255 times, results in 85 files.
+ // In-between writes, the table is closed and re-opened.
for x := 0; x < 255; x++ {
data := getChunk(15, x)
- f.Append(uint64(x), data)
+ batch := f.newBatch()
+ require.NoError(t, batch.AppendRaw(uint64(x), data))
+ require.NoError(t, batch.commit())
f.Close()
- f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
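
The tests now write through the batch API instead of the removed Append. The recurring three-step pattern, distilled into an illustrative helper (writeOne is not part of the diff):

// writeOne appends a single blob via the batch API: stage in memory,
// then commit to flush the data and its index entry.
func writeOne(t *testing.T, f *freezerTable, item uint64, blob []byte) {
	t.Helper()
	batch := f.newBatch()
	if err := batch.AppendRaw(item, blob); err != nil {
		t.Fatalf("AppendRaw(%d): %v", item, err)
	}
	if err := batch.commit(); err != nil {
		t.Fatalf("commit: %v", err)
	}
}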
@@ -124,7 +116,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
}
f.Close()
- f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -137,22 +129,22 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
- { // Fill table
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ // Fill table
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
// Write 15 bytes 255 times
- for x := 0; x < 255; x++ {
- data := getChunk(15, x)
- f.Append(uint64(x), data)
- }
+ writeChunks(t, f, 255, 15)
+
// The last item should be there
if _, err = f.Retrieve(0xfe); err != nil {
t.Fatal(err)
}
f.Close()
}
+
// open the index
idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
if err != nil {
@@ -165,9 +157,10 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
}
idxFile.Truncate(stat.Size() - 4)
idxFile.Close()
+
// Now open it again
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -188,22 +181,22 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())
- { // Fill a table and close it
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ // Fill a table and close it
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
// Write 15 bytes 255 times
- for x := 0; x < 0xff; x++ {
- data := getChunk(15, x)
- f.Append(uint64(x), data)
- }
+ writeChunks(t, f, 255, 15)
+
// The last item should be there
if _, err = f.Retrieve(f.items - 1); err != nil {
t.Fatal(err)
}
f.Close()
}
+
// open the index
idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
if err != nil {
@@ -213,9 +206,10 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
idxFile.Truncate(indexEntrySize + indexEntrySize + indexEntrySize/2)
idxFile.Close()
+
// Now open it again
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -228,15 +222,17 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
t.Errorf("Expected error for missing index entry")
}
// We should now be able to store items again, from item = 1
+ batch := f.newBatch()
for x := 1; x < 0xff; x++ {
- data := getChunk(15, ^x)
- f.Append(uint64(x), data)
+ require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
}
+ require.NoError(t, batch.commit())
f.Close()
}
+
// And if we open it, we should now be able to read all of them (new values)
{
- f, _ := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
for y := 1; y < 255; y++ {
exp := getChunk(15, ^y)
got, err := f.Retrieve(uint64(y))
@@ -255,22 +251,21 @@ func TestSnappyDetection(t *testing.T) {
t.Parallel()
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("snappytest-%d", rand.Uint64())
+
// Open with snappy
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
// Write 15 bytes 255 times
- for x := 0; x < 0xff; x++ {
- data := getChunk(15, x)
- f.Append(uint64(x), data)
- }
+ writeChunks(t, f, 255, 15)
f.Close()
}
+
// Open without snappy
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, false)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
if err != nil {
t.Fatal(err)
}
@@ -282,7 +277,7 @@ func TestSnappyDetection(t *testing.T) {
// Open with snappy
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -292,8 +287,8 @@ func TestSnappyDetection(t *testing.T) {
t.Fatalf("expected no error, got %v", err)
}
}
-
}
+
func assertFileSize(f string, size int64) error {
stat, err := os.Stat(f)
if err != nil {
@@ -303,7 +298,6 @@ func assertFileSize(f string, size int64) error {
return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
}
return nil
-
}
// TestFreezerRepairDanglingIndex checks that if the index has more entries than there are data,
@@ -313,16 +307,15 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())
- { // Fill a table and close it
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ // Fill a table and close it
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
// Write 15 bytes 9 times : 150 bytes
- for x := 0; x < 9; x++ {
- data := getChunk(15, x)
- f.Append(uint64(x), data)
- }
+ writeChunks(t, f, 9, 15)
+
// The last item should be there
if _, err = f.Retrieve(f.items - 1); err != nil {
f.Close()
@@ -331,6 +324,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
f.Close()
// File sizes should be 45, 45, 45 : items[3, 3, 3)
}
+
// Crop third file
fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
// Truncate third file: 45 ,45, 20
@@ -345,17 +339,18 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
file.Truncate(20)
file.Close()
}
+
// Open the db again
// It should restore the file(s) to
// 45, 45, 15
// with 3+3+1 items
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
+ defer f.Close()
if f.items != 7 {
- f.Close()
t.Fatalf("expected %d items, got %d", 7, f.items)
}
if err := assertFileSize(fileToCrop, 15); err != nil {
@@ -365,30 +360,29 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
}
func TestFreezerTruncate(t *testing.T) {
-
t.Parallel()
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("truncation-%d", rand.Uint64())
- { // Fill table
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ // Fill table
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
// Write 15 bytes 30 times
- for x := 0; x < 30; x++ {
- data := getChunk(15, x)
- f.Append(uint64(x), data)
- }
+ writeChunks(t, f, 30, 15)
+
// The last item should be there
if _, err = f.Retrieve(f.items - 1); err != nil {
t.Fatal(err)
}
f.Close()
}
+
// Reopen, truncate
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -401,9 +395,7 @@ func TestFreezerTruncate(t *testing.T) {
if f.headBytes != 15 {
t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
}
-
}
-
}
// TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
@@ -412,20 +404,26 @@ func TestFreezerRepairFirstFile(t *testing.T) {
t.Parallel()
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
- { // Fill table
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+
+ // Fill table
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
// Write 80 bytes, splitting out into two files
- f.Append(0, getChunk(40, 0xFF))
- f.Append(1, getChunk(40, 0xEE))
+ batch := f.newBatch()
+ require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
+ require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
+ require.NoError(t, batch.commit())
+
// The last item should be there
- if _, err = f.Retrieve(f.items - 1); err != nil {
+ if _, err = f.Retrieve(1); err != nil {
t.Fatal(err)
}
f.Close()
}
+
// Truncate the file in half
fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
{
@@ -439,9 +437,10 @@ func TestFreezerRepairFirstFile(t *testing.T) {
file.Truncate(20)
file.Close()
}
+
// Reopen
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -449,9 +448,14 @@ func TestFreezerRepairFirstFile(t *testing.T) {
f.Close()
t.Fatalf("expected %d items, got %d", 0, f.items)
}
+
// Write 40 bytes
- f.Append(1, getChunk(40, 0xDD))
+ batch := f.newBatch()
+ require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
+ require.NoError(t, batch.commit())
+
f.Close()
+
// Should have been truncated down to zero and then 40 written
if err := assertFileSize(fileToCrop, 40); err != nil {
t.Fatal(err)
@@ -468,25 +472,26 @@ func TestFreezerReadAndTruncate(t *testing.T) {
t.Parallel()
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
- { // Fill table
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+
+ // Fill table
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
// Write 15 bytes 30 times
- for x := 0; x < 30; x++ {
- data := getChunk(15, x)
- f.Append(uint64(x), data)
- }
+ writeChunks(t, f, 30, 15)
+
// The last item should be there
if _, err = f.Retrieve(f.items - 1); err != nil {
t.Fatal(err)
}
f.Close()
}
+
// Reopen and read all files
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -497,40 +502,48 @@ func TestFreezerReadAndTruncate(t *testing.T) {
for y := byte(0); y < 30; y++ {
f.Retrieve(uint64(y))
}
+
// Now, truncate back to zero
f.truncate(0)
+
// Write the data again
+ batch := f.newBatch()
for x := 0; x < 30; x++ {
- data := getChunk(15, ^x)
- if err := f.Append(uint64(x), data); err != nil {
- t.Fatalf("error %v", err)
- }
+ require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
}
+ require.NoError(t, batch.commit())
f.Close()
}
}
-func TestOffset(t *testing.T) {
+func TestFreezerOffset(t *testing.T) {
t.Parallel()
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("offset-%d", rand.Uint64())
- { // Fill table
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+
+ // Fill table
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
if err != nil {
t.Fatal(err)
}
+
// Write 6 x 20 bytes, splitting out into three files
- f.Append(0, getChunk(20, 0xFF))
- f.Append(1, getChunk(20, 0xEE))
+ batch := f.newBatch()
+ require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
+ require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
- f.Append(2, getChunk(20, 0xdd))
- f.Append(3, getChunk(20, 0xcc))
+ require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
+ require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
- f.Append(4, getChunk(20, 0xbb))
- f.Append(5, getChunk(20, 0xaa))
- f.DumpIndex(0, 100)
+ require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+ require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+ require.NoError(t, batch.commit())
+
+ t.Log(f.dumpIndexString(0, 100))
f.Close()
}
+
// Now crop it.
{
// delete files 0 and 1
@@ -558,7 +571,7 @@ func TestOffset(t *testing.T) {
filenum: tailId,
offset: itemOffset,
}
- buf := zeroIndex.marshallBinary()
+ buf := zeroIndex.append(nil)
// Overwrite index zero
copy(indexBuf, buf)
// Remove the four next indices by overwriting
@@ -567,44 +580,36 @@ func TestOffset(t *testing.T) {
// Need to truncate the moved index items
indexFile.Truncate(indexEntrySize * (1 + 2))
indexFile.Close()
-
}
+
// Now open again
- checkPresent := func(numDeleted uint64) {
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
if err != nil {
t.Fatal(err)
}
- f.DumpIndex(0, 100)
- // It should allow writing item 6
- f.Append(numDeleted+2, getChunk(20, 0x99))
+ defer f.Close()
+ t.Log(f.dumpIndexString(0, 100))
- // It should be fine to fetch 4,5,6
- if got, err := f.Retrieve(numDeleted); err != nil {
- t.Fatal(err)
- } else if exp := getChunk(20, 0xbb); !bytes.Equal(got, exp) {
- t.Fatalf("expected %x got %x", exp, got)
- }
- if got, err := f.Retrieve(numDeleted + 1); err != nil {
- t.Fatal(err)
- } else if exp := getChunk(20, 0xaa); !bytes.Equal(got, exp) {
- t.Fatalf("expected %x got %x", exp, got)
- }
- if got, err := f.Retrieve(numDeleted + 2); err != nil {
- t.Fatal(err)
- } else if exp := getChunk(20, 0x99); !bytes.Equal(got, exp) {
- t.Fatalf("expected %x got %x", exp, got)
- }
+ // It should allow writing item 6.
+ batch := f.newBatch()
+ require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
+ require.NoError(t, batch.commit())
- // It should error at 0, 1,2,3
- for i := numDeleted - 1; i > numDeleted-10; i-- {
- if _, err := f.Retrieve(i); err == nil {
- t.Fatal("expected err")
- }
- }
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ 1: errOutOfBounds,
+ 2: errOutOfBounds,
+ 3: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 4: getChunk(20, 0xbb),
+ 5: getChunk(20, 0xaa),
+ 6: getChunk(20, 0x99),
+ })
}
- checkPresent(4)
- // Now, let's pretend we have deleted 1M items
+
+ // Edit the index again, with a much larger initial offset of 1M.
{
// Read the index file
p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
@@ -624,13 +629,71 @@ func TestOffset(t *testing.T) {
offset: itemOffset,
filenum: tailId,
}
- buf := zeroIndex.marshallBinary()
+ buf := zeroIndex.append(nil)
// Overwrite index zero
copy(indexBuf, buf)
indexFile.WriteAt(indexBuf, 0)
indexFile.Close()
}
- checkPresent(1000000)
+
+ // Check that existing items have been moved to index 1M.
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ t.Log(f.dumpIndexString(0, 100))
+
+ checkRetrieveError(t, f, map[uint64]error{
+ 0: errOutOfBounds,
+ 1: errOutOfBounds,
+ 2: errOutOfBounds,
+ 3: errOutOfBounds,
+ 999999: errOutOfBounds,
+ })
+ checkRetrieve(t, f, map[uint64][]byte{
+ 1000000: getChunk(20, 0xbb),
+ 1000001: getChunk(20, 0xaa),
+ })
+ }
+}
+
+func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
+ t.Helper()
+
+ for item, wantBytes := range items {
+ value, err := f.Retrieve(item)
+ if err != nil {
+ t.Fatalf("can't get expected item %d: %v", item, err)
+ }
+ if !bytes.Equal(value, wantBytes) {
+ t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
+ }
+ }
+}
+
+func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
+ t.Helper()
+
+ for item, wantError := range items {
+ value, err := f.Retrieve(item)
+ if err == nil {
+ t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
+ }
+ if err != wantError {
+ t.Fatalf("wrong error for item %d: %v", item, err)
+ }
+ }
+}
+
+// Gets a chunk of data, filled with 'b'
+func getChunk(size int, b int) []byte {
+ data := make([]byte, size)
+ for i := range data {
+ data[i] = byte(b)
+ }
+ return data
}
// TODO (?)
@@ -644,53 +707,18 @@ func TestOffset(t *testing.T) {
// should be handled already, and the case described above can only (?) happen if an
// external process/user deletes files from the filesystem.
-// TestAppendTruncateParallel is a test to check if the Append/truncate operations are
-// racy.
-//
-// The reason why it's not a regular fuzzer, within tests/fuzzers, is that it is dependent
-// on timing rather than 'clever' input -- there's no determinism.
-func TestAppendTruncateParallel(t *testing.T) {
- dir, err := ioutil.TempDir("", "freezer")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
+func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
+ t.Helper()
- f, err := newCustomTable(dir, "tmp", metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, 8, true)
- if err != nil {
- t.Fatal(err)
- }
-
- fill := func(mark uint64) []byte {
- data := make([]byte, 8)
- binary.LittleEndian.PutUint64(data, mark)
- return data
- }
-
- for i := 0; i < 5000; i++ {
- f.truncate(0)
- data0 := fill(0)
- f.Append(0, data0)
- data1 := fill(1)
-
- var wg sync.WaitGroup
- wg.Add(2)
- go func() {
- f.truncate(0)
- wg.Done()
- }()
- go func() {
- f.Append(1, data1)
- wg.Done()
- }()
- wg.Wait()
-
- if have, err := f.Retrieve(0); err == nil {
- if !bytes.Equal(have, data0) {
- t.Fatalf("have %x want %x", have, data0)
- }
+ batch := ft.newBatch()
+ for i := 0; i < n; i++ {
+ if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
+ t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
}
}
+ if err := batch.commit(); err != nil {
+ t.Fatalf("Commit returned error: %v", err)
+ }
}
// TestSequentialRead does some basic tests on RetrieveItems.
@@ -698,20 +726,17 @@ func TestSequentialRead(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("batchread-%d", rand.Uint64())
{ // Fill table
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
// Write 15 bytes 30 times
- for x := 0; x < 30; x++ {
- data := getChunk(15, x)
- f.Append(uint64(x), data)
- }
+ writeChunks(t, f, 30, 15)
f.DumpIndex(0, 30)
f.Close()
}
{ // Open it, iterate, verify iteration
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -732,7 +757,7 @@ func TestSequentialRead(t *testing.T) {
}
{ // Open it, iterate, verify byte limit. The byte limit is less than item
// size, so each lookup should only return one item
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
if err != nil {
t.Fatal(err)
}
@@ -761,16 +786,13 @@ func TestSequentialReadByteLimit(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
{ // Fill table
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
if err != nil {
t.Fatal(err)
}
// Write 10 bytes 30 times,
// Splitting it at every 100 bytes (10 items)
- for x := 0; x < 30; x++ {
- data := getChunk(10, x)
- f.Append(uint64(x), data)
- }
+ writeChunks(t, f, 30, 10)
f.Close()
}
for i, tc := range []struct {
@@ -786,7 +808,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
{100, 109, 10},
} {
{
- f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
if err != nil {
t.Fatal(err)
}
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
new file mode 100644
index 000000000..7359131c8
--- /dev/null
+++ b/core/rawdb/freezer_test.go
@@ -0,0 +1,301 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "math/rand"
+ "os"
+ "sync"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/stretchr/testify/require"
+)
+
+var freezerTestTableDef = map[string]bool{"test": true}
+
+func TestFreezerModify(t *testing.T) {
+ t.Parallel()
+
+ // Create test data.
+ var valuesRaw [][]byte
+ var valuesRLP []*big.Int
+ for x := 0; x < 100; x++ {
+ v := getChunk(256, x)
+ valuesRaw = append(valuesRaw, v)
+ iv := big.NewInt(int64(x))
+ iv = iv.Exp(iv, iv, nil)
+ valuesRLP = append(valuesRLP, iv)
+ }
+
+ tables := map[string]bool{"raw": true, "rlp": false}
+ f, dir := newFreezerForTesting(t, tables)
+ defer os.RemoveAll(dir)
+ defer f.Close()
+
+ // Commit test data.
+ _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for i := range valuesRaw {
+ if err := op.AppendRaw("raw", uint64(i), valuesRaw[i]); err != nil {
+ return err
+ }
+ if err := op.Append("rlp", uint64(i), valuesRLP[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ t.Fatal("ModifyAncients failed:", err)
+ }
+
+ // Dump indexes.
+ for _, table := range f.tables {
+ t.Log(table.name, "index:", table.dumpIndexString(0, int64(len(valuesRaw))))
+ }
+
+ // Read back test data.
+ checkAncientCount(t, f, "raw", uint64(len(valuesRaw)))
+ checkAncientCount(t, f, "rlp", uint64(len(valuesRLP)))
+ for i := range valuesRaw {
+ v, _ := f.Ancient("raw", uint64(i))
+ if !bytes.Equal(v, valuesRaw[i]) {
+ t.Fatalf("wrong raw value at %d: %x", i, v)
+ }
+ ivEnc, _ := f.Ancient("rlp", uint64(i))
+ want, _ := rlp.EncodeToBytes(valuesRLP[i])
+ if !bytes.Equal(ivEnc, want) {
+ t.Fatalf("wrong RLP value at %d: %x", i, ivEnc)
+ }
+ }
+}
+
+// This checks that ModifyAncients rolls back freezer updates
+// when the function passed to it returns an error.
+func TestFreezerModifyRollback(t *testing.T) {
+ t.Parallel()
+
+ f, dir := newFreezerForTesting(t, freezerTestTableDef)
+ defer os.RemoveAll(dir)
+
+ theError := errors.New("oops")
+ _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ // Append three items. This creates two files immediately,
+ // because the table size limit of the test freezer is 2048.
+ require.NoError(t, op.AppendRaw("test", 0, make([]byte, 2048)))
+ require.NoError(t, op.AppendRaw("test", 1, make([]byte, 2048)))
+ require.NoError(t, op.AppendRaw("test", 2, make([]byte, 2048)))
+ return theError
+ })
+ if err != theError {
+ t.Errorf("ModifyAncients returned wrong error %q", err)
+ }
+ checkAncientCount(t, f, "test", 0)
+ f.Close()
+
+ // Reopen and check that the rolled-back data doesn't reappear.
+ tables := map[string]bool{"test": true}
+ f2, err := newFreezer(dir, "", false, 2049, tables)
+ if err != nil {
+ t.Fatalf("can't reopen freezer after failed ModifyAncients: %v", err)
+ }
+ defer f2.Close()
+ checkAncientCount(t, f2, "test", 0)
+}
+
+// This test runs ModifyAncients and Ancient concurrently with each other.
+func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
+ t.Parallel()
+
+ f, dir := newFreezerForTesting(t, freezerTestTableDef)
+ defer os.RemoveAll(dir)
+ defer f.Close()
+
+ var (
+ numReaders = 5
+ writeBatchSize = uint64(50)
+ written = make(chan uint64, numReaders*6)
+ wg sync.WaitGroup
+ )
+ wg.Add(numReaders + 1)
+
+ // Launch the writer. It appends 10000 items in batches.
+ go func() {
+ defer wg.Done()
+ defer close(written)
+ for item := uint64(0); item < 10000; item += writeBatchSize {
+ _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for i := uint64(0); i < writeBatchSize; i++ {
+ item := item + i
+ value := getChunk(32, int(item))
+ if err := op.AppendRaw("test", item, value); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ for i := 0; i < numReaders; i++ {
+ written <- item + writeBatchSize
+ }
+ }
+ }()
+
+ // Launch the readers. They read random items from the freezer up to the
+ // current frozen item count.
+ for i := 0; i < numReaders; i++ {
+ go func() {
+ defer wg.Done()
+ for frozen := range written {
+ for rc := 0; rc < 80; rc++ {
+ num := uint64(rand.Intn(int(frozen)))
+ value, err := f.Ancient("test", num)
+ if err != nil {
+ panic(fmt.Errorf("error reading %d (frozen %d): %v", num, frozen, err))
+ }
+ if !bytes.Equal(value, getChunk(32, int(num))) {
+ panic(fmt.Errorf("wrong value at %d", num))
+ }
+ }
+ }
+ }()
+ }
+
+ wg.Wait()
+}
+
+// This test runs ModifyAncients and TruncateAncients concurrently with each other.
+func TestFreezerConcurrentModifyTruncate(t *testing.T) {
+ f, dir := newFreezerForTesting(t, freezerTestTableDef)
+ defer os.RemoveAll(dir)
+ defer f.Close()
+
+ var item = make([]byte, 256)
+
+ for i := 0; i < 1000; i++ {
+ // First reset and write 100 items.
+ if err := f.TruncateAncients(0); err != nil {
+ t.Fatal("truncate failed:", err)
+ }
+ _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for i := uint64(0); i < 100; i++ {
+ if err := op.AppendRaw("test", i, item); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ t.Fatal("modify failed:", err)
+ }
+ checkAncientCount(t, f, "test", 100)
+
+ // Now append 100 more items and truncate concurrently.
+ var (
+ wg sync.WaitGroup
+ truncateErr error
+ modifyErr error
+ )
+ wg.Add(3)
+ go func() {
+ _, modifyErr = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for i := uint64(100); i < 200; i++ {
+ if err := op.AppendRaw("test", i, item); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ wg.Done()
+ }()
+ go func() {
+ truncateErr = f.TruncateAncients(10)
+ wg.Done()
+ }()
+ go func() {
+ f.AncientSize("test")
+ wg.Done()
+ }()
+ wg.Wait()
+
+ // Now check the outcome. If the truncate operation went through first, the append
+ // fails, otherwise it succeeds. In either case, the freezer should be positioned
+ // at 10 after both operations are done.
+ if truncateErr != nil {
+ t.Fatal("concurrent truncate failed:", err)
+ }
+ if !(modifyErr == nil || modifyErr == errOutOrderInsertion) {
+ t.Fatal("wrong error from concurrent modify:", modifyErr)
+ }
+ checkAncientCount(t, f, "test", 10)
+ }
+}
+
+func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
+ t.Helper()
+
+ dir, err := ioutil.TempDir("", "freezer")
+ if err != nil {
+ t.Fatal(err)
+ }
+ // note: using low max table size here to ensure the tests actually
+ // switch between multiple files.
+ f, err := newFreezer(dir, "", false, 2049, tables)
+ if err != nil {
+ t.Fatal("can't open freezer", err)
+ }
+ return f, dir
+}
+
+// checkAncientCount verifies that the freezer contains n items.
+func checkAncientCount(t *testing.T, f *freezer, kind string, n uint64) {
+ t.Helper()
+
+ if frozen, _ := f.Ancients(); frozen != n {
+ t.Fatalf("Ancients() returned %d, want %d", frozen, n)
+ }
+
+ // Check at index n-1.
+ if n > 0 {
+ index := n - 1
+ if ok, _ := f.HasAncient(kind, index); !ok {
+ t.Errorf("HasAncient(%q, %d) returned false unexpectedly", kind, index)
+ }
+ if _, err := f.Ancient(kind, index); err != nil {
+ t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
+ }
+ }
+
+ // Check at index n.
+ index := n
+ if ok, _ := f.HasAncient(kind, index); ok {
+ t.Errorf("HasAncient(%q, %d) returned true unexpectedly", kind, index)
+ }
+ if _, err := f.Ancient(kind, index); err == nil {
+ t.Errorf("Ancient(%q, %d) didn't return expected error", kind, index)
+ } else if err != errOutOfBounds {
+ t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
+ }
+}
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 586451c06..02e23b517 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -80,10 +80,9 @@ func (t *table) AncientSize(kind string) (uint64, error) {
return t.db.AncientSize(kind)
}
-// AppendAncient is a noop passthrough that just forwards the request to the underlying
-// database.
-func (t *table) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error {
- return t.db.AppendAncient(number, hash, header, body, receipts, td)
+// ModifyAncients runs an ancient write operation on the underlying database.
+func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) {
+ return t.db.ModifyAncients(fn)
}
// TruncateAncients is a noop passthrough that just forwards the request to the underlying
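
From a caller's perspective, the passthrough means table wrappers participate in the same atomic write transaction as the underlying freezer. A usage sketch, assuming the ethdb.AncientWriter interface exposes ModifyAncients as the passthrough above suggests (the table kind "hashes" is illustrative):

// appendAncientHash writes one entry; all mutations inside the closure
// commit together, and returning an error rolls every table back.
func appendAncientHash(db ethdb.AncientWriter, number uint64, hash []byte) (int64, error) {
	return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		return op.AppendRaw("hashes", number, hash)
	})
}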
diff --git a/core/rawdb/testdata/stored_receipts.bin b/core/rawdb/testdata/stored_receipts.bin
new file mode 100644
index 000000000..8204fae09
Binary files /dev/null and b/core/rawdb/testdata/stored_receipts.bin differ
diff --git a/core/state/database.go b/core/state/database.go
index 1a06e3340..bbcd2358e 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -23,6 +23,7 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
lru "github.com/hashicorp/golang-lru"
@@ -70,6 +71,9 @@ type Trie interface {
// trie.MissingNodeError is returned.
TryGet(key []byte) ([]byte, error)
+ // TryUpdateAccount abstracts an account write to the trie.
+ TryUpdateAccount(key []byte, account *types.StateAccount) error
+
// TryUpdate associates key with value in the trie. If value has length zero, any
// existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
@@ -86,7 +90,7 @@ type Trie interface {
// Commit writes all nodes to the trie's memory database, tracking the internal
// and external (for account tries) references.
- Commit(onleaf trie.LeafCallback) (common.Hash, error)
+ Commit(onleaf trie.LeafCallback) (common.Hash, int, error)
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key.
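
Callers of Trie.Commit adapt by consuming the new middle return value, the count of committed nodes. A caller-side sketch (commitAndLog is illustrative, and log is assumed to be go-ethereum's log package):

// commitAndLog commits the trie and reports how many nodes were written;
// statedb.Commit further below feeds the same count into metrics.
func commitAndLog(tr Trie) (common.Hash, error) {
	root, committed, err := tr.Commit(nil)
	if err != nil {
		return common.Hash{}, err
	}
	log.Debug("Trie committed", "root", root, "nodes", committed)
	return root, nil
}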
diff --git a/core/state/dump.go b/core/state/dump.go
index 00faa4ed6..bfcc03543 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
@@ -140,7 +141,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
it := trie.NewIterator(s.trie.NodeIterator(conf.Start))
for it.Next() {
- var data Account
+ var data types.StateAccount
if err := rlp.DecodeBytes(it.Value, &data); err != nil {
panic(err)
}
diff --git a/core/state/iterator.go b/core/state/iterator.go
index 6a5c73d3d..611df5243 100644
--- a/core/state/iterator.go
+++ b/core/state/iterator.go
@@ -21,6 +21,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
@@ -104,7 +105,7 @@ func (it *NodeIterator) step() error {
return nil
}
// Otherwise we've reached an account node, initiate data iteration
- var account Account
+ var account types.StateAccount
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
return err
}
diff --git a/core/state/metrics.go b/core/state/metrics.go
new file mode 100644
index 000000000..7b40ff37a
--- /dev/null
+++ b/core/state/metrics.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package state
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+var (
+ accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
+ storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
+ accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
+ storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
+ accountCommittedMeter = metrics.NewRegisteredMeter("state/commit/account", nil)
+ storageCommittedMeter = metrics.NewRegisteredMeter("state/commit/storage", nil)
+)
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 4d6e41551..37772ca35 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -29,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -426,7 +425,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
// If it's a leaf node, yes we are touching an account,
// dig into the storage trie further.
if accIter.Leaf() {
- var acc state.Account
+ var acc types.StateAccount
if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil {
return err
}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 7e29e51b2..3e11b4ac6 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -436,7 +436,7 @@ func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string,
for i, key := range result.keys {
snapTrie.Update(key, result.vals[i])
}
- root, _ := snapTrie.Commit(nil)
+ root, _, _ := snapTrie.Commit(nil)
snapTrieDb.Commit(root, false, nil)
}
tr := result.tr
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index a92517b31..582da6a2e 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -60,7 +60,7 @@ func TestGeneration(t *testing.T) {
acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
val, _ = rlp.EncodeToBytes(acc)
accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
- root, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+ root, _, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
triedb.Commit(root, false, nil)
if have, want := root, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"); have != want {
@@ -128,7 +128,7 @@ func TestGenerateExistentState(t *testing.T) {
rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-2")), []byte("val-2"))
rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-3")), []byte("val-3"))
- root, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+ root, _, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
triedb.Commit(root, false, nil)
snap := generateSnapshot(diskdb, triedb, 16, root)
@@ -215,12 +215,12 @@ func (t *testHelper) makeStorageTrie(keys []string, vals []string) []byte {
for i, k := range keys {
stTrie.Update([]byte(k), []byte(vals[i]))
}
- root, _ := stTrie.Commit(nil)
+ root, _, _ := stTrie.Commit(nil)
return root.Bytes()
}
func (t *testHelper) Generate() (common.Hash, *diskLayer) {
- root, _ := t.accTrie.Commit(nil)
+ root, _, _ := t.accTrie.Commit(nil)
t.triedb.Commit(root, false, nil)
snap := generateSnapshot(t.diskdb, t.triedb, 16, root)
return root, snap
@@ -575,7 +575,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-2")), []byte("b-val-2"))
rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-3")), []byte("b-val-3"))
}
- root, _ := accTrie.Commit(nil)
+ root, _, _ := accTrie.Commit(nil)
t.Logf("root: %x", root)
triedb.Commit(root, false, nil)
// To verify the test: If we now inspect the snap db, there should exist extraneous storage items
@@ -637,7 +637,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
rawdb.WriteAccountSnapshot(diskdb, key, val)
}
}
- root, _ := accTrie.Commit(nil)
+ root, _, _ := accTrie.Commit(nil)
t.Logf("root: %x", root)
triedb.Commit(root, false, nil)
@@ -690,7 +690,7 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x07"), val)
}
- root, _ := accTrie.Commit(nil)
+ root, _, _ := accTrie.Commit(nil)
t.Logf("root: %x", root)
triedb.Commit(root, false, nil)
@@ -734,7 +734,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x05"), junk)
}
- root, _ := accTrie.Commit(nil)
+ root, _, _ := accTrie.Commit(nil)
t.Logf("root: %x", root)
triedb.Commit(root, false, nil)
diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go
index 1d9340bba..c1a196c7f 100644
--- a/core/state/snapshot/iterator.go
+++ b/core/state/snapshot/iterator.go
@@ -385,7 +385,7 @@ func (it *diskStorageIterator) Hash() common.Hash {
return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}
-// Slot returns the raw strorage slot content the iterator is currently at.
+// Slot returns the raw storage slot content the iterator is currently at.
func (it *diskStorageIterator) Slot() []byte {
return it.it.Value()
}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 38621ffb6..73e9cb78e 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
@@ -65,7 +66,7 @@ func (s Storage) Copy() Storage {
type stateObject struct {
address common.Address
addrHash common.Hash // hash of ethereum address of the account
- data Account
+ data types.StateAccount
db *StateDB
// DB error.
@@ -97,17 +98,8 @@ func (s *stateObject) empty() bool {
return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, emptyCodeHash)
}
-// Account is the Ethereum consensus representation of accounts.
-// These objects are stored in the main account trie.
-type Account struct {
- Nonce uint64
- Balance *big.Int
- Root common.Hash // merkle root of the storage trie
- CodeHash []byte
-}
-
// newObject creates a state object.
-func newObject(db *StateDB, address common.Address, data Account) *stateObject {
+func newObject(db *StateDB, address common.Address, data types.StateAccount) *stateObject {
if data.Balance == nil {
data.Balance = new(big.Int)
}
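
The Account struct deleted in this hunk is not gone, only relocated: it becomes types.StateAccount in core/types. A sketch of the relocated definition, as it appears upstream to the best of my knowledge:

package types

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// StateAccount is the Ethereum consensus representation of accounts.
// These objects are stored in the main account trie.
type StateAccount struct {
	Nonce    uint64
	Balance  *big.Int
	Root     common.Hash // merkle root of the storage trie
	CodeHash []byte
}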
@@ -130,7 +122,7 @@ func newObject(db *StateDB, address common.Address, data Account) *stateObject {
// EncodeRLP implements rlp.Encoder.
func (s *stateObject) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, s.data)
+ return rlp.Encode(w, &s.data)
}
// setError remembers the first non-nil error it is called with.
@@ -329,7 +321,7 @@ func (s *stateObject) finalise(prefetch bool) {
// It will return nil if the trie has not been loaded and no changes have been made
func (s *stateObject) updateTrie(db Database) Trie {
// Make sure all dirty slots are finalized into the pending storage area
- s.finalise(false) // Don't prefetch any more, pull directly if need be
+ s.finalise(false) // Don't prefetch anymore, pull directly if need be
if len(s.pendingStorage) == 0 {
return s.trie
}
@@ -354,10 +346,12 @@ func (s *stateObject) updateTrie(db Database) Trie {
var v []byte
if (value == common.Hash{}) {
s.setError(tr.TryDelete(key[:]))
+ s.db.StorageDeleted += 1
} else {
// Encoding []byte cannot fail, ok to ignore the error.
v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
s.setError(tr.TryUpdate(key[:], v))
+ s.db.StorageUpdated += 1
}
// If state snapshotting is active, cache the data til commit
if s.db.snap != nil {
@@ -368,7 +362,7 @@ func (s *stateObject) updateTrie(db Database) Trie {
s.db.snapStorage[s.addrHash] = storage
}
}
- storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00
+ storage[crypto.HashData(hasher, key[:])] = v // v will be nil if it's deleted
}
usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
}
@@ -396,23 +390,23 @@ func (s *stateObject) updateRoot(db Database) {
// CommitTrie the storage trie of the object to db.
// This updates the trie root.
-func (s *stateObject) CommitTrie(db Database) error {
+func (s *stateObject) CommitTrie(db Database) (int, error) {
// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
- return nil
+ return 0, nil
}
if s.dbErr != nil {
- return s.dbErr
+ return 0, s.dbErr
}
// Track the amount of time wasted on committing the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
}
- root, err := s.trie.Commit(nil)
+ root, committed, err := s.trie.Commit(nil)
if err == nil {
s.data.Root = root
}
- return err
+ return committed, err
}
// AddBalance adds amount to s's balance.
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 8543f453a..2e61935df 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -117,6 +117,11 @@ type StateDB struct {
SnapshotAccountReads time.Duration
SnapshotStorageReads time.Duration
SnapshotCommits time.Duration
+
+ AccountUpdated int
+ StorageUpdated int
+ AccountDeleted int
+ StorageDeleted int
}
// New creates a new state from a given trie.
@@ -455,12 +460,7 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
}
// Encode the account and update the account trie
addr := obj.Address()
-
- data, err := rlp.EncodeToBytes(obj)
- if err != nil {
- panic(fmt.Errorf("can't encode object at %x: %v", addr[:], err))
- }
- if err = s.trie.TryUpdate(addr[:], data); err != nil {
+ if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
}
@@ -507,7 +507,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
// If no live objects are available, attempt to use snapshots
var (
- data *Account
+ data *types.StateAccount
err error
)
if s.snap != nil {
@@ -519,7 +519,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if acc == nil {
return nil
}
- data = &Account{
+ data = &types.StateAccount{
Nonce: acc.Nonce,
Balance: acc.Balance,
CodeHash: acc.CodeHash,
@@ -546,7 +546,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if len(enc) == 0 {
return nil
}
- data = new(Account)
+ data = new(types.StateAccount)
if err := rlp.DecodeBytes(enc, data); err != nil {
log.Error("Failed to decode state object", "addr", addr, "err", err)
return nil
@@ -583,7 +583,7 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject)
s.snapDestructs[prev.addrHash] = struct{}{}
}
}
- newobj = newObject(s, addr, Account{})
+ newobj = newObject(s, addr, types.StateAccount{})
if prev == nil {
s.journal.append(createObjectChange{account: &addr})
} else {
@@ -860,8 +860,10 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; obj.deleted {
s.deleteStateObject(obj)
+ s.AccountDeleted += 1
} else {
s.updateStateObject(obj)
+ s.AccountUpdated += 1
}
usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
}
@@ -903,6 +905,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
s.IntermediateRoot(deleteEmptyObjects)
// Commit objects to the trie, measuring the elapsed time
+ var storageCommitted int
codeUpdates := make(map[common.Hash][]byte)
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
for addr := range s.stateObjectsDirty {
@@ -914,9 +917,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
obj.dirtyCode = false
}
// Write any storage changes in the state object to its storage trie
- if err := obj.CommitTrie(s.db); err != nil {
+ committed, err := obj.CommitTrie(s.db)
+ if err != nil {
return common.Hash{}, err
}
+ storageCommitted += committed
}
}
if len(s.stateObjectsDirty) > 0 {
@@ -934,8 +939,8 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
// The onleaf func is called _serially_, so we can reuse the same account
// for unmarshalling every time.
- var account Account
- root, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
+ var account types.StateAccount
+ root, accountCommitted, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
if err := rlp.DecodeBytes(leaf, &account); err != nil {
return nil
}
@@ -944,8 +949,20 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
return nil
})
+ if err != nil {
+ return common.Hash{}, err
+ }
if metrics.EnabledExpensive {
s.AccountCommits += time.Since(start)
+
+ accountUpdatedMeter.Mark(int64(s.AccountUpdated))
+ storageUpdatedMeter.Mark(int64(s.StorageUpdated))
+ accountDeletedMeter.Mark(int64(s.AccountDeleted))
+ storageDeletedMeter.Mark(int64(s.StorageDeleted))
+ accountCommittedMeter.Mark(int64(accountCommitted))
+ storageCommittedMeter.Mark(int64(storageCommitted))
+ s.AccountUpdated, s.AccountDeleted = 0, 0
+ s.StorageUpdated, s.StorageDeleted = 0, 0
}
// If snapshotting is enabled, update the snapshot tree with this new version
if s.snap != nil {
diff --git a/core/state/sync.go b/core/state/sync.go
index 7a5852fb1..734961d9c 100644
--- a/core/state/sync.go
+++ b/core/state/sync.go
@@ -20,6 +20,7 @@ import (
"bytes"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
@@ -43,7 +44,7 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.S
return err
}
}
- var obj Account
+ var obj types.StateAccount
if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
return err
}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index a13fcf56a..beb8fcfd9 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
@@ -203,7 +204,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
}
results[len(hashQueue)+i] = trie.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
} else {
- var acc Account
+ var acc types.StateAccount
if err := rlp.DecodeBytes(srcTrie.Get(path[0]), &acc); err != nil {
t.Fatalf("failed to decode account on path %x: %v", path, err)
}
diff --git a/core/tx_list.go b/core/tx_list.go
index 607838ba3..ea96f3ebb 100644
--- a/core/tx_list.go
+++ b/core/tx_list.go
@@ -21,6 +21,8 @@ import (
"math"
"math/big"
"sort"
+ "sync"
+ "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -478,9 +480,15 @@ func (h *priceHeap) Pop() interface{} {
// better candidates for inclusion while in other cases (at the top of the baseFee peak)
// the floating heap is better. When baseFee is decreasing they behave similarly.
type txPricedList struct {
- all *txLookup // Pointer to the map of all transactions
- urgent, floating priceHeap // Heaps of prices of all the stored **remote** transactions
- stales int // Number of stale price points to (re-heap trigger)
+ // Number of stale price points to (re-heap trigger).
+ // This field is accessed atomically, and must be the first field
+ // to ensure it has correct alignment for atomic.AddInt64.
+ // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ stales int64
+
+ all *txLookup // Pointer to the map of all transactions
+ urgent, floating priceHeap // Heaps of prices of all the stored **remote** transactions
+ reheapMu sync.Mutex // Mutex asserts that only one routine is reheaping the list
}
const (
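
The alignment comment encodes a general Go rule: 8-byte alignment for 64-bit atomics is only guaranteed for the first word of an allocated struct on 32-bit platforms. A standalone illustration of the pattern (names are illustrative):

import "sync/atomic"

type alignedCounter struct {
	hits int64 // accessed atomically; must stay the first field
	name string
}

func bump(c *alignedCounter) int64 {
	// Safe on 386/ARM because hits sits at offset 0 of the allocation,
	// which Go guarantees to be 8-byte aligned.
	return atomic.AddInt64(&c.hits, 1)
}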
@@ -510,8 +518,8 @@ func (l *txPricedList) Put(tx *types.Transaction, local bool) {
// the heap if a large enough ratio of transactions go stale.
func (l *txPricedList) Removed(count int) {
// Bump the stale counter, but exit if still too low (< 25%)
- l.stales += count
- if l.stales <= (len(l.urgent.list)+len(l.floating.list))/4 {
+ stales := atomic.AddInt64(&l.stales, int64(count))
+ if int(stales) <= (len(l.urgent.list)+len(l.floating.list))/4 {
return
}
// Seems we've reached a critical number of stale transactions, reheap
@@ -535,7 +543,7 @@ func (l *txPricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool
for len(h.list) > 0 {
head := h.list[0]
if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated
- l.stales--
+ atomic.AddInt64(&l.stales, -1)
heap.Pop(h)
continue
}
@@ -561,7 +569,7 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
// Discard stale transactions if found during cleanup
tx := heap.Pop(&l.urgent).(*types.Transaction)
if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
- l.stales--
+ atomic.AddInt64(&l.stales, -1)
continue
}
// Non stale transaction found, move to floating heap
@@ -574,7 +582,7 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
// Discard stale transactions if found during cleanup
tx := heap.Pop(&l.floating).(*types.Transaction)
if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
- l.stales--
+ atomic.AddInt64(&l.stales, -1)
continue
}
// Non stale transaction found, discard it
@@ -594,8 +602,10 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
// Reheap forcibly rebuilds the heap based on the current remote transaction set.
func (l *txPricedList) Reheap() {
+ l.reheapMu.Lock()
+ defer l.reheapMu.Unlock()
start := time.Now()
- l.stales = 0
+ atomic.StoreInt64(&l.stales, 0)
l.urgent.list = make([]*types.Transaction, 0, l.all.RemoteCount())
l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
l.urgent.list = append(l.urgent.list, tx)
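
The pattern in this file -- an atomically updated counter placed first in the struct, plus a mutex serialising the expensive rebuild -- is worth spelling out. A minimal standalone sketch of the same idea (illustrative only; `counterList` and its fields are invented for this example):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// counterList mirrors the txPricedList layout: the atomically accessed
// int64 comes first so it is 8-byte aligned even on 32-bit platforms
// (see https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
type counterList struct {
	stales int64 // must stay the first field for atomic alignment

	reheapMu sync.Mutex // only one goroutine may rebuild at a time
	items    []int
}

func (l *counterList) removed(count int) {
	// Lock-free bump; rebuild only past the 25% staleness threshold.
	if atomic.AddInt64(&l.stales, int64(count)) <= int64(len(l.items)/4) {
		return
	}
	l.reheap()
}

func (l *counterList) reheap() {
	l.reheapMu.Lock()
	defer l.reheapMu.Unlock()
	atomic.StoreInt64(&l.stales, 0)
	// ... rebuild the heap from the live item set here ...
}

func main() {
	l := &counterList{items: make([]int, 8)}
	l.removed(3) // 3 > 8/4, so this triggers a reheap
	fmt.Println(atomic.LoadInt64(&l.stales)) // 0: counter was reset
}
```
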
diff --git a/core/tx_pool.go b/core/tx_pool.go
index f6228df2f..5c70cf170 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -22,6 +22,7 @@ import (
"math/big"
"sort"
"sync"
+ "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -111,6 +112,14 @@ var (
invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil)
underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil)
+ // throttleTxMeter counts how many transactions are rejected due to too-many-changes between
+ // txpool reorgs.
+ throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
+ // reorgDurationTimer measures how long a txpool reorg takes.
+ reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
+ // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
+ // that this number is pretty low, since txpool reorgs happen very frequently.
+ dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
@@ -256,6 +265,9 @@ type TxPool struct {
reorgDoneCh chan chan struct{}
reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop
wg sync.WaitGroup // tracks loop, scheduleReorgLoop
+ initDoneCh chan struct{} // is closed once the pool is initialized (for tests)
+
+ changesSinceReorg int // A counter of how many drops we've performed between reorg runs.
}
type txpoolResetRequest struct {
@@ -284,6 +296,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
queueTxEventCh: make(chan *types.Transaction),
reorgDoneCh: make(chan chan struct{}),
reorgShutdownCh: make(chan struct{}),
+ initDoneCh: make(chan struct{}),
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
}
pool.locals = newAccountSet(pool.signer)
@@ -337,6 +350,8 @@ func (pool *TxPool) loop() {
defer evict.Stop()
defer journal.Stop()
+ // Notify tests that the init phase is done
+ close(pool.initDoneCh)
for {
select {
// Handle ChainHeadEvent
@@ -355,8 +370,8 @@ func (pool *TxPool) loop() {
case <-report.C:
pool.mu.RLock()
pending, queued := pool.stats()
- stales := pool.priced.stales
pool.mu.RUnlock()
+ stales := int(atomic.LoadInt64(&pool.priced.stales))
if pending != prevPending || queued != prevQueued || stales != prevStales {
log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
@@ -663,6 +678,15 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
underpricedTxMeter.Mark(1)
return false, ErrUnderpriced
}
+ // We're about to replace a transaction. The reorg does a more thorough
+ // analysis of what to remove and how, but it runs async. We don't want to
+ // do too many replacements between reorg-runs, so we cap the number of
+ // replacements to 25% of the slots
+ if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
+ throttleTxMeter.Mark(1)
+ return false, ErrTxPoolOverflow
+ }
+
// New transaction is better than our worst ones, make room for it.
// If it's a local transaction, forcibly discard all available transactions.
// Otherwise if we can't make enough room for new one, abort the operation.
@@ -674,6 +698,8 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
overflowedTxMeter.Mark(1)
return false, ErrTxPoolOverflow
}
+ // Bump the counter of drops-since-reorg
+ pool.changesSinceReorg += len(drop)
// Kick out the underpriced remote transactions.
for _, tx := range drop {
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
@@ -1114,6 +1140,9 @@ func (pool *TxPool) scheduleReorgLoop() {
// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
+ defer func(t0 time.Time) {
+ reorgDurationTimer.Update(time.Since(t0))
+ }(time.Now())
defer close(done)
var promoteAddrs []common.Address
@@ -1163,6 +1192,8 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt
highestPending := list.LastElement()
pool.pendingNonces.set(addr, highestPending.Nonce()+1)
}
+ dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
+ pool.changesSinceReorg = 0 // Reset change counter
pool.mu.Unlock()
// Notify subsystems for newly added transactions
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index e02096fe2..9b5208de3 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -24,6 +24,7 @@ import (
"math/big"
"math/rand"
"os"
+ "sync/atomic"
"testing"
"time"
@@ -57,14 +58,14 @@ func init() {
}
type testBlockChain struct {
+ gasLimit uint64 // must be first field for 64 bit alignment (atomic access)
statedb *state.StateDB
- gasLimit uint64
chainHeadFeed *event.Feed
}
func (bc *testBlockChain) CurrentBlock() *types.Block {
return types.NewBlock(&types.Header{
- GasLimit: bc.gasLimit,
+ GasLimit: atomic.LoadUint64(&bc.gasLimit),
}, nil, nil, nil, trie.NewStackTrie(nil))
}
@@ -118,11 +119,13 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
func setupTxPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}
+ blockchain := &testBlockChain{10000000, statedb, new(event.Feed)}
key, _ := crypto.GenerateKey()
pool := NewTxPool(testTxPoolConfig, config, blockchain)
+ // wait for the pool to initialize
+ <-pool.initDoneCh
return pool, key
}
@@ -228,7 +231,7 @@ func TestStateChangeDuringTransactionPoolReset(t *testing.T) {
// setup pool with 2 transactions in it
statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether))
- blockchain := &testChain{&testBlockChain{statedb, 1000000000, new(event.Feed)}, address, &trigger}
+ blockchain := &testChain{&testBlockChain{1000000000, statedb, new(event.Feed)}, address, &trigger}
tx0 := transaction(0, 100000, key)
tx1 := transaction(1, 100000, key)
@@ -426,7 +429,7 @@ func TestTransactionChainFork(t *testing.T) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
statedb.AddBalance(addr, big.NewInt(100000000000000))
- pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)}
+ pool.chain = &testBlockChain{1000000, statedb, new(event.Feed)}
<-pool.requestReset(nil, nil)
}
resetState()
@@ -455,7 +458,7 @@ func TestTransactionDoubleNonce(t *testing.T) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
statedb.AddBalance(addr, big.NewInt(100000000000000))
- pool.chain = &testBlockChain{statedb, 1000000, new(event.Feed)}
+ pool.chain = &testBlockChain{1000000, statedb, new(event.Feed)}
<-pool.requestReset(nil, nil)
}
resetState()
@@ -625,7 +628,7 @@ func TestTransactionDropping(t *testing.T) {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4)
}
// Reduce the block gas limit, check that invalidated transactions are dropped
- pool.chain.(*testBlockChain).gasLimit = 100
+ atomic.StoreUint64(&pool.chain.(*testBlockChain).gasLimit, 100)
<-pool.requestReset(nil, nil)
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
@@ -653,7 +656,7 @@ func TestTransactionPostponing(t *testing.T) {
// Create the pool to test the postponing with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
@@ -866,7 +869,7 @@ func testTransactionQueueGlobalLimiting(t *testing.T, nolocals bool) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
config := testTxPoolConfig
config.NoLocals = nolocals
@@ -958,7 +961,7 @@ func testTransactionQueueTimeLimiting(t *testing.T, nolocals bool) {
// Create the pool to test the non-expiration enforcement
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
config := testTxPoolConfig
config.Lifetime = time.Second
@@ -1143,7 +1146,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
config := testTxPoolConfig
config.GlobalSlots = config.AccountSlots * 10
@@ -1245,7 +1248,7 @@ func TestTransactionCapClearsFromAll(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
config := testTxPoolConfig
config.AccountSlots = 2
@@ -1279,7 +1282,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
// Create the pool to test the limit enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
config := testTxPoolConfig
config.GlobalSlots = 1
@@ -1327,7 +1330,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
@@ -1575,7 +1578,7 @@ func TestTransactionPoolRepricingKeepsLocals(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain)
defer pool.Stop()
@@ -1648,7 +1651,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
config := testTxPoolConfig
config.GlobalSlots = 2
@@ -1754,7 +1757,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
config := testTxPoolConfig
config.GlobalSlots = 128
@@ -1946,20 +1949,20 @@ func TestDualHeapEviction(t *testing.T) {
}
add := func(urgent bool) {
- txs := make([]*types.Transaction, 20)
- for i := range txs {
+ for i := 0; i < 20; i++ {
+ var tx *types.Transaction
// Create a test account and fund it
key, _ := crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000000))
if urgent {
- txs[i] = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key)
- highTip = txs[i]
+ tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key)
+ highTip = tx
} else {
- txs[i] = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
- highCap = txs[i]
+ tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
+ highCap = tx
}
+ pool.AddRemotesSync([]*types.Transaction{tx})
}
- pool.AddRemotes(txs)
pending, queued := pool.Stats()
if pending+queued != 20 {
t.Fatalf("transaction count mismatch: have %d, want %d", pending+queued, 10)
@@ -1986,7 +1989,7 @@ func TestTransactionDeduplication(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
@@ -2052,7 +2055,7 @@ func TestTransactionReplacement(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
@@ -2257,7 +2260,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
// Create the original pool to inject transaction into the journal
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
config := testTxPoolConfig
config.NoLocals = nolocals
@@ -2299,7 +2302,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
// Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive
pool.Stop()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
- blockchain = &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain = &testBlockChain{1000000, statedb, new(event.Feed)}
pool = NewTxPool(config, params.TestChainConfig, blockchain)
@@ -2326,7 +2329,7 @@ func testTransactionJournaling(t *testing.T, nolocals bool) {
pool.Stop()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
- blockchain = &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain = &testBlockChain{1000000, statedb, new(event.Feed)}
pool = NewTxPool(config, params.TestChainConfig, blockchain)
pending, queued = pool.Stats()
@@ -2355,7 +2358,7 @@ func TestTransactionStatusCheck(t *testing.T) {
// Create the pool to test the status retrievals with
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{1000000, statedb, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index 22a316c23..492493d5c 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -273,9 +273,6 @@ func TestDeriveFields(t *testing.T) {
if receipts[i].Logs[j].TxHash != txs[i].Hash() {
t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
}
- if receipts[i].Logs[j].TxHash != txs[i].Hash() {
- t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
- }
if receipts[i].Logs[j].TxIndex != uint(i) {
t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
}
diff --git a/core/types/state_account.go b/core/types/state_account.go
new file mode 100644
index 000000000..68804bf31
--- /dev/null
+++ b/core/types/state_account.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// StateAccount is the Ethereum consensus representation of accounts.
+// These objects are stored in the main account trie.
+type StateAccount struct {
+ Nonce uint64
+ Balance *big.Int
+ Root common.Hash // merkle root of the storage trie
+ CodeHash []byte
+}
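
As a side note, `StateAccount` is stored RLP-encoded in the account trie; a hedged round-trip sketch (the field values here are made up purely for illustration):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Hypothetical account, just to exercise the codec.
	acc := types.StateAccount{
		Nonce:    1,
		Balance:  big.NewInt(1000),
		Root:     common.Hash{},    // placeholder storage root
		CodeHash: make([]byte, 32), // placeholder code hash
	}
	enc, err := rlp.EncodeToBytes(&acc)
	if err != nil {
		panic(err)
	}
	var dec types.StateAccount
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes, decoded nonce %d\n", len(enc), dec.Nonce)
}
```
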
diff --git a/core/types/types_test.go b/core/types/types_test.go
new file mode 100644
index 000000000..1fb386d5d
--- /dev/null
+++ b/core/types/types_test.go
@@ -0,0 +1,148 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+type devnull struct{ len int }
+
+func (d *devnull) Write(p []byte) (int, error) {
+ d.len += len(p)
+ return len(p), nil
+}
+
+func BenchmarkEncodeRLP(b *testing.B) {
+ benchRLP(b, true)
+}
+
+func BenchmarkDecodeRLP(b *testing.B) {
+ benchRLP(b, false)
+}
+
+func benchRLP(b *testing.B, encode bool) {
+ key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ to := common.HexToAddress("0x00000000000000000000000000000000deadbeef")
+ signer := NewLondonSigner(big.NewInt(1337))
+ for _, tc := range []struct {
+ name string
+ obj interface{}
+ }{
+ {
+ "legacy-header",
+ &Header{
+ Difficulty: big.NewInt(10000000000),
+ Number: big.NewInt(1000),
+ GasLimit: 8_000_000,
+ GasUsed: 8_000_000,
+ Time: 555,
+ Extra: make([]byte, 32),
+ },
+ },
+ {
+ "london-header",
+ &Header{
+ Difficulty: big.NewInt(10000000000),
+ Number: big.NewInt(1000),
+ GasLimit: 8_000_000,
+ GasUsed: 8_000_000,
+ Time: 555,
+ Extra: make([]byte, 32),
+ BaseFee: big.NewInt(10000000000),
+ },
+ },
+ {
+ "receipt-for-storage",
+ &ReceiptForStorage{
+ Status: ReceiptStatusSuccessful,
+ CumulativeGasUsed: 0x888888888,
+ Logs: make([]*Log, 0),
+ },
+ },
+ {
+ "receipt-full",
+ &Receipt{
+ Status: ReceiptStatusSuccessful,
+ CumulativeGasUsed: 0x888888888,
+ Logs: make([]*Log, 0),
+ },
+ },
+ {
+ "legacy-transaction",
+ MustSignNewTx(key, signer,
+ &LegacyTx{
+ Nonce: 1,
+ GasPrice: big.NewInt(500),
+ Gas: 1000000,
+ To: &to,
+ Value: big.NewInt(1),
+ }),
+ },
+ {
+ "access-transaction",
+ MustSignNewTx(key, signer,
+ &AccessListTx{
+ Nonce: 1,
+ GasPrice: big.NewInt(500),
+ Gas: 1000000,
+ To: &to,
+ Value: big.NewInt(1),
+ }),
+ },
+ {
+ "1559-transaction",
+ MustSignNewTx(key, signer,
+ &DynamicFeeTx{
+ Nonce: 1,
+ Gas: 1000000,
+ To: &to,
+ Value: big.NewInt(1),
+ GasTipCap: big.NewInt(500),
+ GasFeeCap: big.NewInt(500),
+ }),
+ },
+ } {
+ if encode {
+ b.Run(tc.name, func(b *testing.B) {
+ b.ReportAllocs()
+ var null = &devnull{}
+ for i := 0; i < b.N; i++ {
+ rlp.Encode(null, tc.obj)
+ }
+ b.SetBytes(int64(null.len / b.N))
+ })
+ } else {
+ data, _ := rlp.EncodeToBytes(tc.obj)
+ // Test decoding
+ b.Run(tc.name, func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ if err := rlp.DecodeBytes(data, tc.obj); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.SetBytes(int64(len(data)))
+ })
+ }
+ }
+}
diff --git a/core/vm/access_list_tracer.go b/core/vm/access_list_tracer.go
index cc5461d1c..11b4e2942 100644
--- a/core/vm/access_list_tracer.go
+++ b/core/vm/access_list_tracer.go
@@ -166,6 +166,11 @@ func (*AccessListTracer) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost
func (*AccessListTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {}
+func (*AccessListTracer) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (*AccessListTracer) CaptureExit(output []byte, gasUsed uint64, err error) {}
+
// AccessList returns the current accesslist maintained by the tracer.
func (a *AccessListTracer) AccessList() types.AccessList {
return a.list.accessList()
diff --git a/core/vm/analysis_test.go b/core/vm/analysis_test.go
index 585bb3097..d7f21e04a 100644
--- a/core/vm/analysis_test.go
+++ b/core/vm/analysis_test.go
@@ -55,9 +55,12 @@ func TestJumpDestAnalysis(t *testing.T) {
}
}
+const analysisCodeSize = 1200 * 1024
+
func BenchmarkJumpdestAnalysis_1200k(bench *testing.B) {
// 1.4 ms
- code := make([]byte, 1200000)
+ code := make([]byte, analysisCodeSize)
+ bench.SetBytes(analysisCodeSize)
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
codeBitmap(code)
@@ -66,7 +69,8 @@ func BenchmarkJumpdestAnalysis_1200k(bench *testing.B) {
}
func BenchmarkJumpdestHashing_1200k(bench *testing.B) {
// 4 ms
- code := make([]byte, 1200000)
+ code := make([]byte, analysisCodeSize)
+ bench.SetBytes(analysisCodeSize)
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
crypto.Keccak256Hash(code)
@@ -77,13 +81,19 @@ func BenchmarkJumpdestHashing_1200k(bench *testing.B) {
func BenchmarkJumpdestOpAnalysis(bench *testing.B) {
var op OpCode
bencher := func(b *testing.B) {
- code := make([]byte, 32*b.N)
+ code := make([]byte, analysisCodeSize)
+ b.SetBytes(analysisCodeSize)
for i := range code {
code[i] = byte(op)
}
bits := make(bitvec, len(code)/8+1+4)
b.ResetTimer()
- codeBitmapInternal(code, bits)
+ for i := 0; i < b.N; i++ {
+ for j := range bits {
+ bits[j] = 0
+ }
+ codeBitmapInternal(code, bits)
+ }
}
for op = PUSH1; op <= PUSH32; op++ {
bench.Run(op.String(), bencher)
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 896476673..3b4bd69d7 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -193,11 +193,19 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value)
// Capture the tracer start/end events in debug mode
- if evm.Config.Debug && evm.depth == 0 {
- evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
- defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters
- evm.Config.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err)
- }(gas, time.Now())
+ if evm.Config.Debug {
+ if evm.depth == 0 {
+ evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
+ defer func(startGas uint64, startTime time.Time) { // Lazy evaluation of the parameters
+ evm.Config.Tracer.CaptureEnd(ret, startGas-gas, time.Since(startTime), err)
+ }(gas, time.Now())
+ } else {
+ // Handle tracer events for entering and exiting a call frame
+ evm.Config.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value)
+ defer func(startGas uint64) {
+ evm.Config.Tracer.CaptureExit(ret, startGas-gas, err)
+ }(gas)
+ }
}
if isPrecompile {
@@ -257,6 +265,14 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
}
var snapshot = evm.StateDB.Snapshot()
+ // Invoke tracer hooks that signal entering/exiting a call frame
+ if evm.Config.Debug {
+ evm.Config.Tracer.CaptureEnter(CALLCODE, caller.Address(), addr, input, gas, value)
+ defer func(startGas uint64) {
+ evm.Config.Tracer.CaptureExit(ret, startGas-gas, err)
+ }(gas)
+ }
+
// It is allowed to call precompiles, even via delegatecall
if p, isPrecompile := evm.precompile(addr); isPrecompile {
ret, gas, err = RunPrecompiledContract(p, input, gas)
@@ -293,6 +309,14 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
}
var snapshot = evm.StateDB.Snapshot()
+ // Invoke tracer hooks that signal entering/exiting a call frame
+ if evm.Config.Debug {
+ evm.Config.Tracer.CaptureEnter(DELEGATECALL, caller.Address(), addr, input, gas, nil)
+ defer func(startGas uint64) {
+ evm.Config.Tracer.CaptureExit(ret, startGas-gas, err)
+ }(gas)
+ }
+
// It is allowed to call precompiles, even via delegatecall
if p, isPrecompile := evm.precompile(addr); isPrecompile {
ret, gas, err = RunPrecompiledContract(p, input, gas)
@@ -338,6 +362,14 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
// future scenarios
evm.StateDB.AddBalance(addr, big0)
+ // Invoke tracer hooks that signal entering/exiting a call frame
+ if evm.Config.Debug {
+ evm.Config.Tracer.CaptureEnter(STATICCALL, caller.Address(), addr, input, gas, nil)
+ defer func(startGas uint64) {
+ evm.Config.Tracer.CaptureExit(ret, startGas-gas, err)
+ }(gas)
+ }
+
if p, isPrecompile := evm.precompile(addr); isPrecompile {
ret, gas, err = RunPrecompiledContract(p, input, gas)
} else {
@@ -377,7 +409,7 @@ func (c *codeAndHash) Hash() common.Hash {
}
// create creates a new contract using code as deployment code.
-func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) {
+func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, value *big.Int, address common.Address, typ OpCode) ([]byte, common.Address, uint64, error) {
// Depth check execution. Fail if we're trying to execute above the
// limit.
if evm.depth > int(params.CallCreateDepth) {
@@ -415,9 +447,14 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
return nil, address, gas, nil
}
- if evm.Config.Debug && evm.depth == 0 {
- evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value)
+ if evm.Config.Debug {
+ if evm.depth == 0 {
+ evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value)
+ } else {
+ evm.Config.Tracer.CaptureEnter(typ, caller.Address(), address, codeAndHash.code, gas, value)
+ }
}
+
start := time.Now()
ret, err := evm.interpreter.Run(contract, nil, false)
@@ -455,8 +492,12 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
}
}
- if evm.Config.Debug && evm.depth == 0 {
- evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
+ if evm.Config.Debug {
+ if evm.depth == 0 {
+ evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
+ } else {
+ evm.Config.Tracer.CaptureExit(ret, gas-contract.Gas, err)
+ }
}
return ret, address, contract.Gas, err
}
@@ -464,7 +505,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
// Create creates a new contract using code as deployment code.
func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetNonce(caller.Address()))
- return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr)
+ return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr, CREATE)
}
// Create2 creates a new contract using code as deployment code.
@@ -474,7 +515,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
codeAndHash := &codeAndHash{code: code}
contractAddr = crypto.CreateAddress2(caller.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes())
- return evm.create(caller, codeAndHash, gas, endowment, contractAddr)
+ return evm.create(caller, codeAndHash, gas, endowment, contractAddr, CREATE2)
}
// ChainConfig returns the environment's chain configuration
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index 944b6cf0a..19d2198af 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -60,7 +60,7 @@ func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) {
// as argument:
// CALLDATACOPY (stack position 2)
// CODECOPY (stack position 2)
-// EXTCODECOPY (stack poition 3)
+// EXTCODECOPY (stack position 3)
// RETURNDATACOPY (stack position 2)
func memoryCopierGas(stackpos int) gasFunc {
return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 6c8c6e6e6..bda480f08 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -791,6 +791,10 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
interpreter.evm.StateDB.Suicide(scope.Contract.Address())
+ if interpreter.cfg.Debug {
+ interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
+ interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil)
+ }
return nil, nil
}
diff --git a/core/vm/logger.go b/core/vm/logger.go
index 900a5e585..52dc0b8a0 100644
--- a/core/vm/logger.go
+++ b/core/vm/logger.go
@@ -46,12 +46,12 @@ func (s Storage) Copy() Storage {
// LogConfig are the configuration options for structured logger the EVM
type LogConfig struct {
- DisableMemory bool // disable memory capture
- DisableStack bool // disable stack capture
- DisableStorage bool // disable storage capture
- DisableReturnData bool // disable return data capture
- Debug bool // print output during capture end
- Limit int // maximum length of output, but zero means unlimited
+ EnableMemory bool // enable memory capture
+ DisableStack bool // disable stack capture
+ DisableStorage bool // disable storage capture
+ EnableReturnData bool // enable return data capture
+ Debug bool // print output during capture end
+ Limit int // maximum length of output, but zero means unlimited
// Chain overrides, can be used to execute a trace using future fork rules
Overrides *params.ChainConfig `json:"overrides,omitempty"`
}
@@ -106,6 +106,8 @@ func (s *StructLog) ErrorString() string {
type Tracer interface {
CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int)
CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error)
+ CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int)
+ CaptureExit(output []byte, gasUsed uint64, err error)
CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error)
CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error)
}
@@ -160,7 +162,7 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
}
// Copy a snapshot of the current memory state to a new buffer
var mem []byte
- if !l.cfg.DisableMemory {
+ if l.cfg.EnableMemory {
mem = make([]byte, len(memory.Data()))
copy(mem, memory.Data())
}
@@ -199,7 +201,7 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
}
}
var rdata []byte
- if !l.cfg.DisableReturnData {
+ if l.cfg.EnableReturnData {
rdata = make([]byte, len(rData))
copy(rdata, rData)
}
@@ -225,6 +227,11 @@ func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration
}
}
+func (l *StructLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (l *StructLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}
+
// StructLogs returns the captured log entries.
func (l *StructLogger) StructLogs() []StructLog { return l.logs }
@@ -342,3 +349,8 @@ func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, e
fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n",
output, gasUsed, err)
}
+
+func (t *mdLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (t *mdLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}
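
Since `CaptureEnter`/`CaptureExit` are now part of the `Tracer` interface, every implementation must provide them, even as no-ops (as the stubs above do). A minimal frame-counting tracer might look like this sketch (`frameCounter` is an invented name, not code from this diff):

```go
package tracerdemo

import (
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
)

// frameCounter counts entered/exited sub-call frames; all other hooks no-op.
type frameCounter struct {
	enters, exits int
}

func (t *frameCounter) CaptureStart(env *vm.EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}
func (t *frameCounter) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
}
func (t *frameCounter) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
}
func (t *frameCounter) CaptureEnd(output []byte, gasUsed uint64, d time.Duration, err error) {}

// The two new hooks fire on every CALL/CREATE-family frame below depth 0.
func (t *frameCounter) CaptureEnter(typ vm.OpCode, from, to common.Address, input []byte, gas uint64, value *big.Int) {
	t.enters++
}
func (t *frameCounter) CaptureExit(output []byte, gasUsed uint64, err error) {
	t.exits++
}

// Compile-time check that the interface is fully implemented.
var _ vm.Tracer = (*frameCounter)(nil)
```
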
diff --git a/core/vm/logger_json.go b/core/vm/logger_json.go
index 5210f479f..479a00c0a 100644
--- a/core/vm/logger_json.go
+++ b/core/vm/logger_json.go
@@ -61,13 +61,13 @@ func (l *JSONLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint
RefundCounter: env.StateDB.GetRefund(),
Err: err,
}
- if !l.cfg.DisableMemory {
+ if l.cfg.EnableMemory {
log.Memory = memory.Data()
}
if !l.cfg.DisableStack {
log.Stack = stack.data
}
- if !l.cfg.DisableReturnData {
+ if l.cfg.EnableReturnData {
log.ReturnData = rData
}
l.encoder.Encode(log)
@@ -87,3 +87,8 @@ func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration,
}
l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, errMsg})
}
+
+func (l *JSONLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (l *JSONLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index dcf2d0d44..9f4bafbc7 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/params"
)
@@ -342,11 +343,21 @@ func (s *stepCounter) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, co
// benchmarkNonModifyingCode benchmarks code, but if the code modifies the
// state, this should not be used, since it does not reset the state between runs.
-func benchmarkNonModifyingCode(gas uint64, code []byte, name string, b *testing.B) {
+func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode string, b *testing.B) {
cfg := new(Config)
setDefaults(cfg)
cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
cfg.GasLimit = gas
+ if len(tracerCode) > 0 {
+ tracer, err := tracers.New(tracerCode, new(tracers.Context))
+ if err != nil {
+ b.Fatal(err)
+ }
+ cfg.EVMConfig = vm.Config{
+ Debug: true,
+ Tracer: tracer,
+ }
+ }
var (
destination = common.BytesToAddress([]byte("contract"))
vmenv = NewEnv(cfg)
@@ -486,12 +497,12 @@ func BenchmarkSimpleLoop(b *testing.B) {
// Tracer: tracer,
// }})
// 100M gas
- benchmarkNonModifyingCode(100000000, staticCallIdentity, "staticcall-identity-100M", b)
- benchmarkNonModifyingCode(100000000, callIdentity, "call-identity-100M", b)
- benchmarkNonModifyingCode(100000000, loopingCode, "loop-100M", b)
- benchmarkNonModifyingCode(100000000, callInexistant, "call-nonexist-100M", b)
- benchmarkNonModifyingCode(100000000, callEOA, "call-EOA-100M", b)
- benchmarkNonModifyingCode(100000000, calllRevertingContractWithInput, "call-reverting-100M", b)
+ benchmarkNonModifyingCode(100000000, staticCallIdentity, "staticcall-identity-100M", "", b)
+ benchmarkNonModifyingCode(100000000, callIdentity, "call-identity-100M", "", b)
+ benchmarkNonModifyingCode(100000000, loopingCode, "loop-100M", "", b)
+ benchmarkNonModifyingCode(100000000, callInexistant, "call-nonexist-100M", "", b)
+ benchmarkNonModifyingCode(100000000, callEOA, "call-EOA-100M", "", b)
+ benchmarkNonModifyingCode(100000000, calllRevertingContractWithInput, "call-reverting-100M", "", b)
//benchmarkNonModifyingCode(10000000, staticCallIdentity, "staticcall-identity-10M", b)
//benchmarkNonModifyingCode(10000000, loopingCode, "loop-10M", b)
@@ -688,3 +699,241 @@ func TestColdAccountAccessCost(t *testing.T) {
}
}
}
+
+func TestRuntimeJSTracer(t *testing.T) {
+ jsTracers := []string{
+ `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0,
+ step: function() { this.steps++},
+ fault: function() {},
+ result: function() {
+ return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",")
+ },
+ enter: function(frame) {
+ this.enters++;
+ this.enterGas = frame.getGas();
+ },
+ exit: function(res) {
+ this.exits++;
+ this.gasUsed = res.getGasUsed();
+ }}`,
+ `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0,
+ fault: function() {},
+ result: function() {
+ return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",")
+ },
+ enter: function(frame) {
+ this.enters++;
+ this.enterGas = frame.getGas();
+ },
+ exit: function(res) {
+ this.exits++;
+ this.gasUsed = res.getGasUsed();
+ }}`}
+ tests := []struct {
+ code []byte
+ // One result per tracer
+ results []string
+ }{
+ {
+ // CREATE
+ code: []byte{
+ // Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes)
+ byte(vm.PUSH5),
+ // Init code: PUSH1 0, PUSH1 0, RETURN (3 steps)
+ byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN),
+ byte(vm.PUSH1), 0,
+ byte(vm.MSTORE),
+ // length, offset, value
+ byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0,
+ byte(vm.CREATE),
+ byte(vm.POP),
+ },
+ results: []string{`"1,1,4294935775,6,12"`, `"1,1,4294935775,6,0"`},
+ },
+ {
+ // CREATE2
+ code: []byte{
+ // Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes)
+ byte(vm.PUSH5),
+ // Init code: PUSH1 0, PUSH1 0, RETURN (3 steps)
+ byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN),
+ byte(vm.PUSH1), 0,
+ byte(vm.MSTORE),
+ // salt, length, offset, value
+ byte(vm.PUSH1), 1, byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0,
+ byte(vm.CREATE2),
+ byte(vm.POP),
+ },
+ results: []string{`"1,1,4294935766,6,13"`, `"1,1,4294935766,6,0"`},
+ },
+ {
+ // CALL
+ code: []byte{
+ // outsize, outoffset, insize, inoffset
+ byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+ byte(vm.PUSH1), 0, // value
+ byte(vm.PUSH1), 0xbb, //address
+ byte(vm.GAS), // gas
+ byte(vm.CALL),
+ byte(vm.POP),
+ },
+ results: []string{`"1,1,4294964716,6,13"`, `"1,1,4294964716,6,0"`},
+ },
+ {
+ // CALLCODE
+ code: []byte{
+ // outsize, outoffset, insize, inoffset
+ byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+ byte(vm.PUSH1), 0, // value
+ byte(vm.PUSH1), 0xcc, //address
+ byte(vm.GAS), // gas
+ byte(vm.CALLCODE),
+ byte(vm.POP),
+ },
+ results: []string{`"1,1,4294964716,6,13"`, `"1,1,4294964716,6,0"`},
+ },
+ {
+ // STATICCALL
+ code: []byte{
+ // outsize, outoffset, insize, inoffset
+ byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+ byte(vm.PUSH1), 0xdd, //address
+ byte(vm.GAS), // gas
+ byte(vm.STATICCALL),
+ byte(vm.POP),
+ },
+ results: []string{`"1,1,4294964719,6,12"`, `"1,1,4294964719,6,0"`},
+ },
+ {
+ // DELEGATECALL
+ code: []byte{
+ // outsize, outoffset, insize, inoffset
+ byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+ byte(vm.PUSH1), 0xee, //address
+ byte(vm.GAS), // gas
+ byte(vm.DELEGATECALL),
+ byte(vm.POP),
+ },
+ results: []string{`"1,1,4294964719,6,12"`, `"1,1,4294964719,6,0"`},
+ },
+ {
+ // CALL self-destructing contract
+ code: []byte{
+ // outsize, outoffset, insize, inoffset
+ byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0,
+ byte(vm.PUSH1), 0, // value
+ byte(vm.PUSH1), 0xff, //address
+ byte(vm.GAS), // gas
+ byte(vm.CALL),
+ byte(vm.POP),
+ },
+ results: []string{`"2,2,0,5003,12"`, `"2,2,0,5003,0"`},
+ },
+ }
+ calleeCode := []byte{
+ byte(vm.PUSH1), 0,
+ byte(vm.PUSH1), 0,
+ byte(vm.RETURN),
+ }
+ depressedCode := []byte{
+ byte(vm.PUSH1), 0xaa,
+ byte(vm.SELFDESTRUCT),
+ }
+ main := common.HexToAddress("0xaa")
+ for i, jsTracer := range jsTracers {
+ for j, tc := range tests {
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb.SetCode(main, tc.code)
+ statedb.SetCode(common.HexToAddress("0xbb"), calleeCode)
+ statedb.SetCode(common.HexToAddress("0xcc"), calleeCode)
+ statedb.SetCode(common.HexToAddress("0xdd"), calleeCode)
+ statedb.SetCode(common.HexToAddress("0xee"), calleeCode)
+ statedb.SetCode(common.HexToAddress("0xff"), depressedCode)
+
+ tracer, err := tracers.New(jsTracer, new(tracers.Context))
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, _, err = Call(main, nil, &Config{
+ State: statedb,
+ EVMConfig: vm.Config{
+ Debug: true,
+ Tracer: tracer,
+ }})
+ if err != nil {
+ t.Fatal("didn't expect error", err)
+ }
+ res, err := tracer.GetResult()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if have, want := string(res), tc.results[i]; have != want {
+ t.Errorf("wrong result for tracer %d testcase %d, have \n%v\nwant\n%v\n", i, j, have, want)
+ }
+ }
+ }
+}
+
+func TestJSTracerCreateTx(t *testing.T) {
+ jsTracer := `
+ {enters: 0, exits: 0,
+ step: function() {},
+ fault: function() {},
+ result: function() { return [this.enters, this.exits].join(",") },
+ enter: function(frame) { this.enters++ },
+ exit: function(res) { this.exits++ }}`
+ code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)}
+
+ statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ tracer, err := tracers.New(jsTracer, new(tracers.Context))
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, _, _, err = Create(code, &Config{
+ State: statedb,
+ EVMConfig: vm.Config{
+ Debug: true,
+ Tracer: tracer,
+ }})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := tracer.GetResult()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if have, want := string(res), `"0,0"`; have != want {
+ t.Errorf("wrong result for tracer, have \n%v\nwant\n%v\n", have, want)
+ }
+}
+
+func BenchmarkTracerStepVsCallFrame(b *testing.B) {
+ // Simply pushes and pops some values in a loop
+ code := []byte{
+ byte(vm.JUMPDEST),
+ byte(vm.PUSH1), 0,
+ byte(vm.PUSH1), 0,
+ byte(vm.POP),
+ byte(vm.POP),
+ byte(vm.PUSH1), 0, // jumpdestination
+ byte(vm.JUMP),
+ }
+
+ stepTracer := `
+ {
+ step: function() {},
+ fault: function() {},
+ result: function() {},
+ }`
+ callFrameTracer := `
+ {
+ enter: function() {},
+ exit: function() {},
+ fault: function() {},
+ result: function() {},
+ }`
+
+ benchmarkNonModifyingCode(10000000, code, "tracer-step-10M", stepTracer, b)
+ benchmarkNonModifyingCode(10000000, code, "tracer-call-frame-10M", callFrameTracer, b)
+}
diff --git a/core/vm/stack.go b/core/vm/stack.go
index c71d2653a..220f97c89 100644
--- a/core/vm/stack.go
+++ b/core/vm/stack.go
@@ -91,7 +91,7 @@ func (st *Stack) Print() {
fmt.Println("### stack ###")
if len(st.data) > 0 {
for i, val := range st.data {
- fmt.Printf("%-3d %v\n", i, val)
+ fmt.Printf("%-3d %s\n", i, val.String())
}
} else {
fmt.Println("-- empty --")
diff --git a/crypto/blake2b/blake2bAVX2_amd64.go b/crypto/blake2b/blake2bAVX2_amd64.go
index 0d52b1869..3a85d0e73 100644
--- a/crypto/blake2b/blake2bAVX2_amd64.go
+++ b/crypto/blake2b/blake2bAVX2_amd64.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build go1.7 && amd64 && !gccgo && !appengine
// +build go1.7,amd64,!gccgo,!appengine
package blake2b
diff --git a/crypto/blake2b/blake2b_amd64.go b/crypto/blake2b/blake2b_amd64.go
index 4dbe90da8..a318b2b61 100644
--- a/crypto/blake2b/blake2b_amd64.go
+++ b/crypto/blake2b/blake2b_amd64.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !go1.7 && amd64 && !gccgo && !appengine
// +build !go1.7,amd64,!gccgo,!appengine
package blake2b
diff --git a/crypto/blake2b/blake2b_f_fuzz.go b/crypto/blake2b/blake2b_f_fuzz.go
index ab7334280..b2f405707 100644
--- a/crypto/blake2b/blake2b_f_fuzz.go
+++ b/crypto/blake2b/blake2b_f_fuzz.go
@@ -1,3 +1,4 @@
+//go:build gofuzz
// +build gofuzz
package blake2b
diff --git a/crypto/blake2b/blake2b_ref.go b/crypto/blake2b/blake2b_ref.go
index 9d0ade473..095c71a64 100644
--- a/crypto/blake2b/blake2b_ref.go
+++ b/crypto/blake2b/blake2b_ref.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build !amd64 || appengine || gccgo
// +build !amd64 appengine gccgo
package blake2b
diff --git a/crypto/blake2b/register.go b/crypto/blake2b/register.go
index efd689af4..9d8633963 100644
--- a/crypto/blake2b/register.go
+++ b/crypto/blake2b/register.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build go1.9
// +build go1.9
package blake2b
diff --git a/crypto/bls12381/arithmetic_decl.go b/crypto/bls12381/arithmetic_decl.go
index ec0b21e80..f6d232d65 100644
--- a/crypto/bls12381/arithmetic_decl.go
+++ b/crypto/bls12381/arithmetic_decl.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build (amd64 && blsasm) || (amd64 && blsadx)
// +build amd64,blsasm amd64,blsadx
package bls12381
diff --git a/crypto/bls12381/arithmetic_fallback.go b/crypto/bls12381/arithmetic_fallback.go
index 91cabf4f3..c09ae0d91 100644
--- a/crypto/bls12381/arithmetic_fallback.go
+++ b/crypto/bls12381/arithmetic_fallback.go
@@ -31,6 +31,7 @@
// Package bls (generated by goff) contains field arithmetics operations
+//go:build !amd64 || (!blsasm && !blsadx)
// +build !amd64 !blsasm,!blsadx
package bls12381
diff --git a/crypto/bls12381/arithmetic_x86_adx.go b/crypto/bls12381/arithmetic_x86_adx.go
index 9c30741e6..a40c7384e 100644
--- a/crypto/bls12381/arithmetic_x86_adx.go
+++ b/crypto/bls12381/arithmetic_x86_adx.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build amd64 && blsadx
// +build amd64,blsadx
package bls12381
diff --git a/crypto/bls12381/arithmetic_x86_noadx.go b/crypto/bls12381/arithmetic_x86_noadx.go
index eaac4b45d..679b30ec8 100644
--- a/crypto/bls12381/arithmetic_x86_noadx.go
+++ b/crypto/bls12381/arithmetic_x86_noadx.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build amd64 && blsasm
// +build amd64,blsasm
package bls12381
diff --git a/crypto/bn256/bn256_fast.go b/crypto/bn256/bn256_fast.go
index 14b596539..e3c9b6051 100644
--- a/crypto/bn256/bn256_fast.go
+++ b/crypto/bn256/bn256_fast.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
+//go:build amd64 || arm64
// +build amd64 arm64
// Package bn256 implements the Optimal Ate pairing over a 256-bit Barreto-Naehrig curve.
diff --git a/crypto/bn256/bn256_slow.go b/crypto/bn256/bn256_slow.go
index 49021082f..4c0c351e2 100644
--- a/crypto/bn256/bn256_slow.go
+++ b/crypto/bn256/bn256_slow.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.
+//go:build !amd64 && !arm64
// +build !amd64,!arm64
// Package bn256 implements the Optimal Ate pairing over a 256-bit Barreto-Naehrig curve.
diff --git a/crypto/bn256/cloudflare/bn256_test.go b/crypto/bn256/cloudflare/bn256_test.go
index 0c8016d86..481e2f78c 100644
--- a/crypto/bn256/cloudflare/bn256_test.go
+++ b/crypto/bn256/cloudflare/bn256_test.go
@@ -92,6 +92,19 @@ func TestTripartiteDiffieHellman(t *testing.T) {
}
}
+func TestG2SelfAddition(t *testing.T) {
+ s, _ := rand.Int(rand.Reader, Order)
+ p := new(G2).ScalarBaseMult(s)
+
+ if !p.p.IsOnCurve() {
+ t.Fatal("p isn't on curve")
+ }
+ m := p.Add(p, p).Marshal()
+ if _, err := p.Unmarshal(m); err != nil {
+ t.Fatalf("p.Add(p, p) ∉ G₂: %v", err)
+ }
+}
+
func BenchmarkG1(b *testing.B) {
x, _ := rand.Int(rand.Reader, Order)
b.ResetTimer()
diff --git a/crypto/bn256/cloudflare/curve.go b/crypto/bn256/cloudflare/curve.go
index 18e9b38f3..16f0489e3 100644
--- a/crypto/bn256/cloudflare/curve.go
+++ b/crypto/bn256/cloudflare/curve.go
@@ -171,15 +171,15 @@ func (c *curvePoint) Double(a *curvePoint) {
gfpAdd(t, d, d)
gfpSub(&c.x, f, t)
+ gfpMul(&c.z, &a.y, &a.z)
+ gfpAdd(&c.z, &c.z, &c.z)
+
gfpAdd(t, C, C)
gfpAdd(t2, t, t)
gfpAdd(t, t2, t2)
gfpSub(&c.y, d, &c.x)
gfpMul(t2, e, &c.y)
gfpSub(&c.y, t2, t)
-
- gfpMul(t, &a.y, &a.z)
- gfpAdd(&c.z, t, t)
}
func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int) {
diff --git a/crypto/bn256/cloudflare/gfp.go b/crypto/bn256/cloudflare/gfp.go
index e8e84e7b3..b15e1697e 100644
--- a/crypto/bn256/cloudflare/gfp.go
+++ b/crypto/bn256/cloudflare/gfp.go
@@ -61,6 +61,7 @@ func (e *gfP) Marshal(out []byte) {
func (e *gfP) Unmarshal(in []byte) error {
// Unmarshal the bytes into little endian form
for w := uint(0); w < 4; w++ {
+ e[3-w] = 0
for b := uint(0); b < 8; b++ {
e[3-w] += uint64(in[8*w+b]) << (56 - 8*b)
}
diff --git a/crypto/bn256/cloudflare/gfp_decl.go b/crypto/bn256/cloudflare/gfp_decl.go
index fdea5c11a..ec4018e88 100644
--- a/crypto/bn256/cloudflare/gfp_decl.go
+++ b/crypto/bn256/cloudflare/gfp_decl.go
@@ -1,3 +1,4 @@
+//go:build (amd64 && !generic) || (arm64 && !generic)
// +build amd64,!generic arm64,!generic
package bn256
diff --git a/crypto/bn256/cloudflare/gfp_generic.go b/crypto/bn256/cloudflare/gfp_generic.go
index 8e6be9596..7742dda4c 100644
--- a/crypto/bn256/cloudflare/gfp_generic.go
+++ b/crypto/bn256/cloudflare/gfp_generic.go
@@ -1,3 +1,4 @@
+//go:build (!amd64 && !arm64) || generic
// +build !amd64,!arm64 generic
package bn256
diff --git a/crypto/bn256/cloudflare/twist.go b/crypto/bn256/cloudflare/twist.go
index 0c2f80d4e..2c7a69a4d 100644
--- a/crypto/bn256/cloudflare/twist.go
+++ b/crypto/bn256/cloudflare/twist.go
@@ -150,15 +150,15 @@ func (c *twistPoint) Double(a *twistPoint) {
t.Add(d, d)
c.x.Sub(f, t)
+ c.z.Mul(&a.y, &a.z)
+ c.z.Add(&c.z, &c.z)
+
t.Add(C, C)
t2.Add(t, t)
t.Add(t2, t2)
c.y.Sub(d, &c.x)
t2.Mul(e, &c.y)
c.y.Sub(t2, t)
-
- t.Mul(&a.y, &a.z)
- c.z.Add(t, t)
}
func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int) {
diff --git a/crypto/secp256k1/dummy.go b/crypto/secp256k1/dummy.go
index c0f2ee52c..65a75080f 100644
--- a/crypto/secp256k1/dummy.go
+++ b/crypto/secp256k1/dummy.go
@@ -1,3 +1,4 @@
+//go:build dummy
// +build dummy
// This file is part of a workaround for `go mod vendor` which won't vendor
diff --git a/crypto/secp256k1/panic_cb.go b/crypto/secp256k1/panic_cb.go
index 5da2bea37..a30b04f51 100644
--- a/crypto/secp256k1/panic_cb.go
+++ b/crypto/secp256k1/panic_cb.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
-// +build !gofuzz
-// +build cgo
+//go:build !gofuzz && cgo
+// +build !gofuzz,cgo
package secp256k1
diff --git a/crypto/secp256k1/scalar_mult_cgo.go b/crypto/secp256k1/scalar_mult_cgo.go
index f28a1c782..8afa9d023 100644
--- a/crypto/secp256k1/scalar_mult_cgo.go
+++ b/crypto/secp256k1/scalar_mult_cgo.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
-// +build !gofuzz
-// +build cgo
+//go:build !gofuzz && cgo
+// +build !gofuzz,cgo
package secp256k1
diff --git a/crypto/secp256k1/scalar_mult_nocgo.go b/crypto/secp256k1/scalar_mult_nocgo.go
index 55756b5be..22f53ac6a 100644
--- a/crypto/secp256k1/scalar_mult_nocgo.go
+++ b/crypto/secp256k1/scalar_mult_nocgo.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
+//go:build gofuzz || !cgo
// +build gofuzz !cgo
package secp256k1
diff --git a/crypto/secp256k1/secp256.go b/crypto/secp256k1/secp256.go
index a1bcf7796..c9c01b320 100644
--- a/crypto/secp256k1/secp256.go
+++ b/crypto/secp256k1/secp256.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
-// +build !gofuzz
-// +build cgo
+//go:build !gofuzz && cgo
+// +build !gofuzz,cgo
// Package secp256k1 wraps the bitcoin secp256k1 C library.
package secp256k1
diff --git a/crypto/signature_cgo.go b/crypto/signature_cgo.go
index 843360298..bd72d97d3 100644
--- a/crypto/signature_cgo.go
+++ b/crypto/signature_cgo.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build !nacl && !js && cgo && !gofuzz
// +build !nacl,!js,cgo,!gofuzz
package crypto
diff --git a/crypto/signature_nocgo.go b/crypto/signature_nocgo.go
index 77c8a1db0..fd1e66c7e 100644
--- a/crypto/signature_nocgo.go
+++ b/crypto/signature_nocgo.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build nacl || js || !cgo || gofuzz
// +build nacl js !cgo gofuzz
package crypto
diff --git a/crypto/signify/signify_fuzz.go b/crypto/signify/signify_fuzz.go
index f9167900a..2dc9b2102 100644
--- a/crypto/signify/signify_fuzz.go
+++ b/crypto/signify/signify_fuzz.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build gofuzz
// +build gofuzz
package signify
diff --git a/docs/postmortems/2021-08-22-split-postmortem.md b/docs/postmortems/2021-08-22-split-postmortem.md
new file mode 100644
index 000000000..429f22d70
--- /dev/null
+++ b/docs/postmortems/2021-08-22-split-postmortem.md
@@ -0,0 +1,266 @@
+# Minority split 2021-08-27 post-mortem
+
+This is a post-mortem concerning the minority chain split that occurred on Ethereum mainnet at block [13107518](https://etherscan.io/block/13107518).
+
+## Timeline
+
+
+- 2021-08-17: Guido Vranken submitted bounty report. Investigation started, root cause identified, patch variations discussed.
+- 2021-08-18: Made a public announcement on Twitter about a security release coming the following Tuesday. Downstream projects were also notified about the upcoming patch release.
+- 2021-08-24: Released [v1.10.8](https://github.com/ethereum/go-ethereum/releases/tag/v1.10.8) containing the fix on Tuesday morning (CET). Erigon released [v2021.08.04](https://github.com/ledgerwatch/erigon/releases/tag/v2021.08.04).
+- 2021-08-27: At 12:50:07 UTC, the issue was exploited. Analysis started roughly 30 minutes later.
+
+
+
+## Bounty report
+
+### 2021-08-17 RETURNDATA corruption via datacopy
+
+On 2021-08-17, Guido Vranken submitted a report to bounty@ethereum.org. This coincided with a geth meetup in Berlin, so the geth team was able to analyse the issue fairly quickly.
+
+He submitted a proof of concept which called the `dataCopy` precompile with an input slice and an output slice that overlapped but were shifted. Doing a `copy` where `src` and `dest` overlap is not a problem in itself; however, the `returnData` slice was _also_ using the same memory as its backing array.
+
+#### Technical details
+
+During the CALL variants, `geth` does not copy the input. This was changed at one point to counter a DoS attack reported by Hubert Ritzdorf, which forced a lot of data copying on repeated `CALL`s -- essentially a DoS via `malloc`. Further, the dataCopy precompile does not copy the data either, but just returns the same slice. This is fine so far.
+
+After the execution of `dataCopy`, we copy `ret` into the designated memory area, and this is what causes the problem: we are copying one slice of memory over another slice of the same memory, and the copy modifies (shifts) the data in the source -- the `ret`. We therefore wind up with corrupted returndata, as illustrated below.
+
+
+```
+1. Calling datacopy
+
+ memory: [0, 1, 2, 3, 4]
+ in (mem[0:4]) : [0,1,2,3]
+ out (mem[1:5]): [1,2,3,4]
+
+2. dataCopy returns
+
+ returndata (==in, mem[0:4]): [0,1,2,3]
+
+3. Copy in -> out
+
+ => memory: [0,0,1,2,3]
+ => returndata: [0,0,1,2]
+```
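+
+The same aliasing effect can be reproduced in a few lines of plain Go (a
+minimal sketch, not geth code): a `copy` between two overlapping slices also
+rewrites a third slice -- standing in for the returndata buffer -- that shares
+the same backing array:
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+    memory := []byte{0, 1, 2, 3, 4}
+    in := memory[0:4]  // input handed to the precompile
+    out := memory[1:5] // destination of the returned data
+
+    // dataCopy just returns its input, so returndata aliases memory.
+    returndata := in
+
+    copy(out, in) // shifts the underlying data in place
+
+    fmt.Println(memory)     // [0 0 1 2 3]
+    fmt.Println(returndata) // [0 0 1 2] -- corrupted
+}
+```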
+
+
+#### Summary
+
+A memory-corruption bug within the EVM can cause a consensus error, where vulnerable nodes obtain a different `stateRoot` when processing a maliciously crafted transaction. This, in turn, leads to a chain split: mainnet forking in two.
+
+#### Handling
+
+On the evening of the 17th, we discussed options for how to handle it. We made a state test to reproduce the issue, verified that neither `openethereum`, `nethermind` nor `besu` was affected by the same vulnerability, and started a full sync with a patched version of `geth`.
+
+It was decided that in this specific instance, it would be possible to make a public announcement and a patch release:
+
+- The fix can be made fairly 'generically', e.g. by always copying data on input to precompiles (see the sketch after this list).
+- The flaw is pretty difficult to find given such a generic fix in the call. The attacker needs to figure out that it concerns the precompiles, specifically dataCopy; that it concerns the `RETURNDATA` buffer rather than the regular memory; and lastly the special circumstances needed to trigger it (overlapping but shifted input/output).
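+
+As a minimal sketch of that 'generic' approach (illustrative only; the actual
+patch, shown in the appendix, instead copies the returned slice at the CALL
+sites):
+
+```go
+// runPrecompiled is a hypothetical wrapper: copying the input up front
+// means a precompile that returns its input slice -- like dataCopy --
+// can no longer hand back a slice aliasing EVM memory.
+func runPrecompiled(p PrecompiledContract, input []byte) ([]byte, error) {
+    return p.Run(common.CopyBytes(input))
+}
+```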
+
+Since we had already merged the removal of `ETH65`, an upgrade by the entire network would cut off nodes which had not yet implemented `ETH66`. After further discussions, we decided to:
+
+- Announce an upcoming security release on Tuesday (August 24th), via Twitter and official channels, plus reach out to downstream projects.
+- Temporarily revert the `ETH65`-removal.
+- Place the fix into the PR optimizing the jumpdest analysis, [23381](https://github.com/ethereum/go-ethereum/pull/23381).
+- After 4-8 weeks, release details about the vulnerability.
+
+
+## Exploit
+
+At block [13107518](https://etherscan.io/block/13107518), mined at Aug-27-2021 12:50:07 PM +UTC, a minority chain split occurred. The discord user @AlexSSD7 notified the allcoredevs channel on the Eth R&D discord at 13:09 UTC on Aug 27.
+
+
+At 14:09 UTC, it was confirmed that the transaction `0x1cb6fb36633d270edefc04d048145b4298e67b8aa82a9e5ec4aa1435dd770ce4` had triggered the bug, leading to a minority split of the chain. The term 'minority split' means that the majority of miners continued to mine on the correct chain.
+
+At 14:17 UTC, @mhswende tweeted out about the issue [2].
+
+The attack was sent from an account funded via Tornado Cash.
+
+It was also found that the same attack had been carried out on the BSC chain at roughly the same time -- at a block mined [12 minutes earlier](https://bscscan.com/tx/0xf667f820631f6adbd04a4c92274374034a3e41fa9057dc42cb4e787535136dce), at Aug-27-2021 12:38:30 PM +UTC.
+
+The blocks on the 'bad' chain were investigated, and Tim Beiko reached out to those mining operators on the minority chain who could be identified via block extradata.
+
+
+## Lessons learned
+
+
+### Disclosure decision
+
+The geth team has an official policy regarding [vulnerability disclosure](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities).
+
+> The primary goal for the Geth team is the health of the Ethereum network as a whole, and the decision whether or not to publish details about a serious vulnerability boils down to minimizing the risk and/or impact of discovery and exploitation.
+
+In this case, it was decided that a public pre-announcement plus patch would likely give a sufficient update window for a critical mass of nodes/miners to upgrade before the issue could be exploited. In hindsight, this was a dangerous decision, and it is unlikely that the same decision would be reached were a similar incident to happen again.
+
+
+### Disclosure path
+
+Several subprojects were informed about the upcoming security patch:
+
+- Polygon/Matic
+- MEV
+- Avalanche
+- Erigon
+- BSC
+- EWF
+- Quorum
+- ETC
+- xDAI
+
+However, some were 'lost' and only notified later:
+
+- Optimism
+- Summa
+- Harmony
+
+Action point: create a low-volume geth-announce@ethereum.org email list where dependent projects/operators can receive public announcements.
+- This has been done. If you wish to receive release and security announcements, sign up [here](https://groups.google.com/a/ethereum.org/g/geth-announce/about).
+
+### Fork monitoring
+
+The fork monitor behaved 'ok' during the incident, but had to be restarted during the evening.
+
+Action point: improve the resiliency of the forkmon, which currently does not perform well when many nodes are connected.
+
+Action point: enable push-based alerts to be sent from the forkmon, to speed up fork detection.
+
+
+## Links
+
+- [1] https://twitter.com/go_ethereum/status/1428051458763763721
+- [2] https://twitter.com/mhswende/status/1431259601530458112
+
+
+## Appendix
+
+### Subprojects
+
+
+The projects were sent variations of the following text:
+```
+We have identified a security issue with go-ethereum, and will issue a
+new release (v1.10.8) on Tuesday next week.
+
+At this point, we will not disclose details about the issue, but
+recommend downstream/dependent projects to be ready to take actions to
+upgrade to the latest go-ethereum codebase. More information about the
+issue will be disclosed at a later date.
+
+https://twitter.com/go_ethereum/status/1428051458763763721
+
+```
+### Patch
+
+```diff
+diff --git a/core/vm/instructions.go b/core/vm/instructions.go
+index f7ef2f900e..6c8c6e6e6f 100644
+--- a/core/vm/instructions.go
++++ b/core/vm/instructions.go
+@@ -669,6 +669,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
+ }
+ stack.push(&temp)
+ if err == nil || err == ErrExecutionReverted {
++ ret = common.CopyBytes(ret)
+ scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
+ }
+ scope.Contract.Gas += returnGas
+@@ -703,6 +704,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
+ }
+ stack.push(&temp)
+ if err == nil || err == ErrExecutionReverted {
++ ret = common.CopyBytes(ret)
+ scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
+ }
+ scope.Contract.Gas += returnGas
+@@ -730,6 +732,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
+ }
+ stack.push(&temp)
+ if err == nil || err == ErrExecutionReverted {
++ ret = common.CopyBytes(ret)
+ scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
+ }
+ scope.Contract.Gas += returnGas
+@@ -757,6 +760,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
+ }
+ stack.push(&temp)
+ if err == nil || err == ErrExecutionReverted {
++ ret = common.CopyBytes(ret)
+ scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
+ }
+ scope.Contract.Gas += returnGas
+diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
+index 9cf0c4e2c1..9fb83799c9 100644
+--- a/core/vm/interpreter.go
++++ b/core/vm/interpreter.go
+@@ -262,7 +262,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
+ // if the operation clears the return data (e.g. it has returning data)
+ // set the last return to the result of the operation.
+ if operation.returns {
+- in.returnData = common.CopyBytes(res)
++ in.returnData = res
+ }
+
+ switch {
+```
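+
+The fix copies `ret` at each CALL site before it is written into memory, and
+the interpreter loop correspondingly stops making its own copy. For reference,
+`common.CopyBytes` is essentially a defensive copy (sketched from memory below,
+not quoted from the geth source), so the memory write can no longer alias the
+interpreter's returndata buffer:
+
+```go
+// CopyBytes returns a fresh copy of b that shares no backing array with it.
+func CopyBytes(b []byte) []byte {
+    if b == nil {
+        return nil
+    }
+    c := make([]byte, len(b))
+    copy(c, b)
+    return c
+}
+```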
+
+### Statetest to test for the issue
+
+```json
+{
+ "trigger-issue": {
+ "env": {
+ "currentCoinbase": "b94f5374fce5edbc8e2a8697c15331677e6ebf0b",
+ "currentDifficulty": "0x20000",
+ "currentGasLimit": "0x26e1f476fe1e22",
+ "currentNumber": "0x1",
+ "currentTimestamp": "0x3e8",
+ "previousHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
+ },
+ "pre": {
+ "0x00000000000000000000000000000000000000bb": {
+ "code": "0x6001600053600260015360036002536004600353600560045360066005536006600260066000600060047f7ef0367e633852132a0ebbf70eb714015dd44bc82e1e55a96ef1389c999c1bcaf13d600060003e596000208055",
+ "storage": {},
+ "balance": "0x5",
+ "nonce": "0x0"
+ },
+ "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
+ "code": "0x",
+ "storage": {},
+ "balance": "0xffffffff",
+ "nonce": "0x0"
+ }
+ },
+ "transaction": {
+ "gasPrice": "0x1",
+ "nonce": "0x0",
+ "to": "0x00000000000000000000000000000000000000bb",
+ "data": [
+ "0x"
+ ],
+ "gasLimit": [
+ "0x7a1200"
+ ],
+ "value": [
+ "0x01"
+ ],
+ "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
+ },
+ "out": "0x",
+ "post": {
+ "Berlin": [
+ {
+ "hash": "2a38a040bab1e1fa499253d98b2fd363e5756ecc52db47dd59af7116c068368c",
+ "logs": "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
+ "indexes": {
+ "data": 0,
+ "gas": 0,
+ "value": 0
+ }
+ }
+ ]
+ }
+ }
+}
+```
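+
+Assuming the test is saved as `trigger-issue.json`, it can be exercised with
+geth's `evm` tool; a patched build should reach the `Berlin` post-state hash
+above, while a vulnerable build reports a mismatch:
+
+```
+$ evm statetest trigger-issue.json
+```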
+
diff --git a/eth/api.go b/eth/api.go
index 0f57128d7..8b96d1f31 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -342,7 +342,7 @@ func (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs,
} else {
blockRlp = fmt.Sprintf("0x%x", rlpBytes)
}
- if blockJSON, err = ethapi.RPCMarshalBlock(block, true, true, api.eth.engine); err != nil {
+ if blockJSON, err = ethapi.RPCMarshalBlock(block, true, true); err != nil {
blockJSON = map[string]interface{}{"error": err.Error()}
}
results = append(results, &BadBlockArgs{
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 49de70e21..1af33414c 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -21,6 +21,7 @@ import (
"errors"
"math/big"
+ "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -30,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -181,13 +181,14 @@ func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (type
}
func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
- receipts := b.eth.blockchain.GetReceiptsByHash(hash)
- if receipts == nil {
- return nil, nil
+ db := b.eth.ChainDb()
+ number := rawdb.ReadHeaderNumber(db, hash)
+ if number == nil {
+ return nil, errors.New("failed to get block number from hash")
}
- logs := make([][]*types.Log, len(receipts))
- for i, receipt := range receipts {
- logs[i] = receipt.Logs
+ logs := rawdb.ReadLogs(db, hash, *number)
+ if logs == nil {
+ return nil, errors.New("failed to get logs for block")
}
return logs, nil
}
@@ -279,8 +280,8 @@ func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.S
return b.eth.TxPool().SubscribeNewTxsEvent(ch)
}
-func (b *EthAPIBackend) Downloader() *downloader.Downloader {
- return b.eth.Downloader()
+func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress {
+ return b.eth.Downloader().Progress()
}
func (b *EthAPIBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 1767506a3..a6bf87acb 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -448,8 +448,8 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
d.mux.Post(DoneEvent{latest})
}
}()
- if p.version < eth.ETH65 {
- return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, eth.ETH65)
+ if p.version < eth.ETH66 {
+ return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, eth.ETH66)
}
mode := d.getMode()
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 794160993..17cd3630c 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -522,10 +522,6 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng
}
}
-func TestCanonicalSynchronisation65Full(t *testing.T) { testCanonSync(t, eth.ETH65, FullSync) }
-func TestCanonicalSynchronisation65Fast(t *testing.T) { testCanonSync(t, eth.ETH65, FastSync) }
-func TestCanonicalSynchronisation65Light(t *testing.T) { testCanonSync(t, eth.ETH65, LightSync) }
-
func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) }
func TestCanonicalSynchronisation66Fast(t *testing.T) { testCanonSync(t, eth.ETH66, FastSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
@@ -549,9 +545,6 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved.
-func TestThrottling65Full(t *testing.T) { testThrottling(t, eth.ETH65, FullSync) }
-func TestThrottling65Fast(t *testing.T) { testThrottling(t, eth.ETH65, FastSync) }
-
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }
@@ -634,10 +627,6 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
-func TestForkedSync65Full(t *testing.T) { testForkedSync(t, eth.ETH65, FullSync) }
-func TestForkedSync65Fast(t *testing.T) { testForkedSync(t, eth.ETH65, FastSync) }
-func TestForkedSync65Light(t *testing.T) { testForkedSync(t, eth.ETH65, LightSync) }
-
func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) }
func TestForkedSync66Fast(t *testing.T) { testForkedSync(t, eth.ETH66, FastSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
@@ -667,10 +656,6 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that synchronising against a much shorter but much heavyer fork works
// corrently and is not dropped.
-func TestHeavyForkedSync65Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH65, FullSync) }
-func TestHeavyForkedSync65Fast(t *testing.T) { testHeavyForkedSync(t, eth.ETH65, FastSync) }
-func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH65, LightSync) }
-
func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Fast(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FastSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
@@ -702,10 +687,6 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains.
-func TestBoundedForkedSync65Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH65, FullSync) }
-func TestBoundedForkedSync65Fast(t *testing.T) { testBoundedForkedSync(t, eth.ETH65, FastSync) }
-func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH65, LightSync) }
-
func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) }
func TestBoundedForkedSync66Fast(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FastSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
@@ -736,16 +717,6 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync65Full(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH65, FullSync)
-}
-func TestBoundedHeavyForkedSync65Fast(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH65, FastSync)
-}
-func TestBoundedHeavyForkedSync65Light(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH65, LightSync)
-}
-
func TestBoundedHeavyForkedSync66Full(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
}
@@ -800,10 +771,6 @@ func TestInactiveDownloader63(t *testing.T) {
}
// Tests that a canceled download wipes all previously accumulated state.
-func TestCancel65Full(t *testing.T) { testCancel(t, eth.ETH65, FullSync) }
-func TestCancel65Fast(t *testing.T) { testCancel(t, eth.ETH65, FastSync) }
-func TestCancel65Light(t *testing.T) { testCancel(t, eth.ETH65, LightSync) }
-
func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) }
func TestCancel66Fast(t *testing.T) { testCancel(t, eth.ETH66, FastSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
@@ -833,10 +800,6 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) {
}
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation65Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH65, FullSync) }
-func TestMultiSynchronisation65Fast(t *testing.T) { testMultiSynchronisation(t, eth.ETH65, FastSync) }
-func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH65, LightSync) }
-
func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) }
func TestMultiSynchronisation66Fast(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FastSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
@@ -863,10 +826,6 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
// Tests that synchronisations behave well in multi-version protocol environments
// and not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation65Full(t *testing.T) { testMultiProtoSync(t, eth.ETH65, FullSync) }
-func TestMultiProtoSynchronisation65Fast(t *testing.T) { testMultiProtoSync(t, eth.ETH65, FastSync) }
-func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, eth.ETH65, LightSync) }
-
func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) }
func TestMultiProtoSynchronisation66Fast(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FastSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
@@ -881,8 +840,8 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
chain := testChainBase.shorten(blockCacheMaxItems - 15)
// Create peers of every type
- tester.newPeer("peer 65", eth.ETH65, chain)
tester.newPeer("peer 66", eth.ETH66, chain)
+ //tester.newPeer("peer 65", eth.ETH67, chain)
// Synchronise with the requested peer and make sure all blocks were retrieved
if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
@@ -891,7 +850,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
assertOwnChain(t, tester, chain.len())
// Check that no peers have been dropped off
- for _, version := range []int{65, 66} {
+ for _, version := range []int{66} {
peer := fmt.Sprintf("peer %d", version)
if _, ok := tester.peers[peer]; !ok {
t.Errorf("%s dropped", peer)
@@ -901,10 +860,6 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
-func TestEmptyShortCircuit65Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH65, FullSync) }
-func TestEmptyShortCircuit65Fast(t *testing.T) { testEmptyShortCircuit(t, eth.ETH65, FastSync) }
-func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH65, LightSync) }
-
func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
func TestEmptyShortCircuit66Fast(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
@@ -955,10 +910,6 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
-func TestMissingHeaderAttack65Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH65, FullSync) }
-func TestMissingHeaderAttack65Fast(t *testing.T) { testMissingHeaderAttack(t, eth.ETH65, FastSync) }
-func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH65, LightSync) }
-
func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
func TestMissingHeaderAttack66Fast(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
@@ -987,10 +938,6 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
-func TestShiftedHeaderAttack65Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH65, FullSync) }
-func TestShiftedHeaderAttack65Fast(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH65, FastSync) }
-func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH65, LightSync) }
-
func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
func TestShiftedHeaderAttack66Fast(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
@@ -1024,7 +971,6 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
-func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH65, FastSync) }
func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }
func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
@@ -1115,16 +1061,6 @@ func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
// Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack65Full(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH65, FullSync)
-}
-func TestHighTDStarvationAttack65Fast(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH65, FastSync)
-}
-func TestHighTDStarvationAttack65Light(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH65, LightSync)
-}
-
func TestHighTDStarvationAttack66Full(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH66, FullSync)
}
@@ -1149,7 +1085,6 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
}
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH65) }
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
@@ -1202,10 +1137,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
-func TestSyncProgress65Full(t *testing.T) { testSyncProgress(t, eth.ETH65, FullSync) }
-func TestSyncProgress65Fast(t *testing.T) { testSyncProgress(t, eth.ETH65, FastSync) }
-func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, eth.ETH65, LightSync) }
-
func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) }
func TestSyncProgress66Fast(t *testing.T) { testSyncProgress(t, eth.ETH66, FastSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
@@ -1286,10 +1217,6 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// revertal).
-func TestForkedSyncProgress65Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH65, FullSync) }
-func TestForkedSyncProgress65Fast(t *testing.T) { testForkedSyncProgress(t, eth.ETH65, FastSync) }
-func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH65, LightSync) }
-
func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) }
func TestForkedSyncProgress66Fast(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FastSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
@@ -1362,10 +1289,6 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress65Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH65, FullSync) }
-func TestFailedSyncProgress65Fast(t *testing.T) { testFailedSyncProgress(t, eth.ETH65, FastSync) }
-func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH65, LightSync) }
-
func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) }
func TestFailedSyncProgress66Fast(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FastSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
@@ -1435,10 +1358,6 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress65Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, FullSync) }
-func TestFakedSyncProgress65Fast(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, FastSync) }
-func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH65, LightSync) }
-
func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) }
func TestFakedSyncProgress66Fast(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FastSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
@@ -1512,10 +1431,6 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
// This test reproduces an issue where unexpected deliveries would
// block indefinitely if they arrived at the right time.
-func TestDeliverHeadersHang65Full(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, FullSync) }
-func TestDeliverHeadersHang65Fast(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, FastSync) }
-func TestDeliverHeadersHang65Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH65, LightSync) }
-
func TestDeliverHeadersHang66Full(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
func TestDeliverHeadersHang66Fast(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }
@@ -1673,12 +1588,6 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
// Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks.
-func TestCheckpointEnforcement65Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FullSync) }
-func TestCheckpointEnforcement65Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH65, FastSync) }
-func TestCheckpointEnforcement65Light(t *testing.T) {
- testCheckpointEnforcement(t, eth.ETH65, LightSync)
-}
-
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
func TestCheckpointEnforcement66Light(t *testing.T) {
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 066a36631..863294832 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -413,7 +413,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
throughput := func(p *peerConnection) int {
return p.rates.Capacity(eth.BlockHeadersMsg, time.Second)
}
- return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput)
+ return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
}
// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
@@ -425,7 +425,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
throughput := func(p *peerConnection) int {
return p.rates.Capacity(eth.BlockBodiesMsg, time.Second)
}
- return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput)
+ return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
}
// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
@@ -437,7 +437,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
throughput := func(p *peerConnection) int {
return p.rates.Capacity(eth.ReceiptsMsg, time.Second)
}
- return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput)
+ return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
}
// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
@@ -449,7 +449,7 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
throughput := func(p *peerConnection) int {
return p.rates.Capacity(eth.NodeDataMsg, time.Second)
}
- return ps.idlePeers(eth.ETH65, eth.ETH66, idle, throughput)
+ return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
}
// idlePeers retrieves a flat list of all currently idle peers satisfying the
diff --git a/eth/handler.go b/eth/handler.go
index aff4871af..06a8088bf 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -117,7 +117,6 @@ type handler struct {
whitelist map[uint64]common.Hash
// channels for fetcher, syncer, txsyncLoop
- txsyncCh chan *txsync
quitSync chan struct{}
chainSync *chainSyncer
@@ -140,7 +139,6 @@ func newHandler(config *handlerConfig) (*handler, error) {
chain: config.Chain,
peers: newPeerSet(),
whitelist: config.Whitelist,
- txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}),
}
if config.Sync == downloader.FullSync {
@@ -408,9 +406,8 @@ func (h *handler) Start(maxPeers int) {
go h.minedBroadcastLoop()
// start sync handlers
- h.wg.Add(2)
+ h.wg.Add(1)
go h.chainSync.loop()
- go h.txsyncLoop64() // TODO(karalabe): Legacy initial tx echange, drop with eth/64.
}
func (h *handler) Stop() {
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index 038de4699..039091244 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -80,7 +80,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
// Tests that peers are correctly accepted (or rejected) based on the advertised
// fork IDs in the protocol handshake.
-func TestForkIDSplit65(t *testing.T) { testForkIDSplit(t, eth.ETH65) }
func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }
func testForkIDSplit(t *testing.T, protocol uint) {
@@ -236,7 +235,6 @@ func testForkIDSplit(t *testing.T, protocol uint) {
}
// Tests that received transactions are added to the local pool.
-func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, eth.ETH65) }
func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }
func testRecvTransactions(t *testing.T, protocol uint) {
@@ -294,7 +292,6 @@ func testRecvTransactions(t *testing.T, protocol uint) {
}
// This test checks that pending transactions are sent.
-func TestSendTransactions65(t *testing.T) { testSendTransactions(t, eth.ETH65) }
func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }
func testSendTransactions(t *testing.T, protocol uint) {
@@ -306,7 +303,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
insert := make([]*types.Transaction, 100)
for nonce := range insert {
- tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, txsyncPackSize/10))
+ tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, 10240))
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
insert[nonce] = tx
@@ -380,7 +377,6 @@ func testSendTransactions(t *testing.T, protocol uint) {
// Tests that transactions get propagated to all attached peers, either via direct
// broadcasts or via announcements/retrievals.
-func TestTransactionPropagation65(t *testing.T) { testTransactionPropagation(t, eth.ETH65) }
func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }
func testTransactionPropagation(t *testing.T, protocol uint) {
@@ -521,8 +517,8 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
defer p2pLocal.Close()
defer p2pRemote.Close()
- local := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pLocal), p2pLocal, handler.txpool)
- remote := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pRemote), p2pRemote, handler.txpool)
+ local := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pLocal), p2pLocal, handler.txpool)
+ remote := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pRemote), p2pRemote, handler.txpool)
defer local.Close()
defer remote.Close()
@@ -543,30 +539,39 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
t.Fatalf("failed to run protocol handshake")
}
-
// Connect a new peer and check that we receive the checkpoint challenge.
if checkpoint {
- if err := remote.ExpectRequestHeadersByNumber(response.Number.Uint64(), 1, 0, false); err != nil {
- t.Fatalf("challenge mismatch: %v", err)
+ msg, err := p2pRemote.ReadMsg()
+ if err != nil {
+ t.Fatalf("failed to read checkpoint challenge: %v", err)
+ }
+ request := new(eth.GetBlockHeadersPacket66)
+ if err := msg.Decode(request); err != nil {
+ t.Fatalf("failed to decode checkpoint challenge: %v", err)
+ }
+ query := request.GetBlockHeadersPacket
+ if query.Origin.Number != response.Number.Uint64() || query.Amount != 1 || query.Skip != 0 || query.Reverse {
+ t.Fatalf("challenge mismatch: have [%d, %d, %d, %v] want [%d, %d, %d, %v]",
+ query.Origin.Number, query.Amount, query.Skip, query.Reverse,
+ response.Number.Uint64(), 1, 0, false)
}
// Create a block to reply to the challenge if no timeout is simulated.
if !timeout {
if empty {
- if err := remote.SendBlockHeaders([]*types.Header{}); err != nil {
+ if err := remote.ReplyBlockHeaders(request.RequestId, []*types.Header{}); err != nil {
t.Fatalf("failed to answer challenge: %v", err)
}
} else if match {
- if err := remote.SendBlockHeaders([]*types.Header{response}); err != nil {
+ if err := remote.ReplyBlockHeaders(request.RequestId, []*types.Header{response}); err != nil {
t.Fatalf("failed to answer challenge: %v", err)
}
} else {
- if err := remote.SendBlockHeaders([]*types.Header{{Number: response.Number}}); err != nil {
+ if err := remote.ReplyBlockHeaders(request.RequestId, []*types.Header{{Number: response.Number}}); err != nil {
t.Fatalf("failed to answer challenge: %v", err)
}
}
}
}
-
// Wait until the test timeout passes to ensure proper cleanup
time.Sleep(syncChallengeTimeout + 300*time.Millisecond)
@@ -619,8 +624,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
defer sourcePipe.Close()
defer sinkPipe.Close()
- sourcePeer := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
- sinkPeer := eth.NewPeer(eth.ETH65, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
+ sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
+ sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
defer sourcePeer.Close()
defer sinkPeer.Close()
@@ -671,7 +676,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
// Tests that a propagated malformed block (uncles or transactions don't match
// with the hashes in the header) gets discarded and not broadcast forward.
-func TestBroadcastMalformedBlock65(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH65) }
func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) }
func testBroadcastMalformedBlock(t *testing.T, protocol uint) {
diff --git a/eth/plugin_hooks.go b/eth/plugin_hooks.go
index 9107ed7c2..5773504af 100644
--- a/eth/plugin_hooks.go
+++ b/eth/plugin_hooks.go
@@ -37,6 +37,9 @@ func (mt *metaTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration,
tracer.CaptureEnd(output, gasUsed, t, err)
}
}
+// TODO: Align these with PluGeth-utils
+func (mt *metaTracer) CaptureEnter(vm.OpCode, common.Address, common.Address, []byte, uint64, *big.Int) {}
+func (mt *metaTracer) CaptureExit([]byte, uint64, error) {}
func PluginUpdateBlockchainVMConfig(pl *plugins.PluginLoader, cfg *vm.Config) {
tracerList := plugins.Lookup("LiveTracer", func(item interface{}) bool {
diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go
index 6bbaa2f55..828930014 100644
--- a/eth/protocols/eth/handler.go
+++ b/eth/protocols/eth/handler.go
@@ -171,39 +171,21 @@ type Decoder interface {
Time() time.Time
}
-var eth65 = map[uint64]msgHandler{
- GetBlockHeadersMsg: handleGetBlockHeaders,
- BlockHeadersMsg: handleBlockHeaders,
- GetBlockBodiesMsg: handleGetBlockBodies,
- BlockBodiesMsg: handleBlockBodies,
- GetNodeDataMsg: handleGetNodeData,
- NodeDataMsg: handleNodeData,
- GetReceiptsMsg: handleGetReceipts,
- ReceiptsMsg: handleReceipts,
- NewBlockHashesMsg: handleNewBlockhashes,
- NewBlockMsg: handleNewBlock,
- TransactionsMsg: handleTransactions,
- NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes,
- GetPooledTransactionsMsg: handleGetPooledTransactions,
- PooledTransactionsMsg: handlePooledTransactions,
-}
-
var eth66 = map[uint64]msgHandler{
NewBlockHashesMsg: handleNewBlockhashes,
NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions,
NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes,
- // eth66 messages with request-id
- GetBlockHeadersMsg: handleGetBlockHeaders66,
- BlockHeadersMsg: handleBlockHeaders66,
- GetBlockBodiesMsg: handleGetBlockBodies66,
- BlockBodiesMsg: handleBlockBodies66,
- GetNodeDataMsg: handleGetNodeData66,
- NodeDataMsg: handleNodeData66,
- GetReceiptsMsg: handleGetReceipts66,
- ReceiptsMsg: handleReceipts66,
- GetPooledTransactionsMsg: handleGetPooledTransactions66,
- PooledTransactionsMsg: handlePooledTransactions66,
+ GetBlockHeadersMsg: handleGetBlockHeaders66,
+ BlockHeadersMsg: handleBlockHeaders66,
+ GetBlockBodiesMsg: handleGetBlockBodies66,
+ BlockBodiesMsg: handleBlockBodies66,
+ GetNodeDataMsg: handleGetNodeData66,
+ NodeDataMsg: handleNodeData66,
+ GetReceiptsMsg: handleGetReceipts66,
+ ReceiptsMsg: handleReceipts66,
+ GetPooledTransactionsMsg: handleGetPooledTransactions66,
+ PooledTransactionsMsg: handlePooledTransactions66,
}
// handleMessage is invoked whenever an inbound message is received from a remote
@@ -219,10 +201,11 @@ func handleMessage(backend Backend, peer *Peer) error {
}
defer msg.Discard()
- var handlers = eth65
- if peer.Version() >= ETH66 {
- handlers = eth66
- }
+ var handlers = eth66
+ //if peer.Version() >= ETH67 { // Left in as a sample when new protocol is added
+ // handlers = eth67
+ //}
+
// Track the amount of time it takes to serve the request and run the handler
if metrics.Enabled {
h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index 473be3f9b..809f17e36 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -110,7 +110,6 @@ func (b *testBackend) Handle(*Peer, Packet) error {
}
// Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeaders65(t *testing.T) { testGetBlockHeaders(t, ETH65) }
func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) }
func testGetBlockHeaders(t *testing.T, protocol uint) {
@@ -254,44 +253,30 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
}
// Send the hash request and verify the response
- if protocol <= ETH65 {
- p2p.Send(peer.app, GetBlockHeadersMsg, tt.query)
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, headers); err != nil {
- t.Errorf("test %d: headers mismatch: %v", i, err)
- }
- } else {
- p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
- RequestId: 123,
- GetBlockHeadersPacket: tt.query,
- })
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
- RequestId: 123,
- BlockHeadersPacket: headers,
- }); err != nil {
- t.Errorf("test %d: headers mismatch: %v", i, err)
- }
+ p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+ RequestId: 123,
+ GetBlockHeadersPacket: tt.query,
+ })
+ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
+ RequestId: 123,
+ BlockHeadersPacket: headers,
+ }); err != nil {
+ t.Errorf("test %d: headers mismatch: %v", i, err)
}
// If the test used number origins, repeat with hashes as the too
if tt.query.Origin.Hash == (common.Hash{}) {
if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0
- if protocol <= ETH65 {
- p2p.Send(peer.app, GetBlockHeadersMsg, tt.query)
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, headers); err != nil {
- t.Errorf("test %d: headers mismatch: %v", i, err)
- }
- } else {
- p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
- RequestId: 456,
- GetBlockHeadersPacket: tt.query,
- })
- if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
- RequestId: 456,
- BlockHeadersPacket: headers,
- }); err != nil {
- t.Errorf("test %d: headers mismatch: %v", i, err)
- }
+ p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+ RequestId: 456,
+ GetBlockHeadersPacket: tt.query,
+ })
+ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
+ RequestId: 456,
+ BlockHeadersPacket: headers,
+ }); err != nil {
+ t.Errorf("test %d: headers mismatch: %v", i, err)
}
}
}
@@ -299,7 +284,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodies65(t *testing.T) { testGetBlockBodies(t, ETH65) }
func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) }
func testGetBlockBodies(t *testing.T, protocol uint) {
@@ -369,28 +353,20 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
}
}
// Send the hash request and verify the response
- if protocol <= ETH65 {
- p2p.Send(peer.app, GetBlockBodiesMsg, hashes)
- if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, bodies); err != nil {
- t.Errorf("test %d: bodies mismatch: %v", i, err)
- }
- } else {
- p2p.Send(peer.app, GetBlockBodiesMsg, GetBlockBodiesPacket66{
- RequestId: 123,
- GetBlockBodiesPacket: hashes,
- })
- if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, BlockBodiesPacket66{
- RequestId: 123,
- BlockBodiesPacket: bodies,
- }); err != nil {
- t.Errorf("test %d: bodies mismatch: %v", i, err)
- }
+ p2p.Send(peer.app, GetBlockBodiesMsg, GetBlockBodiesPacket66{
+ RequestId: 123,
+ GetBlockBodiesPacket: hashes,
+ })
+ if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, BlockBodiesPacket66{
+ RequestId: 123,
+ BlockBodiesPacket: bodies,
+ }); err != nil {
+ t.Errorf("test %d: bodies mismatch: %v", i, err)
}
}
}
// Tests that the state trie nodes can be retrieved based on hashes.
-func TestGetNodeData65(t *testing.T) { testGetNodeData(t, ETH65) }
func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66) }
func testGetNodeData(t *testing.T, protocol uint) {
@@ -449,14 +425,10 @@ func testGetNodeData(t *testing.T, protocol uint) {
}
it.Release()
- if protocol <= ETH65 {
- p2p.Send(peer.app, GetNodeDataMsg, hashes)
- } else {
- p2p.Send(peer.app, GetNodeDataMsg, GetNodeDataPacket66{
- RequestId: 123,
- GetNodeDataPacket: hashes,
- })
- }
+ p2p.Send(peer.app, GetNodeDataMsg, GetNodeDataPacket66{
+ RequestId: 123,
+ GetNodeDataPacket: hashes,
+ })
msg, err := peer.app.ReadMsg()
if err != nil {
t.Fatalf("failed to read node data response: %v", err)
@@ -464,18 +436,14 @@ func testGetNodeData(t *testing.T, protocol uint) {
if msg.Code != NodeDataMsg {
t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg)
}
- var data [][]byte
- if protocol <= ETH65 {
- if err := msg.Decode(&data); err != nil {
- t.Fatalf("failed to decode response node data: %v", err)
- }
- } else {
- var res NodeDataPacket66
- if err := msg.Decode(&res); err != nil {
- t.Fatalf("failed to decode response node data: %v", err)
- }
- data = res.NodeDataPacket
+ var (
+ data [][]byte
+ res NodeDataPacket66
+ )
+ if err := msg.Decode(&res); err != nil {
+ t.Fatalf("failed to decode response node data: %v", err)
}
+ data = res.NodeDataPacket
// Verify that all hashes correspond to the requested data, and reconstruct a state tree
for i, want := range hashes {
if hash := crypto.Keccak256Hash(data[i]); hash != want {
@@ -506,7 +474,6 @@ func testGetNodeData(t *testing.T, protocol uint) {
}
// Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetBlockReceipts65(t *testing.T) { testGetBlockReceipts(t, ETH65) }
func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) }
func testGetBlockReceipts(t *testing.T, protocol uint) {
@@ -566,21 +533,14 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
}
// Send the hash request and verify the response
- if protocol <= ETH65 {
- p2p.Send(peer.app, GetReceiptsMsg, hashes)
- if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, receipts); err != nil {
- t.Errorf("receipts mismatch: %v", err)
- }
- } else {
- p2p.Send(peer.app, GetReceiptsMsg, GetReceiptsPacket66{
- RequestId: 123,
- GetReceiptsPacket: hashes,
- })
- if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, ReceiptsPacket66{
- RequestId: 123,
- ReceiptsPacket: receipts,
- }); err != nil {
- t.Errorf("receipts mismatch: %v", err)
- }
+ p2p.Send(peer.app, GetReceiptsMsg, GetReceiptsPacket66{
+ RequestId: 123,
+ GetReceiptsPacket: hashes,
+ })
+ if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, ReceiptsPacket66{
+ RequestId: 123,
+ ReceiptsPacket: receipts,
+ }); err != nil {
+ t.Errorf("receipts mismatch: %v", err)
}
}
diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go
index d7d993a23..e54838cbc 100644
--- a/eth/protocols/eth/handlers.go
+++ b/eth/protocols/eth/handlers.go
@@ -27,17 +27,6 @@ import (
"github.com/ethereum/go-ethereum/trie"
)
-// handleGetBlockHeaders handles Block header query, collect the requested headers and reply
-func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
- // Decode the complex header query
- var query GetBlockHeadersPacket
- if err := msg.Decode(&query); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- response := answerGetBlockHeadersQuery(backend, &query, peer)
- return peer.SendBlockHeaders(response)
-}
-
// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders
func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
// Decode the complex header query
@@ -135,16 +124,6 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p
return headers
}
-func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
- // Decode the block body retrieval message
- var query GetBlockBodiesPacket
- if err := msg.Decode(&query); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- response := answerGetBlockBodiesQuery(backend, query, peer)
- return peer.SendBlockBodiesRLP(response)
-}
-
func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
// Decode the block body retrieval message
var query GetBlockBodiesPacket66
@@ -174,16 +153,6 @@ func answerGetBlockBodiesQuery(backend Backend, query GetBlockBodiesPacket, peer
return bodies
}
-func handleGetNodeData(backend Backend, msg Decoder, peer *Peer) error {
- // Decode the trie node data retrieval message
- var query GetNodeDataPacket
- if err := msg.Decode(&query); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- response := answerGetNodeDataQuery(backend, query, peer)
- return peer.SendNodeData(response)
-}
-
func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error {
// Decode the trie node data retrieval message
var query GetNodeDataPacket66
@@ -223,16 +192,6 @@ func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer
return nodes
}
-func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error {
- // Decode the block receipts retrieval message
- var query GetReceiptsPacket
- if err := msg.Decode(&query); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- response := answerGetReceiptsQuery(backend, query, peer)
- return peer.SendReceiptsRLP(response)
-}
-
func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error {
// Decode the block receipts retrieval message
var query GetReceiptsPacket66
@@ -312,15 +271,6 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error {
return backend.Handle(peer, ann)
}
-func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {
- // A batch of headers arrived to one of our previous requests
- res := new(BlockHeadersPacket)
- if err := msg.Decode(res); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- return backend.Handle(peer, res)
-}
-
func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
// A batch of headers arrived to one of our previous requests
res := new(BlockHeadersPacket66)
@@ -332,15 +282,6 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {
return backend.Handle(peer, &res.BlockHeadersPacket)
}
-func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {
- // A batch of block bodies arrived to one of our previous requests
- res := new(BlockBodiesPacket)
- if err := msg.Decode(res); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- return backend.Handle(peer, res)
-}
-
func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
// A batch of block bodies arrived to one of our previous requests
res := new(BlockBodiesPacket66)
@@ -352,15 +293,6 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {
return backend.Handle(peer, &res.BlockBodiesPacket)
}
-func handleNodeData(backend Backend, msg Decoder, peer *Peer) error {
- // A batch of node state data arrived to one of our previous requests
- res := new(NodeDataPacket)
- if err := msg.Decode(res); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- return backend.Handle(peer, res)
-}
-
func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
// A batch of node state data arrived to one of our previous requests
res := new(NodeDataPacket66)
@@ -372,15 +304,6 @@ func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
return backend.Handle(peer, &res.NodeDataPacket)
}
-func handleReceipts(backend Backend, msg Decoder, peer *Peer) error {
- // A batch of receipts arrived to one of our previous requests
- res := new(ReceiptsPacket)
- if err := msg.Decode(res); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- return backend.Handle(peer, res)
-}
-
func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
// A batch of receipts arrived to one of our previous requests
res := new(ReceiptsPacket66)
@@ -409,16 +332,6 @@ func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer)
return backend.Handle(peer, ann)
}
-func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
- // Decode the pooled transactions retrieval message
- var query GetPooledTransactionsPacket
- if err := msg.Decode(&query); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- hashes, txs := answerGetPooledTransactions(backend, query, peer)
- return peer.SendPooledTransactionsRLP(hashes, txs)
-}
-
func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {
// Decode the pooled transactions retrieval message
var query GetPooledTransactionsPacket66
@@ -477,26 +390,6 @@ func handleTransactions(backend Backend, msg Decoder, peer *Peer) error {
return backend.Handle(peer, &txs)
}
-func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error {
- // Transactions arrived, make sure we have a valid and fresh chain to handle them
- if !backend.AcceptTxs() {
- return nil
- }
- // Transactions can be processed, parse all of them and deliver to the pool
- var txs PooledTransactionsPacket
- if err := msg.Decode(&txs); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- for i, tx := range txs {
- // Validate and mark the remote transaction
- if tx == nil {
- return fmt.Errorf("%w: transaction %d is nil", errDecode, i)
- }
- peer.markTransaction(tx.Hash())
- }
- return backend.Handle(peer, &txs)
-}
-
func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {
// Transactions arrived, make sure we have a valid and fresh chain to handle them
if !backend.AcceptTxs() {
diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go
index 3bebda2dc..05d473e05 100644
--- a/eth/protocols/eth/handshake_test.go
+++ b/eth/protocols/eth/handshake_test.go
@@ -27,7 +27,6 @@ import (
)
// Tests that handshake failures are detected and reported correctly.
-func TestHandshake65(t *testing.T) { testHandshake(t, ETH65) }
func TestHandshake66(t *testing.T) { testHandshake(t, ETH66) }
func testHandshake(t *testing.T, protocol uint) {
diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go
index e619c183b..1b4cfeb3d 100644
--- a/eth/protocols/eth/peer.go
+++ b/eth/protocols/eth/peer.go
@@ -75,12 +75,12 @@ type Peer struct {
head common.Hash // Latest advertised head block hash
td *big.Int // Latest advertised head block total difficulty
- knownBlocks mapset.Set // Set of block hashes known to be known by this peer
+ knownBlocks *knownCache // Set of block hashes known to be known by this peer
queuedBlocks chan *blockPropagation // Queue of blocks to broadcast to the peer
queuedBlockAnns chan *types.Block // Queue of blocks to announce to the peer
txpool TxPool // Transaction pool used by the broadcasters for liveness checks
- knownTxs mapset.Set // Set of transaction hashes known to be known by this peer
+ knownTxs *knownCache // Set of transaction hashes known to be known by this peer
txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests
@@ -96,8 +96,8 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe
Peer: p,
rw: rw,
version: version,
- knownTxs: mapset.NewSet(),
- knownBlocks: mapset.NewSet(),
+ knownTxs: newKnownCache(maxKnownTxs),
+ knownBlocks: newKnownCache(maxKnownBlocks),
queuedBlocks: make(chan *blockPropagation, maxQueuedBlocks),
queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
txBroadcast: make(chan []common.Hash),
@@ -108,9 +108,8 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe
// Start up all the broadcasters
go peer.broadcastBlocks()
go peer.broadcastTransactions()
- if version >= ETH65 {
- go peer.announceTransactions()
- }
+ go peer.announceTransactions()
+
return peer
}
@@ -163,9 +162,6 @@ func (p *Peer) KnownTransaction(hash common.Hash) bool {
// never be propagated to this particular peer.
func (p *Peer) markBlock(hash common.Hash) {
// If we reached the memory allowance, drop a previously known block hash
- for p.knownBlocks.Cardinality() >= maxKnownBlocks {
- p.knownBlocks.Pop()
- }
p.knownBlocks.Add(hash)
}
@@ -173,9 +169,6 @@ func (p *Peer) markBlock(hash common.Hash) {
// will never be propagated to this particular peer.
func (p *Peer) markTransaction(hash common.Hash) {
// If we reached the memory allowance, drop a previously known transaction hash
- for p.knownTxs.Cardinality() >= maxKnownTxs {
- p.knownTxs.Pop()
- }
p.knownTxs.Add(hash)
}
@@ -190,9 +183,6 @@ func (p *Peer) markTransaction(hash common.Hash) {
// tests that directly send messages without having to do the asyn queueing.
func (p *Peer) SendTransactions(txs types.Transactions) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
- for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) {
- p.knownTxs.Pop()
- }
for _, tx := range txs {
p.knownTxs.Add(tx.Hash())
}
@@ -206,12 +196,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
select {
case p.txBroadcast <- hashes:
// Mark all the transactions as known, but ensure we don't overflow our limits
- for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
- p.knownTxs.Pop()
- }
- for _, hash := range hashes {
- p.knownTxs.Add(hash)
- }
+ p.knownTxs.Add(hashes...)
case <-p.term:
p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
}
@@ -225,12 +210,7 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
// not be managed directly.
func (p *Peer) sendPooledTransactionHashes(hashes []common.Hash) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
- for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
- p.knownTxs.Pop()
- }
- for _, hash := range hashes {
- p.knownTxs.Add(hash)
- }
+ p.knownTxs.Add(hashes...)
return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket(hashes))
}
@@ -241,42 +221,17 @@ func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
select {
case p.txAnnounce <- hashes:
// Mark all the transactions as known, but ensure we don't overflow our limits
- for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
- p.knownTxs.Pop()
- }
- for _, hash := range hashes {
- p.knownTxs.Add(hash)
- }
+ p.knownTxs.Add(hashes...)
case <-p.term:
p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
}
}
-// SendPooledTransactionsRLP sends requested transactions to the peer and adds the
-// hashes in its transaction hash set for future reference.
-//
-// Note, the method assumes the hashes are correct and correspond to the list of
-// transactions being sent.
-func (p *Peer) SendPooledTransactionsRLP(hashes []common.Hash, txs []rlp.RawValue) error {
- // Mark all the transactions as known, but ensure we don't overflow our limits
- for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
- p.knownTxs.Pop()
- }
- for _, hash := range hashes {
- p.knownTxs.Add(hash)
- }
- return p2p.Send(p.rw, PooledTransactionsMsg, txs) // Not packed into PooledTransactionsPacket to avoid RLP decoding
-}
-
// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP.
func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
- for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
- p.knownTxs.Pop()
- }
- for _, hash := range hashes {
- p.knownTxs.Add(hash)
- }
+ p.knownTxs.Add(hashes...)
+
// Not packed into PooledTransactionsPacket to avoid RLP decoding
return p2p.Send(p.rw, PooledTransactionsMsg, PooledTransactionsRLPPacket66{
RequestId: id,
@@ -288,12 +243,8 @@ func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs [
// a hash notification.
func (p *Peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
// Mark all the block hashes as known, but ensure we don't overflow our limits
- for p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) {
- p.knownBlocks.Pop()
- }
- for _, hash := range hashes {
- p.knownBlocks.Add(hash)
- }
+ p.knownBlocks.Add(hashes...)
+
request := make(NewBlockHashesPacket, len(hashes))
for i := 0; i < len(hashes); i++ {
request[i].Hash = hashes[i]
@@ -309,9 +260,6 @@ func (p *Peer) AsyncSendNewBlockHash(block *types.Block) {
select {
case p.queuedBlockAnns <- block:
// Mark all the block hash as known, but ensure we don't overflow our limits
- for p.knownBlocks.Cardinality() >= maxKnownBlocks {
- p.knownBlocks.Pop()
- }
p.knownBlocks.Add(block.Hash())
default:
p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
@@ -321,9 +269,6 @@ func (p *Peer) AsyncSendNewBlockHash(block *types.Block) {
// SendNewBlock propagates an entire block to a remote peer.
func (p *Peer) SendNewBlock(block *types.Block, td *big.Int) error {
// Mark all the block hash as known, but ensure we don't overflow our limits
- for p.knownBlocks.Cardinality() >= maxKnownBlocks {
- p.knownBlocks.Pop()
- }
p.knownBlocks.Add(block.Hash())
return p2p.Send(p.rw, NewBlockMsg, &NewBlockPacket{
Block: block,
@@ -337,20 +282,12 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
select {
case p.queuedBlocks <- &blockPropagation{block: block, td: td}:
// Mark all the block hash as known, but ensure we don't overflow our limits
- for p.knownBlocks.Cardinality() >= maxKnownBlocks {
- p.knownBlocks.Pop()
- }
p.knownBlocks.Add(block.Hash())
default:
p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
}
}
-// SendBlockHeaders sends a batch of block headers to the remote peer.
-func (p *Peer) SendBlockHeaders(headers []*types.Header) error {
- return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersPacket(headers))
-}
-
// ReplyBlockHeaders is the eth/66 version of SendBlockHeaders.
func (p *Peer) ReplyBlockHeaders(id uint64, headers []*types.Header) error {
return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersPacket66{
@@ -359,12 +296,6 @@ func (p *Peer) ReplyBlockHeaders(id uint64, headers []*types.Header) error {
})
}
-// SendBlockBodiesRLP sends a batch of block contents to the remote peer from
-// an already RLP encoded format.
-func (p *Peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {
- return p2p.Send(p.rw, BlockBodiesMsg, bodies) // Not packed into BlockBodiesPacket to avoid RLP decoding
-}
-
// ReplyBlockBodiesRLP is the eth/66 version of SendBlockBodiesRLP.
func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
// Not packed into BlockBodiesPacket to avoid RLP decoding
@@ -374,12 +305,6 @@ func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
})
}
-// SendNodeDataRLP sends a batch of arbitrary internal data, corresponding to the
-// hashes requested.
-func (p *Peer) SendNodeData(data [][]byte) error {
- return p2p.Send(p.rw, NodeDataMsg, NodeDataPacket(data))
-}
-
// ReplyNodeData is the eth/66 response to GetNodeData.
func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
return p2p.Send(p.rw, NodeDataMsg, NodeDataPacket66{
@@ -388,12 +313,6 @@ func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
})
}
-// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the
-// ones requested from an already RLP encoded format.
-func (p *Peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
- return p2p.Send(p.rw, ReceiptsMsg, receipts) // Not packed into ReceiptsPacket to avoid RLP decoding
-}
-
// ReplyReceiptsRLP is the eth/66 response to GetReceipts.
func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
return p2p.Send(p.rw, ReceiptsMsg, ReceiptsRLPPacket66{
@@ -406,138 +325,136 @@ func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
// single header. It is used solely by the fetcher.
func (p *Peer) RequestOneHeader(hash common.Hash) error {
p.Log().Debug("Fetching single header", "hash", hash)
- query := GetBlockHeadersPacket{
- Origin: HashOrNumber{Hash: hash},
- Amount: uint64(1),
- Skip: uint64(0),
- Reverse: false,
- }
- if p.Version() >= ETH66 {
- id := rand.Uint64()
+ id := rand.Uint64()
- requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
- return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
- RequestId: id,
- GetBlockHeadersPacket: &query,
- })
- }
- return p2p.Send(p.rw, GetBlockHeadersMsg, &query)
+ requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
+ return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
+ RequestId: id,
+ GetBlockHeadersPacket: &GetBlockHeadersPacket{
+ Origin: HashOrNumber{Hash: hash},
+ Amount: uint64(1),
+ Skip: uint64(0),
+ Reverse: false,
+ },
+ })
}
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
- query := GetBlockHeadersPacket{
- Origin: HashOrNumber{Hash: origin},
- Amount: uint64(amount),
- Skip: uint64(skip),
- Reverse: reverse,
- }
- if p.Version() >= ETH66 {
- id := rand.Uint64()
+ id := rand.Uint64()
- requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
- return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
- RequestId: id,
- GetBlockHeadersPacket: &query,
- })
- }
- return p2p.Send(p.rw, GetBlockHeadersMsg, &query)
+ requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
+ return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
+ RequestId: id,
+ GetBlockHeadersPacket: &GetBlockHeadersPacket{
+ Origin: HashOrNumber{Hash: origin},
+ Amount: uint64(amount),
+ Skip: uint64(skip),
+ Reverse: reverse,
+ },
+ })
}
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
- query := GetBlockHeadersPacket{
- Origin: HashOrNumber{Number: origin},
- Amount: uint64(amount),
- Skip: uint64(skip),
- Reverse: reverse,
- }
- if p.Version() >= ETH66 {
- id := rand.Uint64()
+ id := rand.Uint64()
- requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
- return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
- RequestId: id,
- GetBlockHeadersPacket: &query,
- })
- }
- return p2p.Send(p.rw, GetBlockHeadersMsg, &query)
-}
-
-// ExpectRequestHeadersByNumber is a testing method to mirror the recipient side
-// of the RequestHeadersByNumber operation.
-func (p *Peer) ExpectRequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
- req := &GetBlockHeadersPacket{
- Origin: HashOrNumber{Number: origin},
- Amount: uint64(amount),
- Skip: uint64(skip),
- Reverse: reverse,
- }
- return p2p.ExpectMsg(p.rw, GetBlockHeadersMsg, req)
+ requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
+ return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
+ RequestId: id,
+ GetBlockHeadersPacket: &GetBlockHeadersPacket{
+ Origin: HashOrNumber{Number: origin},
+ Amount: uint64(amount),
+ Skip: uint64(skip),
+ Reverse: reverse,
+ },
+ })
}
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *Peer) RequestBodies(hashes []common.Hash) error {
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
- if p.Version() >= ETH66 {
- id := rand.Uint64()
+ id := rand.Uint64()
- requestTracker.Track(p.id, p.version, GetBlockBodiesMsg, BlockBodiesMsg, id)
- return p2p.Send(p.rw, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
- RequestId: id,
- GetBlockBodiesPacket: hashes,
- })
- }
- return p2p.Send(p.rw, GetBlockBodiesMsg, GetBlockBodiesPacket(hashes))
+ requestTracker.Track(p.id, p.version, GetBlockBodiesMsg, BlockBodiesMsg, id)
+ return p2p.Send(p.rw, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
+ RequestId: id,
+ GetBlockBodiesPacket: hashes,
+ })
}
// RequestNodeData fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *Peer) RequestNodeData(hashes []common.Hash) error {
p.Log().Debug("Fetching batch of state data", "count", len(hashes))
- if p.Version() >= ETH66 {
- id := rand.Uint64()
+ id := rand.Uint64()
- requestTracker.Track(p.id, p.version, GetNodeDataMsg, NodeDataMsg, id)
- return p2p.Send(p.rw, GetNodeDataMsg, &GetNodeDataPacket66{
- RequestId: id,
- GetNodeDataPacket: hashes,
- })
- }
- return p2p.Send(p.rw, GetNodeDataMsg, GetNodeDataPacket(hashes))
+ requestTracker.Track(p.id, p.version, GetNodeDataMsg, NodeDataMsg, id)
+ return p2p.Send(p.rw, GetNodeDataMsg, &GetNodeDataPacket66{
+ RequestId: id,
+ GetNodeDataPacket: hashes,
+ })
}
// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *Peer) RequestReceipts(hashes []common.Hash) error {
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
- if p.Version() >= ETH66 {
- id := rand.Uint64()
+ id := rand.Uint64()
- requestTracker.Track(p.id, p.version, GetReceiptsMsg, ReceiptsMsg, id)
- return p2p.Send(p.rw, GetReceiptsMsg, &GetReceiptsPacket66{
- RequestId: id,
- GetReceiptsPacket: hashes,
- })
- }
- return p2p.Send(p.rw, GetReceiptsMsg, GetReceiptsPacket(hashes))
+ requestTracker.Track(p.id, p.version, GetReceiptsMsg, ReceiptsMsg, id)
+ return p2p.Send(p.rw, GetReceiptsMsg, &GetReceiptsPacket66{
+ RequestId: id,
+ GetReceiptsPacket: hashes,
+ })
}
// RequestTxs fetches a batch of transactions from a remote node.
func (p *Peer) RequestTxs(hashes []common.Hash) error {
p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
- if p.Version() >= ETH66 {
- id := rand.Uint64()
+ id := rand.Uint64()
- requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id)
- return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{
- RequestId: id,
- GetPooledTransactionsPacket: hashes,
- })
- }
- return p2p.Send(p.rw, GetPooledTransactionsMsg, GetPooledTransactionsPacket(hashes))
+ requestTracker.Track(p.id, p.version, GetPooledTransactionsMsg, PooledTransactionsMsg, id)
+ return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{
+ RequestId: id,
+ GetPooledTransactionsPacket: hashes,
+ })
+}
+
+// knownCache is a cache for known hashes.
+type knownCache struct {
+ hashes mapset.Set
+ max int
+}
+
+// newKnownCache creates a new knownCache with a max capacity.
+func newKnownCache(max int) *knownCache {
+ return &knownCache{
+ max: max,
+ hashes: mapset.NewSet(),
+ }
+}
+
+// Add adds a list of elements to the set.
+func (k *knownCache) Add(hashes ...common.Hash) {
+ for k.hashes.Cardinality() > max(0, k.max-len(hashes)) {
+ k.hashes.Pop()
+ }
+ for _, hash := range hashes {
+ k.hashes.Add(hash)
+ }
+}
+
+// Contains returns whether the given item is in the set.
+func (k *knownCache) Contains(hash common.Hash) bool {
+ return k.hashes.Contains(hash)
+}
+
+// Cardinality returns the number of elements in the set.
+func (k *knownCache) Cardinality() int {
+ return k.hashes.Cardinality()
}
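One subtlety of the batch Add above is worth calling out: eviction only makes room relative to the incoming batch, so a single batch larger than max first drains the set and is then inserted whole, leaving the cache temporarily above its cap (TestPeerSet below relies on this). A self-contained sketch with a plain map standing in for mapset.Set:

package main

import "fmt"

// miniCache mimics knownCache's eviction: pop arbitrary entries until the
// incoming batch fits, then insert the whole batch.
type miniCache struct {
	set map[int]struct{}
	max int
}

func (c *miniCache) add(items ...int) {
	for len(c.set) > maxInt(0, c.max-len(items)) {
		for k := range c.set { // arbitrary pop, like mapset's Pop
			delete(c.set, k)
			break
		}
	}
	for _, it := range items {
		c.set[it] = struct{}{}
	}
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	c := &miniCache{set: map[int]struct{}{}, max: 5}
	c.add(1, 2, 3, 4, 5, 6, 7)
	fmt.Println(len(c.set)) // 7: a batch beyond max is kept in full
}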
diff --git a/eth/protocols/eth/peer_test.go b/eth/protocols/eth/peer_test.go
index 70e9959f8..fc9344370 100644
--- a/eth/protocols/eth/peer_test.go
+++ b/eth/protocols/eth/peer_test.go
@@ -21,7 +21,9 @@ package eth
import (
"crypto/rand"
+ "testing"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
)
@@ -59,3 +61,28 @@ func (p *testPeer) close() {
p.Peer.Close()
p.app.Close()
}
+
+func TestPeerSet(t *testing.T) {
+ size := 5
+ s := newKnownCache(size)
+
+ // add 10 items
+ for i := 0; i < size*2; i++ {
+ s.Add(common.Hash{byte(i)})
+ }
+
+ if s.Cardinality() != size {
+ t.Fatalf("wrong size, expected %d but found %d", size, s.Cardinality())
+ }
+
+ vals := []common.Hash{}
+ for i := 10; i < 20; i++ {
+ vals = append(vals, common.Hash{byte(i)})
+ }
+
+ // add items in batch
+ s.Add(vals...)
+ if s.Cardinality() < size {
+ t.Fatalf("wrong size, expected at least %d but found %d", size, s.Cardinality())
+ }
+}
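A small companion check that could sit next to TestPeerSet (sketch only, using the same newKnownCache helper and package imports): membership should be visible through Contains right after Add.

func TestKnownCacheContains(t *testing.T) {
	s := newKnownCache(3)
	h := common.Hash{0x01}
	s.Add(h)
	if !s.Contains(h) {
		t.Fatalf("expected %x to be tracked after Add", h)
	}
}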
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index de1b0ed1e..3c3da30fa 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -30,7 +30,6 @@ import (
// Constants to match up protocol versions and messages
const (
- ETH65 = 65
ETH66 = 66
)
@@ -40,31 +39,28 @@ const ProtocolName = "eth"
// ProtocolVersions are the supported versions of the `eth` protocol (first
// is primary).
-var ProtocolVersions = []uint{ETH66, ETH65}
+var ProtocolVersions = []uint{ETH66}
// protocolLengths are the number of implemented messages corresponding to
// different protocol versions.
-var protocolLengths = map[uint]uint64{ETH66: 17, ETH65: 17}
+var protocolLengths = map[uint]uint64{ETH66: 17}
// maxMessageSize is the maximum cap on the size of a protocol message.
const maxMessageSize = 10 * 1024 * 1024
const (
- // Protocol messages in eth/64
- StatusMsg = 0x00
- NewBlockHashesMsg = 0x01
- TransactionsMsg = 0x02
- GetBlockHeadersMsg = 0x03
- BlockHeadersMsg = 0x04
- GetBlockBodiesMsg = 0x05
- BlockBodiesMsg = 0x06
- NewBlockMsg = 0x07
- GetNodeDataMsg = 0x0d
- NodeDataMsg = 0x0e
- GetReceiptsMsg = 0x0f
- ReceiptsMsg = 0x10
-
- // Protocol messages overloaded in eth/65
+ StatusMsg = 0x00
+ NewBlockHashesMsg = 0x01
+ TransactionsMsg = 0x02
+ GetBlockHeadersMsg = 0x03
+ BlockHeadersMsg = 0x04
+ GetBlockBodiesMsg = 0x05
+ BlockBodiesMsg = 0x06
+ NewBlockMsg = 0x07
+ GetNodeDataMsg = 0x0d
+ NodeDataMsg = 0x0e
+ GetReceiptsMsg = 0x0f
+ ReceiptsMsg = 0x10
NewPooledTransactionHashesMsg = 0x08
GetPooledTransactionsMsg = 0x09
PooledTransactionsMsg = 0x0a
@@ -128,7 +124,7 @@ type GetBlockHeadersPacket struct {
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
}
-// GetBlockHeadersPacket represents a block header query over eth/66
+// GetBlockHeadersPacket66 represents a block header query over eth/66
type GetBlockHeadersPacket66 struct {
RequestId uint64
*GetBlockHeadersPacket
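The *Packet66 types all follow the pattern visible here: a RequestId field plus the embedded eth/65-era packet, which RLP-encodes as a two-element list. A self-contained sketch round-tripping that shape through geth's rlp package (DemoQuery and DemoQuery66 are illustrative, not geth types):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// DemoQuery plays the role of the inner eth/65-era packet.
type DemoQuery struct {
	Origin uint64
	Amount uint64
}

// DemoQuery66 wraps it the same way GetBlockHeadersPacket66 does above.
type DemoQuery66 struct {
	RequestId uint64
	*DemoQuery
}

func main() {
	enc, err := rlp.EncodeToBytes(&DemoQuery66{RequestId: 7, DemoQuery: &DemoQuery{Origin: 1, Amount: 10}})
	if err != nil {
		panic(err)
	}
	var dec DemoQuery66
	if err := rlp.DecodeBytes(enc, &dec); err != nil {
		panic(err)
	}
	fmt.Println(dec.RequestId, dec.Amount) // 7 10
}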
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index 3d668a2eb..c62f9cfca 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -319,7 +319,7 @@ func handleMessage(backend Backend, peer *Peer) error {
if err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
- var acc state.Account
+ var acc types.StateAccount
if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
@@ -469,7 +469,7 @@ func handleMessage(backend Backend, peer *Peer) error {
// Storage slots requested, open the storage trie and retrieve from there
account, err := snap.Account(common.BytesToHash(pathset[0]))
loads++ // always account database reads, even for failures
- if err != nil {
+ if err != nil || account == nil {
break
}
stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
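The added account == nil guard reflects the lookup convention the snapshot accessor uses: a missing account is reported as (nil, nil) rather than as an error, so callers must check both return values. A self-contained sketch with illustrative stand-in types:

package main

import "fmt"

type account struct{ Root string }

var store = map[string]*account{"alice": {Root: "0xabc"}}

// lookup returns (nil, nil) when the account is absent, mirroring the
// snapshot accessor's behavior.
func lookup(key string) (*account, error) {
	return store[key], nil
}

func main() {
	acc, err := lookup("bob")
	if err != nil || acc == nil { // the combined guard from the handler above
		fmt.Println("missing account, skipping storage range")
		return
	}
	fmt.Println(acc.Root)
}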
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 646df0388..9ef9d7571 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -125,8 +126,8 @@ type accountRequest struct {
type accountResponse struct {
task *accountTask // Task which this request is filling
- hashes []common.Hash // Account hashes in the returned range
- accounts []*state.Account // Expanded accounts in the returned range
+ hashes []common.Hash // Account hashes in the returned range
+ accounts []*types.StateAccount // Expanded accounts in the returned range
cont bool // Whether the account range has a continuation
}
@@ -2274,9 +2275,9 @@ func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, acco
s.scheduleRevertAccountRequest(req)
return err
}
- accs := make([]*state.Account, len(accounts))
+ accs := make([]*types.StateAccount, len(accounts))
for i, account := range accounts {
- acc := new(state.Account)
+ acc := new(types.StateAccount)
if err := rlp.DecodeBytes(account, acc); err != nil {
panic(err) // We created these blobs, we must be able to decode them
}
@@ -2740,7 +2741,7 @@ func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) e
// Note: it is not concurrency-safe; the caller must handle synchronization.
func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
if len(paths) == 1 {
- var account state.Account
+ var account types.StateAccount
if err := rlp.DecodeBytes(value, &account); err != nil {
return nil
}
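With state.Account replaced by types.StateAccount throughout, the account blobs decoded here have the same four-field shape the tests below construct. A hedged, self-contained round-trip sketch using geth's own types:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	acc := types.StateAccount{
		Nonce:    1,
		Balance:  big.NewInt(1000),
		Root:     common.Hash{}, // placeholder storage root for the sketch
		CodeHash: common.Hash{}.Bytes(),
	}
	blob, err := rlp.EncodeToBytes(&acc)
	if err != nil {
		panic(err)
	}
	var dec types.StateAccount
	if err := rlp.DecodeBytes(blob, &dec); err != nil {
		panic(err)
	}
	fmt.Println(dec.Nonce, dec.Balance) // 1 1000
}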
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 023fc8ee0..47ab1f026 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -29,7 +29,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
@@ -1349,7 +1349,7 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
accTrie, _ := trie.New(common.Hash{}, db)
var entries entrySlice
for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(state.Account{
+ value, _ := rlp.EncodeToBytes(types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
@@ -1394,7 +1394,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
}
// Fill boundary accounts
for i := 0; i < len(boundaries); i++ {
- value, _ := rlp.EncodeToBytes(state.Account{
+ value, _ := rlp.EncodeToBytes(types.StateAccount{
Nonce: uint64(0),
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
@@ -1406,7 +1406,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
}
// Fill other accounts if required
for i := uint64(1); i <= uint64(n); i++ {
- value, _ := rlp.EncodeToBytes(state.Account{
+ value, _ := rlp.EncodeToBytes(types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: emptyRoot,
@@ -1442,7 +1442,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
stRoot := stTrie.Hash()
stTrie.Commit(nil)
- value, _ := rlp.EncodeToBytes(state.Account{
+ value, _ := rlp.EncodeToBytes(types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
@@ -1489,7 +1489,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
if code {
codehash = getCodeHash(i)
}
- value, _ := rlp.EncodeToBytes(state.Account{
+ value, _ := rlp.EncodeToBytes(types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index eb178311f..ca2002b60 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -119,7 +119,8 @@ func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state
// Finalize the state so any modifications are written to the trie
root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number()))
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
+ current.NumberU64(), current.Root().Hex(), err)
}
statedb, err = state.New(root, database, nil)
if err != nil {
diff --git a/eth/sync.go b/eth/sync.go
index ab114b59f..27941158f 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -18,7 +18,6 @@ package eth
import (
"math/big"
- "math/rand"
"sync/atomic"
"time"
@@ -28,23 +27,13 @@ import (
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/p2p/enode"
)
const (
forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available
defaultMinSyncPeers = 5 // Amount of peers desired to start syncing
-
- // This is the target size for the packs of transactions sent by txsyncLoop64.
- // A pack can get larger than this if a single transactions exceeds this size.
- txsyncPackSize = 100 * 1024
)
-type txsync struct {
- p *eth.Peer
- txs []*types.Transaction
-}
-
// syncTransactions starts sending all currently pending transactions to the given peer.
func (h *handler) syncTransactions(p *eth.Peer) {
// Assemble the set of transaction to broadcast or announce to the remote
@@ -64,94 +53,11 @@ func (h *handler) syncTransactions(p *eth.Peer) {
// The eth/65 protocol introduces proper transaction announcements, so instead
// of dripping transactions across multiple peers, just send the entire list as
// an announcement and let the remote side decide what they need (likely nothing).
- if p.Version() >= eth.ETH65 {
- hashes := make([]common.Hash, len(txs))
- for i, tx := range txs {
- hashes[i] = tx.Hash()
- }
- p.AsyncSendPooledTransactionHashes(hashes)
- return
- }
- // Out of luck, peer is running legacy protocols, drop the txs over
- select {
- case h.txsyncCh <- &txsync{p: p, txs: txs}:
- case <-h.quitSync:
- }
-}
-
-// txsyncLoop64 takes care of the initial transaction sync for each new
-// connection. When a new peer appears, we relay all currently pending
-// transactions. In order to minimise egress bandwidth usage, we send
-// the transactions in small packs to one peer at a time.
-func (h *handler) txsyncLoop64() {
- defer h.wg.Done()
-
- var (
- pending = make(map[enode.ID]*txsync)
- sending = false // whether a send is active
- pack = new(txsync) // the pack that is being sent
- done = make(chan error, 1) // result of the send
- )
-
- // send starts a sending a pack of transactions from the sync.
- send := func(s *txsync) {
- if s.p.Version() >= eth.ETH65 {
- panic("initial transaction syncer running on eth/65+")
- }
- // Fill pack with transactions up to the target size.
- size := common.StorageSize(0)
- pack.p = s.p
- pack.txs = pack.txs[:0]
- for i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {
- pack.txs = append(pack.txs, s.txs[i])
- size += s.txs[i].Size()
- }
- // Remove the transactions that will be sent.
- s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]
- if len(s.txs) == 0 {
- delete(pending, s.p.Peer.ID())
- }
- // Send the pack in the background.
- s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
- sending = true
- go func() { done <- pack.p.SendTransactions(pack.txs) }()
- }
- // pick chooses the next pending sync.
- pick := func() *txsync {
- if len(pending) == 0 {
- return nil
- }
- n := rand.Intn(len(pending)) + 1
- for _, s := range pending {
- if n--; n == 0 {
- return s
- }
- }
- return nil
- }
-
- for {
- select {
- case s := <-h.txsyncCh:
- pending[s.p.Peer.ID()] = s
- if !sending {
- send(s)
- }
- case err := <-done:
- sending = false
- // Stop tracking peers that cause send failures.
- if err != nil {
- pack.p.Log().Debug("Transaction send failed", "err", err)
- delete(pending, pack.p.Peer.ID())
- }
- // Schedule the next send.
- if s := pick(); s != nil {
- send(s)
- }
- case <-h.quitSync:
- return
- }
+ hashes := make([]common.Hash, len(txs))
+ for i, tx := range txs {
+ hashes[i] = tx.Hash()
}
+ p.AsyncSendPooledTransactionHashes(hashes)
}
// chainSyncer coordinates blockchain sync components.
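The arithmetic behind keeping only the announcement path is straightforward: announcing costs one 32-byte hash per pending transaction, while pushing full bodies costs hundreds of bytes each, so announcements scale to large pools. A back-of-the-envelope sketch (sizes are illustrative, not measured):

package main

import "fmt"

func main() {
	const pending = 5000 // pending txs in the pool (illustrative)
	const hashSize = 32  // bytes per announced hash
	const txSize = 400   // rough average tx size in bytes (illustrative)
	fmt.Println("announce:", pending*hashSize, "bytes") // 160000
	fmt.Println("push all:", pending*txSize, "bytes")   // 2000000
}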
diff --git a/eth/sync_test.go b/eth/sync_test.go
index a0c6f8602..e96b9ee81 100644
--- a/eth/sync_test.go
+++ b/eth/sync_test.go
@@ -28,7 +28,6 @@ import (
)
// Tests that fast sync is disabled after a successful sync cycle.
-func TestFastSyncDisabling65(t *testing.T) { testFastSyncDisabling(t, eth.ETH65) }
func TestFastSyncDisabling66(t *testing.T) { testFastSyncDisabling(t, eth.ETH66) }
// Tests that fast sync gets disabled as soon as a real block is successfully
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index d8d290454..5a9cb133f 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -291,7 +291,11 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
}()
}
// Start a goroutine to feed all the blocks into the tracers
- begin := time.Now()
+ var (
+ begin = time.Now()
+ derefTodo []common.Hash // list of hashes to dereference from the db
+ derefsMu sync.Mutex // mutex for the derefs
+ )
go func() {
var (
@@ -325,6 +329,14 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
return
default:
}
+ // clean out any derefs
+ derefsMu.Lock()
+ for _, h := range derefTodo {
+ statedb.Database().TrieDB().Dereference(h)
+ }
+ derefTodo = derefTodo[:0]
+ derefsMu.Unlock()
+
// Print progress logs if long enough time elapsed
if time.Since(logged) > 8*time.Second {
logged = time.Now()
@@ -383,12 +395,11 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
Hash: res.block.Hash(),
Traces: res.results,
}
+ // Schedule any parent tries held in memory by this task for dereferencing
done[uint64(result.Block)] = result
-
- // Dereference any parent tries held in memory by this task
- if res.statedb.Database().TrieDB() != nil {
- res.statedb.Database().TrieDB().Dereference(res.rootref)
- }
+ derefsMu.Lock()
+ derefTodo = append(derefTodo, res.rootref)
+ derefsMu.Unlock()
// Stream completed traces to the user, aborting on the first error
for result, ok := done[next]; ok; result, ok = done[next] {
if len(result.Traces) > 0 || next == end.NumberU64() {
@@ -446,12 +457,11 @@ func (api *API) TraceBlockFromFile(ctx context.Context, file string, config *Tra
// EVM against a block pulled from the pool of bad ones and returns them as a JSON
// object.
func (api *API) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {
- for _, block := range rawdb.ReadAllBadBlocks(api.backend.ChainDb()) {
- if block.Hash() == hash {
- return api.traceBlock(ctx, block, config)
- }
+ block := rawdb.ReadBadBlock(api.backend.ChainDb(), hash)
+ if block == nil {
+ return nil, fmt.Errorf("bad block %#x not found", hash)
}
- return nil, fmt.Errorf("bad block %#x not found", hash)
+ return api.traceBlock(ctx, block, config)
}
// StandardTraceBlockToFile dumps the structured logs created during the
@@ -465,16 +475,72 @@ func (api *API) StandardTraceBlockToFile(ctx context.Context, hash common.Hash,
return api.standardTraceBlockToFile(ctx, block, config)
}
+// IntermediateRoots executes a block (bad, canonical or sidechain) and returns a
+// list of intermediate roots: the state root after each transaction.
+func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config *TraceConfig) ([]common.Hash, error) {
+ block, _ := api.blockByHash(ctx, hash)
+ if block == nil {
+ // Check in the bad blocks
+ block = rawdb.ReadBadBlock(api.backend.ChainDb(), hash)
+ }
+ if block == nil {
+ return nil, fmt.Errorf("block %#x not found", hash)
+ }
+ if block.NumberU64() == 0 {
+ return nil, errors.New("genesis is not traceable")
+ }
+ parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
+ if err != nil {
+ return nil, err
+ }
+ reexec := defaultTraceReexec
+ if config != nil && config.Reexec != nil {
+ reexec = *config.Reexec
+ }
+ statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true)
+ if err != nil {
+ return nil, err
+ }
+ var (
+ roots []common.Hash
+ signer = types.MakeSigner(api.backend.ChainConfig(), block.Number())
+ chainConfig = api.backend.ChainConfig()
+ vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
+ deleteEmptyObjects = chainConfig.IsEIP158(block.Number())
+ )
+ for i, tx := range block.Transactions() {
+ var (
+ msg, _ = tx.AsMessage(signer, block.BaseFee())
+ txContext = core.NewEVMTxContext(msg)
+ vmenv = vm.NewEVM(vmctx, txContext, statedb, chainConfig, vm.Config{})
+ )
+ statedb.Prepare(tx.Hash(), i)
+ if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {
+ log.Warn("Tracing intermediate roots did not complete", "txindex", i, "txhash", tx.Hash(), "err", err)
+ // We intentionally don't return the error here: if we do, then the RPC server will not
+ // return the roots. Most likely, the caller already knows that a certain transaction fails
+ // to be included, but still wants the intermediate roots that led to that point.
+ // It may happen that tx_N causes an erroneous state, which in turn causes tx_N+M not to
+ // be executable.
+ // N.B.: This should never happen while tracing canonical blocks, only when tracing bad blocks.
+ return roots, nil
+ }
+ // calling IntermediateRoot will internally call Finalize on the state
+ // so any modifications are written to the trie
+ roots = append(roots, statedb.IntermediateRoot(deleteEmptyObjects))
+ }
+ return roots, nil
+}
+
// StandardTraceBadBlockToFile dumps the structured logs created during the
// execution of EVM against a block pulled from the pool of bad ones to the
// local file system and returns a list of files to the caller.
func (api *API) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {
- for _, block := range rawdb.ReadAllBadBlocks(api.backend.ChainDb()) {
- if block.Hash() == hash {
- return api.standardTraceBlockToFile(ctx, block, config)
- }
+ block := rawdb.ReadBadBlock(api.backend.ChainDb(), hash)
+ if block == nil {
+ return nil, fmt.Errorf("bad block %#x not found", hash)
}
- return nil, fmt.Errorf("bad block %#x not found", hash)
+ return api.standardTraceBlockToFile(ctx, block, config)
}
// traceBlock configures a new tracer according to the provided configuration, and
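Assuming the new IntermediateRoots method is registered under the debug namespace as debug_intermediateRoots (the registration itself is outside this diff), a client could fetch per-transaction state roots as below; the endpoint and block hash are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // illustrative endpoint
	if err != nil {
		panic(err)
	}
	var roots []common.Hash
	hash := common.HexToHash("0x00") // block hash to replay (placeholder)
	if err := client.CallContext(context.Background(), &roots, "debug_intermediateRoots", hash, nil); err != nil {
		panic(err)
	}
	fmt.Println("state roots after each tx:", roots)
}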
diff --git a/eth/tracers/internal/tracers/4byte_tracer.js b/eth/tracers/internal/tracers/4byte_tracer.js
index 462b4ad4c..9ec3209f8 100644
--- a/eth/tracers/internal/tracers/4byte_tracer.js
+++ b/eth/tracers/internal/tracers/4byte_tracer.js
@@ -31,48 +31,27 @@
// ids aggregates the 4byte ids found.
ids : {},
- // callType returns 'false' for non-calls, or the peek-index for the first param
- // after 'value', i.e. meminstart.
- callType: function(opstr){
- switch(opstr){
- case "CALL": case "CALLCODE":
- // gas, addr, val, memin, meminsz, memout, memoutsz
- return 3; // stack ptr to memin
-
- case "DELEGATECALL": case "STATICCALL":
- // gas, addr, memin, meminsz, memout, memoutsz
- return 2; // stack ptr to memin
- }
- return false;
- },
-
// store saves the given identifier and data size.
store: function(id, size){
var key = "" + toHex(id) + "-" + size;
this.ids[key] = this.ids[key] + 1 || 1;
},
- // step is invoked for every opcode that the VM executes.
- step: function(log, db) {
- // Skip any opcodes that are not internal calls
- var ct = this.callType(log.op.toString());
- if (!ct) {
- return;
- }
+ enter: function(frame) {
// Skip any pre-compile invocations, those are just fancy opcodes
- if (isPrecompiled(toAddress(log.stack.peek(1).toString(16)))) {
+ if (isPrecompiled(frame.getTo())) {
return;
}
- // Gather internal call details
- var inSz = log.stack.peek(ct + 1).valueOf();
- if (inSz >= 4) {
- var inOff = log.stack.peek(ct).valueOf();
- this.store(log.memory.slice(inOff, inOff + 4), inSz-4);
+ var input = frame.getInput()
+ if (input.length >= 4) {
+ this.store(slice(input, 0, 4), input.length - 4);
}
},
+ exit: function(frameResult) {},
+
// fault is invoked when the actual execution of an opcode fails.
- fault: function(log, db) { },
+ fault: function(log, db) {},
// result is invoked when all the opcodes have been iterated over and returns
// the final result of the tracing.
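The rewrite above moves the tracer from opcode-level step stack-peeking to the frame-based enter/exit hooks, where the call input arrives pre-assembled. A self-contained Go sketch of the selector bookkeeping the enter hook performs (fourByteTracer is an illustrative stand-in, not geth's tracer type):

package main

import (
	"encoding/hex"
	"fmt"
)

type fourByteTracer struct {
	ids map[string]int
}

// enter records the 4-byte selector of a call frame's input, keyed
// together with the remaining calldata size, as the JS hook above does.
func (t *fourByteTracer) enter(input []byte) {
	if len(input) < 4 {
		return // too short to carry a selector
	}
	key := fmt.Sprintf("0x%s-%d", hex.EncodeToString(input[:4]), len(input)-4)
	t.ids[key]++
}

func main() {
	t := &fourByteTracer{ids: map[string]int{}}
	t.enter([]byte{0x27, 0xdc, 0x29, 0x7e, 1, 2, 3}) // selector + 3 data bytes
	fmt.Println(t.ids) // map[0x27dc297e-3:1]
}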
diff --git a/eth/tracers/internal/tracers/4byte_tracer_legacy.js b/eth/tracers/internal/tracers/4byte_tracer_legacy.js
new file mode 100644
index 000000000..462b4ad4c
--- /dev/null
+++ b/eth/tracers/internal/tracers/4byte_tracer_legacy.js
@@ -0,0 +1,86 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// 4byteTracer searches for 4byte-identifiers, and collects them for post-processing.
+// It collects the method identifiers along with the size of the supplied data, so
+// a reversed signature can be matched against the size of the data.
+//
+// Example:
+// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"})
+// {
+// 0x27dc297e-128: 1,
+// 0x38cc4831-0: 2,
+// 0x524f3889-96: 1,
+// 0xadf59f99-288: 1,
+// 0xc281d19e-0: 1
+// }
+{
+ // ids aggregates the 4byte ids found.
+ ids : {},
+
+ // callType returns 'false' for non-calls, or the peek-index for the first param
+ // after 'value', i.e. meminstart.
+ callType: function(opstr){
+ switch(opstr){
+ case "CALL": case "CALLCODE":
+ // gas, addr, val, memin, meminsz, memout, memoutsz
+ return 3; // stack ptr to memin
+
+ case "DELEGATECALL": case "STATICCALL":
+ // gas, addr, memin, meminsz, memout, memoutsz
+ return 2; // stack ptr to memin
+ }
+ return false;
+ },
+
+ // store saves the given identifier and data size.
+ store: function(id, size){
+ var key = "" + toHex(id) + "-" + size;
+ this.ids[key] = this.ids[key] + 1 || 1;
+ },
+
+ // step is invoked for every opcode that the VM executes.
+ step: function(log, db) {
+ // Skip any opcodes that are not internal calls
+ var ct = this.callType(log.op.toString());
+ if (!ct) {
+ return;
+ }
+ // Skip any pre-compile invocations, those are just fancy opcodes
+ if (isPrecompiled(toAddress(log.stack.peek(1).toString(16)))) {
+ return;
+ }
+ // Gather internal call details
+ var inSz = log.stack.peek(ct + 1).valueOf();
+ if (inSz >= 4) {
+ var inOff = log.stack.peek(ct).valueOf();
+ this.store(log.memory.slice(inOff, inOff + 4), inSz-4);
+ }
+ },
+
+ // fault is invoked when the actual execution of an opcode fails.
+ fault: function(log, db) { },
+
+ // result is invoked when all the opcodes have been iterated over and returns
+ // the final result of the tracing.
+ result: function(ctx) {
+ // Save the outer calldata also
+ if (ctx.input.length >= 4) {
+ this.store(slice(ctx.input, 0, 4), ctx.input.length-4)
+ }
+ return this.ids;
+ },
+}
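For contrast with the frame-based version, the legacy tracer preserved above must reconstruct the input location itself: callType maps each call opcode to the stack offset of its memory-input arguments. A small Go sketch of that mapping (illustrative, mirroring the JS helper):

package main

import "fmt"

// callType mirrors the JS helper above: it returns the stack offset of the
// input-offset argument, or -1 for non-call opcodes.
func callType(op string) int {
	switch op {
	case "CALL", "CALLCODE":
		return 3 // gas, addr, value precede memin on the stack
	case "DELEGATECALL", "STATICCALL":
		return 2 // gas, addr precede memin on the stack
	}
	return -1
}

func main() {
	for _, op := range []string{"CALL", "STATICCALL", "ADD"} {
		fmt.Println(op, callType(op))
	}
}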
diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go
index 7f45ab286..c1469c758 100644
--- a/eth/tracers/internal/tracers/assets.go
+++ b/eth/tracers/internal/tracers/assets.go
@@ -1,8 +1,10 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
-// 4byte_tracer.js (2.933kB)
+// 4byte_tracer.js (2.224kB)
+// 4byte_tracer_legacy.js (2.933kB)
// bigram_tracer.js (1.712kB)
-// call_tracer.js (8.956kB)
+// call_tracer.js (4.251kB)
+// call_tracer_legacy.js (8.956kB)
// evmdis_tracer.js (4.195kB)
// noop_tracer.js (1.271kB)
// opcount_tracer.js (1.372kB)
@@ -77,7 +79,7 @@ func (fi bindataFileInfo) Sys() interface{} {
return nil
}
-var __4byte_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\x5b\x6f\xdb\x4a\x0e\x7e\xb6\x7f\x05\xd7\x2f\xb5\x51\x59\x8e\x2f\x89\x2f\xd9\x16\xf0\xe6\xa4\x6d\x80\x9c\x24\x88\xdd\x3d\x28\x16\xfb\x30\x9e\xa1\xac\xd9\xc8\x33\xc2\x0c\xe5\x4b\x73\xf2\xdf\x17\x1c\x49\x89\x93\xd3\x62\xbb\x4f\x96\x47\xc3\x8f\x1f\xc9\x8f\xa4\x7a\x3d\xb8\xb0\xf9\xc1\xe9\x75\x4a\x30\x38\xe9\x8f\x61\x99\x22\xac\x6d\x17\x29\x45\x87\xc5\x06\xe6\x05\xa5\xd6\xf9\x66\xaf\x07\xcb\x54\x7b\x48\x74\x86\xa0\x3d\xe4\xc2\x11\xd8\x04\xe8\xcd\xfd\x4c\xaf\x9c\x70\x87\xb8\xd9\xeb\x95\x36\x3f\x7c\xcd\x08\x89\x43\x04\x6f\x13\xda\x09\x87\x33\x38\xd8\x02\xa4\x30\xe0\x50\x69\x4f\x4e\xaf\x0a\x42\xd0\x04\xc2\xa8\x9e\x75\xb0\xb1\x4a\x27\x07\x86\xd4\x04\x85\x51\xe8\x82\x6b\x42\xb7\xf1\x35\x8f\xcf\x37\x5f\xe1\x1a\xbd\x47\x07\x9f\xd1\xa0\x13\x19\xdc\x15\xab\x4c\x4b\xb8\xd6\x12\x8d\x47\x10\x1e\x72\x3e\xf1\x29\x2a\x58\x05\x38\x36\xfc\xc4\x54\x16\x15\x15\xf8\x64\x0b\xa3\x04\x69\x6b\x22\x40\xcd\xcc\x61\x8b\xce\x6b\x6b\x60\x58\xbb\xaa\x00\x23\xb0\x8e\x41\xda\x82\x38\x00\x07\x36\x67\xbb\x0e\x08\x73\x80\x4c\xd0\x8b\xe9\x2f\x24\xe4\x25\x6e\x05\xda\x04\x37\xa9\xcd\x11\x28\x15\xc4\x51\xef\x74\x96\xc1\x0a\xa1\xf0\x98\x14\x59\xc4\x68\xab\x82\xe0\x8f\xab\xe5\x97\xdb\xaf\x4b\x98\xdf\x7c\x83\x3f\xe6\xf7\xf7\xf3\x9b\xe5\xb7\x73\xd8\x69\x4a\x6d\x41\x80\x5b\x2c\xa1\xf4\x26\xcf\x34\x2a\xd8\x09\xe7\x84\xa1\x03\xd8\x84\x11\x7e\xbf\xbc\xbf\xf8\x32\xbf\x59\xce\xff\x71\x75\x7d\xb5\xfc\x06\xd6\xc1\xa7\xab\xe5\xcd\xe5\x62\x01\x9f\x6e\xef\x61\x0e\x77\xf3\xfb\xe5\xd5\xc5\xd7\xeb\xf9\x3d\xdc\x7d\xbd\xbf\xbb\x5d\x5c\xc6\xb0\x40\x66\x85\x6c\xff\xbf\x73\x9e\x84\xea\x39\x04\x85\x24\x74\xe6\xeb\x4c\x7c\xb3\x05\xf8\xd4\x16\x99\x82\x54\x6c\x11\x1c\x4a\xd4\x5b\x54\x20\x40\xda\xfc\xf0\xcb\x45\x65\x2c\x91\x59\xb3\x0e\x31\xff\x54\x90\x70\x95\x80\xb1\x14\x81\x47\x84\xbf\xa7\x44\xf9\xac\xd7\xdb\xed\x76\xf1\xda\x14\xb1\x75\xeb\x5e\x56\xc2\xf9\xde\xc7\xb8\xc9\x98\xa3\xd5\x81\x70\xe9\x84\x44\x07\x1e\x85\x93\x29\xfa\x10\x4c\x78\xd1\xd5\x0a\x0d\xe9\x44\xa3\xf3\x11\x8b\x14\xa4\xcd\x32\x94\xe4\x99\xc1\x26\x5c\xcc\xad\xa7\x6e\xee\xac\x44\xef\xb5\x59\x73\xe0\x70\x45\xaf\x2e\xc2\x06\x29\xb5\xca\xc3\x11\xdc\xdb\x68\xbc\xfe\x8e\x75\x36\x7c\x91\x97\x65\x54\x82\x44\x04\xde\x86\xe8\xc1\x21\xcb\x0c\x15\x78\xbd\x36\x82\x0a\x87\xa1\x97\x56\x08\x1b\x41\x92\xc5\x2e\xd6\x42\x1b\x4f\x7f\x01\x64\x9c\xba\x22\x97\x7b\xb1\xc9\x33\x9c\xf1\x33\xc0\x47\x50\xb8\x2a\xd6\x31\x71\x0a\x96\x4e\x18\x2f\x24\x8b\xbb\x0d\xad\x93\xfd\xa0\x3f\xc2\xd3\xe9\x18\x87\xa7\x4a\x9c\x4c\x86\x67\xd3\x41\x72\x3a\x9c\x9c\xf5\x47\x7d\x3c\x9b\x26\xa3\x31\x4e\xc7\xc3\xd5\x40\x9e\x9e\xe1\x58\x4c\x4e\xc6\xc3\x55\x1f\xc5\xc9\x24\x51\xe3\xd3\x71\x1f\xa7\x0a\x5b\x11\x3c\x06\x60\x37\x83\xd6\x51\xa6\x5b\x4f\x9d\xd2\xfb\x63\xf9\x03\x70\xb2\x1f\x8c\x95\x1c\x4c\xc7\xd8\xed\x0f\x26\x33\xe8\x47\x2f\x6f\x86\x13\x29\x47\x93\x61\xbf\x7b\x32\x83\xc1\xd1\xf9\xe9\x60\x94\x0c\x27\x93\x69\x77\x7a\xf6\xda\x40\xa8\xe4\x74\x9a\x4c\xa7\xdd\xc1\xe4\x0d\x94\x1c\x4c\xfa\xaa\x3f\x45\x86\xea\x97\xc7\x4f\xcd\xc7\x66\x83\x07\x8e\xf2\x20\xd6\x6b\x87\x6b\x41\x58\x56\x2d\x30\x0e\x2f\x12\x1e\x16\x71\xb3\xc1\xcf\x33\x78\x7c\x8a\x9a\xc1\x46\x8a\x2c\x5b\x1e\x72\x56\x35\x15\xce\x78\x78\x97\x88\xcc\xe3\xbb\xa0\x0b\x63\x4d\x97\x2f\x78\x1e\x1f\x01\x2f\x47\x7c\xe8\x6a\xa3\x70\x1f\x2e\xf0\x51\xa2\x9d\x27\x1e\xb3\x62\x13\x10\x45\xc2\xd3\xe4\xdd\x56\x64\x05\xbe\x8b\x40\xc7\x18\xc3\x06\x37\x5c\x54\xe1\x28\x6e\x36\x6a\x97\x33\x48\x0a\x53\x56\xca\xe6\x9e\x5c\xe7\xb1\xd9\x68\xf8\x9d\x26\x99\x1e\x1d\x48\xe1\x11\x5a\x17\xf3\xeb\xeb\xd6\x0c\x5e\xfe\x5c\xdc\xfe\x76\xd9\x9a\x3
5\x1b\x0d\x76\xb9\x16\x2c\x6d\xa5\x5c\x04\x5b\x91\x45\xa5\xbb\xea\xc7\x7f\x0f\x0f\xb6\xa0\xfa\xd7\x7f\x67\xb3\x32\x5e\x18\x9e\x43\xaf\x07\x9e\x84\x7c\x80\x9c\x1c\x90\x2d\xcd\x9a\xcf\xae\x7f\xbb\xbc\xbe\xfc\x3c\x5f\x5e\xbe\xa2\xb0\x58\xce\x97\x57\x17\xe5\xd1\x5f\x49\xfc\x1f\xfe\x07\x3f\xf3\xdf\x68\x3c\x35\x9f\x6f\x85\x9a\x9c\x37\x1b\x75\xd5\x3c\xf1\x9c\xf2\x3c\x8d\xc2\x18\xd1\x3c\x3c\xb9\x2c\x55\x6b\x86\x3e\xe7\x8e\xe1\x0e\x8a\x9b\x8d\x70\xff\x28\xdf\x5a\x45\xa1\xb9\x42\x86\xb7\xc2\xc1\x03\x1e\xe0\x03\xb4\x5a\xf0\x1e\xc8\x7e\xc1\x7d\x5b\xab\x0e\xbc\x87\x56\x97\x4f\xf8\xe6\x79\xb3\xd1\xa0\x54\xfb\x58\x2b\xff\xaf\x07\x3c\xfc\x1b\x3e\xc0\xeb\xff\xef\xa1\x0f\x7f\xfe\x09\xfd\x57\x34\x31\xe7\x85\xa1\xcd\xd6\x3e\xa0\x0a\x92\xe1\x01\x70\x00\x9b\x4b\xab\xaa\x8d\xc1\x11\xfc\xf3\x77\xc0\x3d\xca\x82\xd0\x07\xba\x98\x1f\xb1\xcd\xec\x3a\x02\xb5\xea\x00\xb3\xed\xf5\x60\xf1\xa0\xf3\xb0\xb8\x4a\x14\x5f\xc2\xf0\x46\x34\x96\x40\x1b\x42\x67\x44\x16\xa4\xed\xab\xf8\x24\xd5\x7c\x6b\xf5\x31\x6a\x6c\xf3\x98\xec\x82\x9c\x36\xeb\x76\xa7\xc3\x31\xea\x04\xda\x7f\x93\x54\xfa\xaa\xd2\x7f\x5e\x15\xe3\xd8\x75\xee\xb0\x2b\xed\x26\x0f\x5f\x19\x66\x6b\x65\xd8\xc3\x3e\x02\x4a\x2d\xef\x6f\x87\xf0\x9f\xc2\x13\x24\xc2\xc8\x67\xa2\x15\xbe\xf6\x77\x0e\x2b\x63\xd5\x26\x3b\x57\xca\xa1\xf7\x81\x51\x50\x42\xcc\x6d\xd6\xee\x77\x5e\xc8\xf5\xcf\x3a\x9d\xce\xcf\x48\x7d\x16\x61\xf7\xbf\x0a\xbc\x5e\x62\x55\xfc\xda\x2c\xbe\xc3\x07\x78\xe3\x41\x12\x57\xad\x13\x87\x5e\xbd\x4d\xda\xcf\x19\x08\xd7\x3f\x7e\x80\x51\xe5\xb2\x84\xb8\x4d\x92\x1f\x61\xbc\xb1\x2f\x65\x12\x14\x17\x22\x62\xd1\xbb\x43\xec\x79\x6d\xb5\x03\x48\x54\x61\xbd\x87\x51\x27\x0a\xd4\xba\xa3\x4e\x15\x4f\x2d\x9d\x44\x14\x19\x1d\x6b\x67\x97\x56\xdf\x07\x42\x52\x21\xb2\x4a\x2e\xfc\xad\x63\x13\x10\xa6\x56\x54\x52\x6e\xee\x46\xb0\xff\xa1\x86\xa0\x76\xe1\xd0\xff\xc8\x07\x27\x8f\xfd\xd4\xe2\x0a\x3b\x7f\x85\xdc\x60\x84\x4e\xf0\x47\x8f\xdd\x56\x2d\x56\x0d\xcd\x00\x57\xce\x42\xce\x7f\x05\x5c\x2d\x2e\xde\x1e\x61\xa9\x36\xca\xf3\x23\x52\x92\xf6\x2f\xa2\xae\x9b\xd9\x16\x3c\x3f\xb9\x86\xdc\xc0\x20\x32\x6f\xab\xaa\x48\xda\xc7\xda\xe4\x05\xc5\x19\x9a\x35\xa5\xc7\x15\x3a\x4a\x7a\x99\xe9\xe7\xcb\x11\x9c\x44\x21\xd1\x6f\xcd\xbb\xa3\xce\xeb\x29\x53\xf7\x73\xd9\xc1\x4f\xcd\xff\x06\x00\x00\xff\xff\x8e\xc8\x27\x72\x75\x0b\x00\x00")
+var __4byte_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x55\x5b\x6f\x22\x39\x13\x7d\x86\x5f\x71\xc4\x13\x68\x9a\x4b\x73\x09\x97\xf9\x32\x12\xdf\x28\x99\x41\xca\x66\x22\x42\x34\x8a\x56\xfb\x60\xda\xd5\xdd\xde\x18\xbb\x65\xbb\xb9\x6c\x26\xff\x7d\x65\x37\xe4\x36\xbb\xda\x79\x02\xec\xaa\x73\xaa\x4e\x1d\x17\xdd\x2e\x3e\xeb\xe2\x60\x44\x96\x3b\xf4\x7b\xf1\x18\xab\x9c\x90\xe9\x36\xb9\x9c\x0c\x95\x1b\xcc\x4b\x97\x6b\x63\xeb\xdd\x2e\x56\xb9\xb0\x48\x85\x24\x08\x8b\x82\x19\x07\x9d\xc2\xbd\x8b\x97\x62\x6d\x98\x39\x74\xea\xdd\x6e\x95\xf3\x8f\xd7\x1e\x21\x35\x44\xb0\x3a\x75\x3b\x66\x68\x86\x83\x2e\x91\x30\x05\x43\x5c\x58\x67\xc4\xba\x74\x04\xe1\xc0\x14\xef\x6a\x83\x8d\xe6\x22\x3d\x78\x48\xe1\x50\x2a\x4e\x26\x50\x3b\x32\x1b\x7b\xaa\xe3\xcb\xf5\x1d\xae\xc8\x5a\x32\xf8\x42\x8a\x0c\x93\xb8\x29\xd7\x52\x24\xb8\x12\x09\x29\x4b\x60\x16\x85\x3f\xb1\x39\x71\xac\x03\x9c\x4f\xbc\xf4\xa5\xdc\x1e\x4b\xc1\xa5\x2e\x15\x67\x4e\x68\x15\x81\x84\xaf\x1c\x5b\x32\x56\x68\x85\xc1\x89\xea\x08\x18\x41\x1b\x0f\xd2\x64\xce\x37\x60\xa0\x0b\x9f\xd7\x02\x53\x07\x48\xe6\x5e\x52\x7f\x41\x90\x97\xbe\x39\x84\x0a\x34\xb9\x2e\x08\x2e\x67\xce\x77\xbd\x13\x52\x62\x4d\x28\x2d\xa5\xa5\x8c\x3c\xda\xba\x74\xf8\xbe\x58\x7d\xfd\x76\xb7\xc2\xfc\xfa\x1e\xdf\xe7\xcb\xe5\xfc\x7a\x75\xff\x11\x3b\xe1\x72\x5d\x3a\xd0\x96\x2a\x28\xb1\x29\xa4\x20\x8e\x1d\x33\x86\x29\x77\x80\x4e\x3d\xc2\x6f\x17\xcb\xcf\x5f\xe7\xd7\xab\xf9\xff\x17\x57\x8b\xd5\x3d\xb4\xc1\xe5\x62\x75\x7d\x71\x7b\x8b\xcb\x6f\x4b\xcc\x71\x33\x5f\xae\x16\x9f\xef\xae\xe6\x4b\xdc\xdc\x2d\x6f\xbe\xdd\x5e\x74\x70\x4b\xbe\x2a\xf2\xf9\xff\xad\x79\x1a\xa6\x67\x08\x9c\x1c\x13\xd2\x9e\x94\xb8\xd7\x25\x6c\xae\x4b\xc9\x91\xb3\x2d\xc1\x50\x42\x62\x4b\x1c\x0c\x89\x2e\x0e\xbf\x3c\x54\x8f\xc5\xa4\x56\x59\xe8\xf9\x5f\x0d\x89\x45\x0a\xa5\x5d\x04\x4b\x84\xff\xe5\xce\x15\xb3\x6e\x77\xb7\xdb\x75\x32\x55\x76\xb4\xc9\xba\xb2\x82\xb3\xdd\x4f\x9d\xba\xc7\x1c\xae\x0f\x8e\x56\x86\x25\x64\x60\x89\x99\x24\x27\x1b\x9a\x09\x17\x6d\xc1\x49\x39\x91\x0a\x32\x36\xf2\x26\x45\xa2\xa5\xa4\xc4\x59\x5f\xc1\x26\x04\x16\xda\xba\x76\x61\x74\x42\xd6\x0a\x95\xf9\xc6\xb1\x70\x6f\x02\xb1\x21\x97\x6b\x6e\xf1\x0a\xee\x7d\x37\x56\xfc\x45\x27\x35\x6c\x59\x54\x63\xe4\xcc\xb1\x08\x56\x87\xee\x61\xc8\xdb\x8c\x38\xac\xc8\x14\x73\xa5\xa1\xf0\x96\xd6\x84\x0d\x73\x89\x37\x3b\xcb\x98\x50\xd6\xfd\x04\xe8\x71\x4e\x13\xb9\xd8\xb3\x4d\x21\x69\xe6\xbf\x03\x9f\xc0\x69\x5d\x66\x1d\xe7\x25\x58\x19\xa6\x2c\x4b\xbc\xb9\x9b\x68\xf4\xf6\xfd\x78\x48\xa3\xe9\x98\x06\x23\xce\x7a\x93\xc1\xd9\xb4\x9f\x8e\x06\x93\xb3\x78\x18\xd3\xd9\x34\x1d\x8e\x69\x3a\x1e\xac\xfb\xc9\xe8\x8c\xc6\x6c\xd2\x1b\x0f\xd6\x31\xb1\xde\x24\xe5\xe3\xd1\x38\xa6\x29\xa7\x46\x84\xc7\x00\x6c\x66\x68\xbc\x52\xba\xf1\xd4\xaa\xd8\x1f\xab\x0f\xa0\xb7\xef\x8f\x79\xd2\x9f\x8e\xa9\x1d\xf7\x27\x33\xc4\xd1\xcb\xcd\x60\x92\x24\xc3\xc9\x20\x6e\xf7\x66\xe8\xbf\x3a\x1f\xf5\x87\xe9\x60\x32\x99\xb6\xa7\x67\x6f\x13\x18\x4f\x47\xd3\x74\x3a\x6d\xf7\x27\xef\xa0\x92\xfe\x24\xe6\xf1\x94\x3c\x54\x5c\x1d\x3f\xd5\x1f\xeb\x35\xbf\x70\xb8\x05\xcb\x32\x43\x19\x73\x54\x4d\x2d\x54\x1c\x2e\x52\xbf\x2c\x3a\xf5\x9a\xff\x3e\xc3\xe3\x53\x54\x0f\x39\xd6\x79\xc7\x5b\xef\xeb\x60\x48\xe1\x9f\xa1\x50\xcf\x43\x0e\x8e\xf1\xda\xfb\x59\x74\xea\xb5\x10\x3f\x43\x5a\xaa\x4a\x63\xc1\xa3\x30\xa6\xd6\x63\xbd\x56\xdb\x32\x83\x07\x3a\xe0\x1c\x8d\x06\x3e\xc0\xe9\xaf\xb4\x6f\x0a\xde\xc2\x07\x34\xda\xfe\xc4\x47\x7e\xac\xd7\x6a\x2e\x17\xb6\x23\xb8\xfd\xfd\x81\x0e\x7f\xe0\x1c\x6f\x7f\x7f\x40\x8c\x1f\x3f\x10\x7f\xac\xd7\x42\x99\xa4\x9c\x97\xff\x99\x33\x35\x6c\x43\x2d\x78\xc6\x6e\x17\xb7\x0f\xa2\x08\x6b\xac\x30\xd4\x4e\xf4\xa6\x08\x8
b\x5f\x6d\x75\x12\x56\xa3\x8d\xe0\x72\xed\x57\xaa\x21\xfc\x59\x5a\x87\x94\xa9\xe4\x00\x5d\x24\x9a\x93\xad\xd7\x6a\x22\x45\x53\xd8\x1b\x43\xc7\x64\x5e\x11\x74\x32\x72\x2b\xdd\x6c\xb5\x2a\xa6\x9a\x21\x57\x1a\xe5\xab\x7f\x3a\xb6\x2a\x54\x51\x3a\x9c\xe3\x39\x7c\xe1\x0f\x9a\xad\x13\xa6\xff\xd5\x91\xa4\x32\x97\xe3\xd3\x39\x86\x47\xa0\xd0\x6c\xd0\xb1\x69\xfd\x5b\xae\x02\x23\xf4\x22\x0c\x5b\x11\xde\xa4\xb5\x31\x6c\x1d\x29\x2b\x29\xf6\xc2\xbd\x57\x62\x49\xb6\x94\xae\xf5\x32\xd3\x94\x95\xd2\xf9\x45\xed\x55\x78\xf0\xab\x34\x3f\xee\x56\x96\xb8\x92\x49\xd0\x9e\x92\xd2\x03\xf8\xc7\xc5\xd4\x51\x0b\xa4\xd5\xd6\xab\x85\xfc\x57\x2c\x52\x67\x11\xf8\xfa\x15\x83\x09\x94\x3f\x51\x30\x29\x03\xcd\x51\xdb\x6a\x5d\xae\xc9\x3b\xca\x91\x61\xfe\xff\x42\x6f\x8f\x9e\xaa\xe4\xb4\x01\xce\xe7\xa4\x42\x31\x79\x02\x3e\xbe\x79\xff\xf0\xc2\x3e\xaa\x55\xe7\xaf\x6a\x4a\xdc\xfe\xc5\x01\x27\xf7\xea\xd2\xff\x91\x25\x4c\x4a\xef\x58\x30\x69\xf5\x71\x16\x89\xdb\x77\x7e\x79\x1e\xcf\xc1\xcf\x33\x79\x9f\xde\x1e\xb6\x8e\x3e\xa8\xda\x78\x36\x70\x65\xd9\xa7\xfa\xdf\x01\x00\x00\xff\xff\xf6\xa8\xa1\xb9\xb0\x08\x00\x00")
func _4byte_tracerJsBytes() ([]byte, error) {
return bindataRead(
@@ -93,6 +95,26 @@ func _4byte_tracerJs() (*asset, error) {
}
info := bindataFileInfo{name: "4byte_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0xa8, 0x46, 0xa2, 0x3a, 0x2b, 0xaa, 0xb9, 0xb9, 0xba, 0xe2, 0x22, 0x10, 0xe, 0xe7, 0x4c, 0x24, 0xfc, 0x4c, 0x85, 0xeb, 0x96, 0x48, 0xe8, 0x7f, 0xc8, 0xe0, 0xd0, 0xd, 0x26, 0xa1, 0xb2}}
+ return a, nil
+}
+
+var __4byte_tracer_legacyJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\x5b\x6f\xdb\x4a\x0e\x7e\xb6\x7f\x05\xd7\x2f\xb5\x51\x59\x8e\x2f\x89\x2f\xd9\x16\xf0\xe6\xa4\x6d\x80\x9c\x24\x88\xdd\x3d\x28\x16\xfb\x30\x9e\xa1\xac\xd9\xc8\x33\xc2\x0c\xe5\x4b\x73\xf2\xdf\x17\x1c\x49\x89\x93\xd3\x62\xbb\x4f\x96\x47\xc3\x8f\x1f\xc9\x8f\xa4\x7a\x3d\xb8\xb0\xf9\xc1\xe9\x75\x4a\x30\x38\xe9\x8f\x61\x99\x22\xac\x6d\x17\x29\x45\x87\xc5\x06\xe6\x05\xa5\xd6\xf9\x66\xaf\x07\xcb\x54\x7b\x48\x74\x86\xa0\x3d\xe4\xc2\x11\xd8\x04\xe8\xcd\xfd\x4c\xaf\x9c\x70\x87\xb8\xd9\xeb\x95\x36\x3f\x7c\xcd\x08\x89\x43\x04\x6f\x13\xda\x09\x87\x33\x38\xd8\x02\xa4\x30\xe0\x50\x69\x4f\x4e\xaf\x0a\x42\xd0\x04\xc2\xa8\x9e\x75\xb0\xb1\x4a\x27\x07\x86\xd4\x04\x85\x51\xe8\x82\x6b\x42\xb7\xf1\x35\x8f\xcf\x37\x5f\xe1\x1a\xbd\x47\x07\x9f\xd1\xa0\x13\x19\xdc\x15\xab\x4c\x4b\xb8\xd6\x12\x8d\x47\x10\x1e\x72\x3e\xf1\x29\x2a\x58\x05\x38\x36\xfc\xc4\x54\x16\x15\x15\xf8\x64\x0b\xa3\x04\x69\x6b\x22\x40\xcd\xcc\x61\x8b\xce\x6b\x6b\x60\x58\xbb\xaa\x00\x23\xb0\x8e\x41\xda\x82\x38\x00\x07\x36\x67\xbb\x0e\x08\x73\x80\x4c\xd0\x8b\xe9\x2f\x24\xe4\x25\x6e\x05\xda\x04\x37\xa9\xcd\x11\x28\x15\xc4\x51\xef\x74\x96\xc1\x0a\xa1\xf0\x98\x14\x59\xc4\x68\xab\x82\xe0\x8f\xab\xe5\x97\xdb\xaf\x4b\x98\xdf\x7c\x83\x3f\xe6\xf7\xf7\xf3\x9b\xe5\xb7\x73\xd8\x69\x4a\x6d\x41\x80\x5b\x2c\xa1\xf4\x26\xcf\x34\x2a\xd8\x09\xe7\x84\xa1\x03\xd8\x84\x11\x7e\xbf\xbc\xbf\xf8\x32\xbf\x59\xce\xff\x71\x75\x7d\xb5\xfc\x06\xd6\xc1\xa7\xab\xe5\xcd\xe5\x62\x01\x9f\x6e\xef\x61\x0e\x77\xf3\xfb\xe5\xd5\xc5\xd7\xeb\xf9\x3d\xdc\x7d\xbd\xbf\xbb\x5d\x5c\xc6\xb0\x40\x66\x85\x6c\xff\xbf\x73\x9e\x84\xea\x39\x04\x85\x24\x74\xe6\xeb\x4c\x7c\xb3\x05\xf8\xd4\x16\x99\x82\x54\x6c\x11\x1c\x4a\xd4\x5b\x54\x20\x40\xda\xfc\xf0\xcb\x45\x65\x2c\x91\x59\xb3\x0e\x31\xff\x54\x90\x70\x95\x80\xb1\x14\x81\x47\x84\xbf\xa7\x44\xf9\xac\xd7\xdb\xed\x76\xf1\xda\x14\xb1\x75\xeb\x5e\x56\xc2\xf9\xde\xc7\xb8\xc9\x98\xa3\xd5\x81\x70\xe9\x84\x44\x07\x1e\x85\x93\x29\xfa\x10\x4c\x78\xd1\xd5\x0a\x0d\xe9\x44\xa3\xf3\x11\x8b\x14\xa4\xcd\x32\x94\xe4\x99\xc1\x26\x5c\xcc\xad\xa7\x6e\xee\xac\x44\xef\xb5\x59\x73\xe0\x70\x45\xaf\x2e\xc2\x06\x29\xb5\xca\xc3\x11\xdc\xdb\x68\xbc\xfe\x8e\x75\x36\x7c\x91\x97\x65\x54\x82\x44\x04\xde\x86\xe8\xc1\x21\xcb\x0c\x15\x78\xbd\x36\x82\x0a\x87\xa1\x97\x56\x08\x1b\x41\x92\xc5\x2e\xd6\x42\x1b\x4f\x7f\x01\x64\x9c\xba\x22\x97\x7b\xb1\xc9\x33\x9c\xf1\x33\xc0\x47\x50\xb8\x2a\xd6\x31\x71\x0a\x96\x4e\x18\x2f\x24\x8b\xbb\x0d\xad\x93\xfd\xa0\x3f\xc2\xd3\xe9\x18\x87\xa7\x4a\x9c\x4c\x86\x67\xd3\x41\x72\x3a\x9c\x9c\xf5\x47\x7d\x3c\x9b\x26\xa3\x31\x4e\xc7\xc3\xd5\x40\x9e\x9e\xe1\x58\x4c\x4e\xc6\xc3\x55\x1f\xc5\xc9\x24\x51\xe3\xd3\x71\x1f\xa7\x0a\x5b\x11\x3c\x06\x60\x37\x83\xd6\x51\xa6\x5b\x4f\x9d\xd2\xfb\x63\xf9\x03\x70\xb2\x1f\x8c\x95\x1c\x4c\xc7\xd8\xed\x0f\x26\x33\xe8\x47\x2f\x6f\x86\x13\x29\x47\x93\x61\xbf\x7b\x32\x83\xc1\xd1\xf9\xe9\x60\x94\x0c\x27\x93\x69\x77\x7a\xf6\xda\x40\xa8\xe4\x74\x9a\x4c\xa7\xdd\xc1\xe4\x0d\x94\x1c\x4c\xfa\xaa\x3f\x45\x86\xea\x97\xc7\x4f\xcd\xc7\x66\x83\x07\x8e\xf2\x20\xd6\x6b\x87\x6b\x41\x58\x56\x2d\x30\x0e\x2f\x12\x1e\x16\x71\xb3\xc1\xcf\x33\x78\x7c\x8a\x9a\xc1\x46\x8a\x2c\x5b\x1e\x72\x56\x35\x15\xce\x78\x78\x97\x88\xcc\xe3\xbb\xa0\x0b\x63\x4d\x97\x2f\x78\x1e\x1f\x01\x2f\x47\x7c\xe8\x6a\xa3\x70\x1f\x2e\xf0\x51\xa2\x9d\x27\x1e\xb3\x62\x13\x10\x45\xc2\xd3\xe4\xdd\x56\x64\x05\xbe\x8b\x40\xc7\x18\xc3\x06\x37\x5c\x54\xe1\x28\x6e\x36\x6a\x97\x33\x48\x0a\x53\x56\xca\xe6\x9e\x5c\xe7\xb1\xd9\x68\xf8\x9d\x26\x99\x1e\x1d\x48\xe1\x11\x5a\x17\xf3\xeb\xeb\xd6\x0c\x5e\xfe\x5c\xdc\xfe\x76\xd9
\x9a\x35\x1b\x0d\x76\xb9\x16\x2c\x6d\xa5\x5c\x04\x5b\x91\x45\xa5\xbb\xea\xc7\x7f\x0f\x0f\xb6\xa0\xfa\xd7\x7f\x67\xb3\x32\x5e\x18\x9e\x43\xaf\x07\x9e\x84\x7c\x80\x9c\x1c\x90\x2d\xcd\x9a\xcf\xae\x7f\xbb\xbc\xbe\xfc\x3c\x5f\x5e\xbe\xa2\xb0\x58\xce\x97\x57\x17\xe5\xd1\x5f\x49\xfc\x1f\xfe\x07\x3f\xf3\xdf\x68\x3c\x35\x9f\x6f\x85\x9a\x9c\x37\x1b\x75\xd5\x3c\xf1\x9c\xf2\x3c\x8d\xc2\x18\xd1\x3c\x3c\xb9\x2c\x55\x6b\x86\x3e\xe7\x8e\xe1\x0e\x8a\x9b\x8d\x70\xff\x28\xdf\x5a\x45\xa1\xb9\x42\x86\xb7\xc2\xc1\x03\x1e\xe0\x03\xb4\x5a\xf0\x1e\xc8\x7e\xc1\x7d\x5b\xab\x0e\xbc\x87\x56\x97\x4f\xf8\xe6\x79\xb3\xd1\xa0\x54\xfb\x58\x2b\xff\xaf\x07\x3c\xfc\x1b\x3e\xc0\xeb\xff\xef\xa1\x0f\x7f\xfe\x09\xfd\x57\x34\x31\xe7\x85\xa1\xcd\xd6\x3e\xa0\x0a\x92\xe1\x01\x70\x00\x9b\x4b\xab\xaa\x8d\xc1\x11\xfc\xf3\x77\xc0\x3d\xca\x82\xd0\x07\xba\x98\x1f\xb1\xcd\xec\x3a\x02\xb5\xea\x00\xb3\xed\xf5\x60\xf1\xa0\xf3\xb0\xb8\x4a\x14\x5f\xc2\xf0\x46\x34\x96\x40\x1b\x42\x67\x44\x16\xa4\xed\xab\xf8\x24\xd5\x7c\x6b\xf5\x31\x6a\x6c\xf3\x98\xec\x82\x9c\x36\xeb\x76\xa7\xc3\x31\xea\x04\xda\x7f\x93\x54\xfa\xaa\xd2\x7f\x5e\x15\xe3\xd8\x75\xee\xb0\x2b\xed\x26\x0f\x5f\x19\x66\x6b\x65\xd8\xc3\x3e\x02\x4a\x2d\xef\x6f\x87\xf0\x9f\xc2\x13\x24\xc2\xc8\x67\xa2\x15\xbe\xf6\x77\x0e\x2b\x63\xd5\x26\x3b\x57\xca\xa1\xf7\x81\x51\x50\x42\xcc\x6d\xd6\xee\x77\x5e\xc8\xf5\xcf\x3a\x9d\xce\xcf\x48\x7d\x16\x61\xf7\xbf\x0a\xbc\x5e\x62\x55\xfc\xda\x2c\xbe\xc3\x07\x78\xe3\x41\x12\x57\xad\x13\x87\x5e\xbd\x4d\xda\xcf\x19\x08\xd7\x3f\x7e\x80\x51\xe5\xb2\x84\xb8\x4d\x92\x1f\x61\xbc\xb1\x2f\x65\x12\x14\x17\x22\x62\xd1\xbb\x43\xec\x79\x6d\xb5\x03\x48\x54\x61\xbd\x87\x51\x27\x0a\xd4\xba\xa3\x4e\x15\x4f\x2d\x9d\x44\x14\x19\x1d\x6b\x67\x97\x56\xdf\x07\x42\x52\x21\xb2\x4a\x2e\xfc\xad\x63\x13\x10\xa6\x56\x54\x52\x6e\xee\x46\xb0\xff\xa1\x86\xa0\x76\xe1\xd0\xff\xc8\x07\x27\x8f\xfd\xd4\xe2\x0a\x3b\x7f\x85\xdc\x60\x84\x4e\xf0\x47\x8f\xdd\x56\x2d\x56\x0d\xcd\x00\x57\xce\x42\xce\x7f\x05\x5c\x2d\x2e\xde\x1e\x61\xa9\x36\xca\xf3\x23\x52\x92\xf6\x2f\xa2\xae\x9b\xd9\x16\x3c\x3f\xb9\x86\xdc\xc0\x20\x32\x6f\xab\xaa\x48\xda\xc7\xda\xe4\x05\xc5\x19\x9a\x35\xa5\xc7\x15\x3a\x4a\x7a\x99\xe9\xe7\xcb\x11\x9c\x44\x21\xd1\x6f\xcd\xbb\xa3\xce\xeb\x29\x53\xf7\x73\xd9\xc1\x4f\xcd\xff\x06\x00\x00\xff\xff\x8e\xc8\x27\x72\x75\x0b\x00\x00")
+
+func _4byte_tracer_legacyJsBytes() ([]byte, error) {
+ return bindataRead(
+ __4byte_tracer_legacyJs,
+ "4byte_tracer_legacy.js",
+ )
+}
+
+func _4byte_tracer_legacyJs() (*asset, error) {
+ bytes, err := _4byte_tracer_legacyJsBytes()
+ if err != nil {
+ return nil, err
+ }
+
+ info := bindataFileInfo{name: "4byte_tracer_legacy.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0xc5, 0x48, 0x2d, 0xd9, 0x43, 0x95, 0x93, 0x3b, 0x93, 0x2c, 0x47, 0x8c, 0x84, 0x32, 0x3c, 0x8b, 0x2e, 0xf3, 0x72, 0xc4, 0x57, 0xe6, 0x3a, 0xb3, 0xdf, 0x1d, 0xbf, 0x45, 0x3, 0xfc, 0xa}}
return a, nil
}
@@ -117,7 +139,7 @@ func bigram_tracerJs() (*asset, error) {
return a, nil
}
-var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5a\xdf\x6f\x1b\x37\xf2\x7f\x96\xfe\x8a\x89\x1f\x6a\x09\x51\x24\x39\xe9\xb7\x5f\xc0\xae\x7a\x50\x1d\x25\x35\xe0\xc6\x81\xad\x34\x08\x82\x3c\x50\xbb\xb3\x12\x6b\x8a\xdc\x92\x5c\xc9\xba\xd6\xff\xfb\x61\x86\xdc\xd5\xae\x24\x3b\xbe\x5e\x71\xe8\xbd\x69\x97\x33\xc3\xe1\xcc\x67\x7e\x71\x35\x18\xc0\xb9\xc9\x37\x56\xce\x17\x1e\x5e\x0e\x4f\xfe\x1f\xa6\x0b\x84\xb9\x79\x81\x7e\x81\x16\x8b\x25\x8c\x0b\xbf\x30\xd6\xb5\x07\x03\x98\x2e\xa4\x83\x4c\x2a\x04\xe9\x20\x17\xd6\x83\xc9\xc0\xef\xd0\x2b\x39\xb3\xc2\x6e\xfa\xed\xc1\x20\xf0\x1c\x5c\x26\x09\x99\x45\x04\x67\x32\xbf\x16\x16\x4f\x61\x63\x0a\x48\x84\x06\x8b\xa9\x74\xde\xca\x59\xe1\x11\xa4\x07\xa1\xd3\x81\xb1\xb0\x34\xa9\xcc\x36\x24\x52\x7a\x28\x74\x8a\x96\xb7\xf6\x68\x97\xae\xd4\xe3\xed\xbb\x0f\x70\x89\xce\xa1\x85\xb7\xa8\xd1\x0a\x05\xef\x8b\x99\x92\x09\x5c\xca\x04\xb5\x43\x10\x0e\x72\x7a\xe3\x16\x98\xc2\x8c\xc5\x11\xe3\x1b\x52\xe5\x26\xaa\x02\x6f\x4c\xa1\x53\xe1\xa5\xd1\x3d\x40\x49\x9a\xc3\x0a\xad\x93\x46\xc3\xab\x72\xab\x28\xb0\x07\xc6\x92\x90\x8e\xf0\x74\x00\x0b\x26\x27\xbe\x2e\x08\xbd\x01\x25\xfc\x96\xf5\x09\x06\xd9\x9e\x3b\x05\xa9\x79\x9b\x85\xc9\x11\xfc\x42\x78\x3a\xf5\x5a\x2a\x05\x33\x84\xc2\x61\x56\xa8\x1e\x49\x9b\x15\x1e\x3e\x5e\x4c\x7f\xba\xfa\x30\x85\xf1\xbb\x4f\xf0\x71\x7c\x7d\x3d\x7e\x37\xfd\x74\x06\x6b\xe9\x17\xa6\xf0\x80\x2b\x0c\xa2\xe4\x32\x57\x12\x53\x58\x0b\x6b\x85\xf6\x1b\x30\x19\x49\xf8\x79\x72\x7d\xfe\xd3\xf8\xdd\x74\xfc\xe3\xc5\xe5\xc5\xf4\x13\x18\x0b\x6f\x2e\xa6\xef\x26\x37\x37\xf0\xe6\xea\x1a\xc6\xf0\x7e\x7c\x3d\xbd\x38\xff\x70\x39\xbe\x86\xf7\x1f\xae\xdf\x5f\xdd\x4c\xfa\x70\x83\xa4\x15\x12\xff\xd7\x6d\x9e\xb1\xf7\x2c\x42\x8a\x5e\x48\xe5\x4a\x4b\x7c\x32\x05\xb8\x85\x29\x54\x0a\x0b\xb1\x42\xb0\x98\xa0\x5c\x61\x0a\x02\x12\x93\x6f\x9e\xec\x54\x92\x25\x94\xd1\x73\x3e\xf3\x83\x80\x84\x8b\x0c\xb4\xf1\x3d\x70\x88\xf0\xfd\xc2\xfb\xfc\x74\x30\x58\xaf\xd7\xfd\xb9\x2e\xfa\xc6\xce\x07\x2a\x88\x73\x83\x1f\xfa\x6d\x92\x99\x08\xa5\xa6\x56\x24\x68\xc9\x39\x02\xb2\x82\xcc\xaf\xcc\x5a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x77\xc2\x60\x14\x1e\xf0\x8e\x9e\xbc\x23\xd0\x82\xc5\xdc\x58\xfa\xad\x54\x89\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\xb0\x14\x29\xc2\x6c\x03\xa2\x2e\xb0\x57\x3f\x0c\xc1\x28\xb8\x1b\xa4\xce\x8c\x5d\x32\x2c\xfb\xed\xdf\xdb\xad\xa8\xa1\xf3\x22\xb9\x25\x05\x49\x7e\x52\x58\x8b\xda\x93\x29\x0b\xeb\xe4\x0a\x99\x04\x02\x4d\xb4\xe7\xe4\x97\x9f\x01\xef\x30\x29\x82\xa4\x56\x25\xe4\x14\x3e\xff\x7e\xff\xa5\xd7\x66\xd1\x29\xba\x04\x75\x8a\x29\x9f\xef\xd6\xc1\x7a\xc1\x16\x85\x35\x1e\xaf\x10\x7e\x2d\x9c\xaf\xd1\x64\xd6\x2c\x41\x68\x30\x05\x21\xbe\x6e\x1d\xa9\xbd\x61\x81\x82\x7e\x6b\xb4\xac\x51\xbf\xdd\xaa\x98\x4f\x21\x13\xca\x61\xdc\xd7\x79\xcc\xe9\x34\x52\xaf\xcc\x2d\x49\x36\x96\x20\x6c\x37\x60\xf2\xc4\xa4\x31\x18\xe8\x1c\xd5\x31\xd0\xf5\xdb\x2d\xe2\x3b\x85\xac\xd0\xbc\x6d\x47\x99\x79\x0f\xd2\x59\x17\x7e\x6f\xb7\x48\xec\xb9\xc8\x7d\x61\x91\xed\x89\xd6\x1a\xeb\x40\x2e\x97\x98\x4a\xe1\x51\x6d\xda\xad\xd6\x4a\xd8\xb0\x00\x23\x50\x66\xde\x9f\xa3\x9f\xd0\x63\xa7\x7b\xd6\x6e\xb5\x64\x06\x9d\xb0\xfa\x6c\x34\xe2\xec\x93\x49\x8d\x69\x10\xdf\xf2\x0b\xe9\xfa\x99\x28\x94\xaf\xf6\x25\xa6\x96\x45\x5f\x58\x4d\x3f\xef\x83\x16\x1f\x11\x8c\x56\x1b\x48\x28\xcb\x88\x19\x85\xa7\xdb\x38\x8f\xcb\x78\x38\xd7\x83\x4c\x38\x32\xa1\xcc\x60\x8d\x90\x5b\x7c\x91\x2c\x90\x7c\xa7\x13\x8c\x5a\xba\x8d\x63\xa7\x8e\x80\x76\xeb\x9b\xbc\xef\xcd\xbb\x62\x39\x43\xdb\xe9\xc2\x37\x30\xbc\xcb\x86\x5d\x18\x8d\xf8\x47\xa9\x7b\xe4\x89\xfa\x92\x14\x93\xc7\x83\x32\xff\x8d\xb7\x52\xcf\xc3\x59\xa3\xae\x17\x19\x08\xd0\xb8\x86\xc4\x68\x06\
x35\x79\x65\x86\x52\xcf\x21\xb1\x28\x3c\xa6\x3d\x10\x69\x0a\xde\x04\xe4\x55\x38\x6b\x6e\x09\xdf\x7c\x03\x1d\xda\x6c\x04\xc7\xe7\xd7\x93\xf1\x74\x72\x0c\x7f\xfc\x01\xe1\xcd\x51\x78\xf3\xf2\xa8\x5b\xd3\x4c\xea\xab\x2c\x8b\xca\xb1\xc0\x7e\x8e\x78\xdb\x39\xe9\xf6\x57\x42\x15\x78\x95\x05\x35\x23\xed\x44\xa7\x30\x8a\x3c\xcf\x77\x79\x5e\x36\x78\x88\x69\x30\x80\xb1\x73\xb8\x9c\x29\xdc\x0f\xc8\x18\xb1\x1c\xbc\xce\x53\xc6\x22\xf4\x25\x66\x99\x2b\x24\x54\x95\xbb\x46\xf3\xb3\xc6\x2d\xbf\xc9\xf1\x14\x00\xc0\xe4\x3d\x7e\x41\xb1\xc0\x2f\xbc\xf9\x09\xef\xd8\x47\xa5\x09\x09\x55\xe3\x34\xb5\xe8\x5c\xa7\xdb\x0d\xe4\x52\xe7\x85\x3f\x6d\x90\x2f\x71\x69\xec\xa6\xef\x28\x21\x75\xf8\x68\xbd\x70\xd2\x92\x67\x2e\xdc\x85\x26\x9e\x88\xd4\xb7\xc2\x75\xb6\x4b\xe7\xc6\xf9\xd3\x72\x89\x1e\xca\x35\xb6\x05\xb1\x1d\x0f\xef\x8e\xf7\xad\x35\xec\x6e\x91\x70\xf2\x5d\x97\x58\xee\xcf\x2a\x7c\x57\x69\xa2\x9f\x17\x6e\xd1\x61\x38\x6d\x57\xb7\xa9\x60\x04\xde\x16\x78\x10\xfe\x0c\xa9\x7d\x38\x39\x54\x19\xe5\x12\x6f\x8b\x84\x61\x35\x17\x9c\x69\x38\xd2\x05\x65\x5e\x57\xcc\xd8\xe6\xde\x98\x7d\x74\x45\x70\xdd\x4c\x2e\xdf\xbc\x9e\xdc\x4c\xaf\x3f\x9c\x4f\x8f\x6b\x70\x52\x98\x79\x52\xaa\x79\x06\x85\x7a\xee\x17\xac\x3f\x89\x6b\xae\x7e\x26\x9e\x17\x27\x5f\xc2\x1b\x18\x1d\x08\xf9\xd6\xe3\x1c\xf0\xf9\x0b\xcb\xbe\xdf\x37\x5f\x93\x34\x18\xf3\xaf\x41\x92\x37\x4c\x5c\x92\x7b\x53\x12\x3c\xee\xe7\xbf\x18\x54\xe9\x8c\x28\x7e\x14\x4a\xe8\x04\x1f\xd1\x79\x1f\x6b\xf5\xa4\x79\x20\x0f\x2d\xd1\x2f\x4c\xca\x85\x21\x11\xa1\xb6\x94\x08\x4a\x8d\xc6\x7f\x3f\x1b\x8d\x2f\x2f\x6b\xb9\x88\x9f\xcf\xaf\x5e\xd7\xf3\xd3\xf1\xeb\xc9\xe5\xe4\xed\x78\x3a\xd9\xa5\xbd\x99\x8e\xa7\x17\xe7\xfc\xb6\x4c\x5d\x83\x01\xdc\xdc\xca\x9c\x2b\x0c\xe7\x6d\xb3\xcc\xb9\x55\xae\xf4\x75\x3d\xf0\x0b\x43\x4d\xa8\x8d\x05\x34\x13\x3a\x29\x0b\x9b\x2b\x01\xeb\x0d\xc1\xf5\x21\xe7\x9d\xec\x38\xaf\x82\xb0\x74\xef\x2d\xc6\x4d\xd3\x8e\x37\xa5\x5e\x5b\x83\x06\x34\x72\xf2\xe7\x04\xdb\x79\xfa\x21\xe1\x1f\x30\x84\x53\x38\x89\x59\xf4\x91\x34\xfd\x12\x9e\x93\xf8\x3f\x91\xac\x5f\x1d\xe0\xfc\x7b\xa6\xec\xbd\x40\xfb\xef\xa7\x72\x53\xf8\xab\x2c\x3b\x85\x5d\x23\x7e\xbb\x67\xc4\x8a\xfe\x12\xf5\x3e\xfd\xff\xed\xd1\x6f\xd3\x3e\xa1\xca\xe4\xf0\x6c\x0f\x22\x21\xe9\x3e\xdb\x89\x83\x68\x5c\x6e\xef\x58\x1a\x8c\x1e\x28\x34\x2f\x9b\x18\x7e\x28\x53\xfe\x47\x85\xe6\x60\x9b\x4a\xcd\x68\xb3\x11\xed\x81\x45\x6f\x25\xae\x68\xd4\x3c\x76\x2c\x92\x1a\x76\xb3\xa6\xf4\xd5\x87\x8f\x18\x24\x6a\x44\x4e\x2e\xb1\xc1\xa7\xfe\x8c\x7b\x5e\x6a\xd2\xe3\xa8\xc6\x10\x13\xdc\x87\x5b\x84\xa5\xd8\xd0\xa8\x96\x15\xfa\x76\x03\x73\xe1\x20\xdd\x68\xb1\x94\x89\x0b\xf2\xb8\xb9\xb7\x38\x17\x96\xc5\x5a\xfc\xad\x40\x47\x73\x1f\x01\x59\x24\xbe\x10\x4a\x6d\x60\x2e\x69\x78\x23\xee\xce\xcb\x57\xc3\x21\x38\x2f\x73\xd4\x69\x0f\xbe\x7b\x35\xf8\xee\x5b\xb0\x85\xc2\x6e\xbf\x5d\x2b\x61\xd5\x51\xa3\x37\x68\x21\xa2\xe7\x35\xe6\x7e\xd1\xe9\xc2\x0f\x0f\xd4\xc2\x07\x0a\xdb\x41\x5a\x78\x01\x27\x5f\xfa\xa4\xd7\xa8\x81\xdb\xe0\x49\x40\xe5\x30\x4a\xa3\x81\xf7\xea\xf5\x55\xe7\x56\x58\xa1\xc4\x0c\xbb\xa7\x3c\x00\xb3\xad\xd6\x22\x4e\x40\xe4\x14\xc8\x95\x90\x1a\x44\x92\x98\x42\x7b\x32\x7c\x39\xcc\xa8\x0d\xe5\xf7\x63\x5f\xca\xe3\x59\x51\x24\x09\x3a\x57\xa6\x7b\xf6\x1a\xa9\x23\x96\xc4\x0d\x52\x3b\x99\x62\xcd\x2b\x94\x1d\x0c\xa7\xe6\x48\x41\xa3\x74\x29\x70\x69\x1c\x6d\x32\x43\x58\x5b\x1a\xbc\x9c\xd4\x09\xdf\x3c\xa4\x48\xd6\x76\x60\x34\x08\x50\x86\xaf\x3b\x38\xc6\x41\xd8\xb9\xeb\x87\x7c\x4f\xdb\x52\xce\xd1\x66\xdd\x6f\x02\xb9\x0e\x55\x1e\x71\x76\x5a\x21\x0d\x78\x27\x9d\xe7\x8e\x9a\xb4\x94\x0e\x02\x92\xa5\x9e\xf7\x20\x37\x39\xe7\xe9\xaf\x95\xb3\x98\xac\xaf\x27\xbf\x4c\xae\xab\xc6\xe7\xe9
\x4e\x2c\x67\x9e\xa3\x6a\x24\x04\x4b\xf3\x96\xc7\xf4\xe8\xc0\x10\x73\x00\x50\xa3\x07\x00\x45\xf2\xb7\xb5\xf1\x7d\xed\x38\x4a\x38\xbf\x75\xcc\x1c\xc3\x3c\x57\x57\xc0\x15\xca\xbb\x9d\xdc\xbd\x9b\x1c\x4c\x5e\x56\x08\x52\x8a\xd3\x0e\x25\xf6\xdd\x49\xa3\xb1\xb0\x1d\x38\xb6\xf8\xbc\xa8\xd9\x78\xcd\xed\x66\x20\xaa\xa5\x06\x5e\x2f\xfb\x56\x11\xaa\x01\xeb\x6e\x0a\x4f\x70\xa0\xfa\xbd\x4d\x7e\x73\xe1\x3e\x38\xf6\x7a\x4c\x7f\x33\x39\xbf\xd0\xbe\x53\x2e\x5e\x68\x78\x01\xe5\x03\x25\x75\x78\xd1\x88\xa2\x03\xd9\xb1\x95\xa2\x42\x8f\xb0\x15\x71\x06\x3b\xaf\x48\x50\x30\x07\x1b\xcd\xa2\xdf\x2f\xce\xc3\x28\x8d\x0c\xf6\xcc\xa2\xef\xe3\x6f\x85\x50\xae\x33\xac\x9a\x85\x70\x02\x6f\xb8\xbc\x8d\xf6\x3a\x49\xe2\x69\xf6\x8e\x67\x35\xb6\x68\x8d\x92\x2d\x74\x82\xe7\x26\xc5\x47\x25\x44\x11\x31\x6d\x54\xbe\x8c\xc0\x3c\xd4\x7b\xb7\xea\x04\x70\x54\x35\x04\x99\x90\xaa\xb0\x78\x74\x06\x07\xd2\x8e\x2b\x6c\x26\x12\xf6\xa5\x43\xe0\x69\xdd\x81\x33\x4b\x5c\x98\x75\x50\xe0\x50\xf2\xda\x07\x47\x85\x83\x9d\xf2\xc1\xd7\x4e\xc2\x41\xe1\xc4\x1c\x6b\xe0\xa8\x0c\x5e\x3a\xea\xe0\x15\xc2\x9f\x86\xce\xf3\xea\xf1\x09\x28\xba\xff\x6b\xe0\xb1\xe3\xe7\xbd\x3e\xa7\x24\xe2\x6e\xa7\xf6\x50\x2a\x1b\x9a\x91\xbf\x97\xe3\x9f\x1c\x61\xbb\xb4\xe1\x68\x4d\xe2\x70\xc0\x6d\x5f\xf3\x75\xf7\x57\xab\x0f\x79\xfe\xa1\x96\x89\x30\xaa\x7f\xc5\xc4\x6f\x71\xca\x5d\x0e\x3d\xe5\x16\x57\xd2\x14\x54\xc0\xf0\x7f\x69\x1c\xae\x5a\xbe\xfb\x76\xeb\x3e\xde\x0b\xb2\xdf\xea\x17\x83\xeb\x45\xbc\xd7\x0e\xdd\x52\xad\x7c\x18\xae\xad\xf1\xba\x30\x0b\x37\xce\x2d\xe6\x7f\xe4\x82\x30\x06\xba\x37\x39\xb5\x03\xb1\x3a\x29\x8b\x22\xdd\x54\x05\xb1\x17\x1a\x11\x58\x08\x9d\xc6\x61\x44\xa4\xa9\x24\x79\x0c\x42\xd2\x50\xcc\x85\xd4\xed\x83\x66\xfc\x6a\x15\x3e\x84\x8c\xbd\xde\xb6\x5e\x48\xe3\x10\x49\x13\x1f\x6b\xdc\x7e\x42\xc1\xdc\x09\xa2\xdd\xbb\xce\x78\x5d\x6a\xb4\x2b\x96\xdc\x09\x83\x58\x09\xa9\x04\x4d\x5f\xdc\x61\xe9\x14\x12\x85\x42\x87\x2f\x1c\x98\x79\xb3\x42\xeb\xda\x4f\x00\xf9\x9f\xc1\xf8\x4e\x56\x2c\x1f\xa3\x39\x9e\x1e\xb3\x4f\x8d\xd8\x70\xfc\x37\x4a\x78\x1f\xe1\x55\x33\x6f\x88\x2c\xe9\xf9\xe3\x17\x6a\xdf\x7e\x5a\x48\x71\xcf\x44\x34\x3f\xc0\xb0\xd6\x97\xff\x5d\x82\x6c\x1f\x62\x97\x55\x7f\x16\x0f\xef\x8d\xe9\x81\x42\xc1\x53\x52\xf9\x69\xaa\xec\x47\x1f\x1b\xda\xca\xe8\x0d\x1d\xdd\x5e\xf8\xf2\x9d\xde\x02\xcb\x1b\x90\xd0\xda\xcf\x10\x35\x48\x8f\x56\xd0\x3c\x44\xe8\x8a\x5f\x53\x48\x4b\xc7\xe2\xd8\x2f\x92\x82\x2e\x0a\x8e\x9f\x36\xa8\x30\x4b\x3d\xef\xb7\x5b\xe1\x7d\x2d\xde\x13\x7f\xb7\x8d\xf7\x50\x01\x99\x33\xde\x09\x54\x57\x02\x89\xbf\xe3\x6e\x91\xc7\xe6\x9d\x7b\x01\x5a\xa3\x57\x61\xa6\xde\xb9\x05\x60\xc6\x78\x13\xb0\x7b\x27\x46\x6b\xfc\xae\x01\x70\x26\x9d\x0b\x17\xc4\xec\x84\x84\xbf\xdb\x8f\x88\x92\x81\x82\xe1\xf4\x30\x03\x2d\x1d\x60\xda\xb9\x99\x20\x62\x7e\x15\x56\x43\x3d\x3f\xad\xaf\x86\x57\xf1\xa0\x72\x59\xb3\x8d\x5c\xb2\x6d\xee\xcf\x0e\x27\xb9\x61\x89\xc7\xc3\xc9\x8c\x6c\x5e\x01\xf6\x01\xd6\xfa\xac\xb1\x4f\xf2\x58\xaa\x64\xe9\x65\x66\x7b\x80\x95\xa5\xd7\x5a\x0e\x7f\xf7\x74\x91\x15\x71\x5d\xc5\x06\x4d\x43\x08\xdf\x36\xee\x2d\x1f\x9a\xb4\x68\x50\x89\x84\x65\x73\x35\x1a\x1d\x0d\xef\xaa\x0f\x23\x31\x57\x35\x68\x4a\x25\x42\x64\x84\xf3\x72\x54\xc8\x7f\x62\xdc\xb6\x1e\x83\xe5\x12\x58\x0c\x1f\x70\xb8\x9b\xa5\x10\x34\x33\x6e\x20\x0a\x47\xa3\xe8\x36\xb6\x52\x74\xd2\x62\x0a\x99\x44\x95\x82\x49\xd1\xf2\xa0\xfb\xab\x33\x3a\x7c\xaa\x43\x2b\x49\x62\xf8\x24\x19\xfe\x1d\xc0\x1f\x4a\xb5\x4c\xd0\x6f\x20\x43\xc1\xdf\xdc\xbc\x81\x5c\x38\x07\x4b\x14\x34\xda\x66\x85\x52\x1b\x30\x36\x45\x12\x5e\xcd\x7a\x14\xd6\x06\x0a\x87\xd6\xc1\x7a\x61\x62\xa9\xe5\x16\x2f\xa7\x6e\x55\xfa\x5e\xbc\xce\x91\x2e\x57\x62\x03\xd2\x53\x5
9\x8f\x87\xaa\x47\x7a\xf5\xa1\x8b\xbf\x96\x19\x32\xf0\x7e\x98\x97\x53\x61\x33\xce\xf9\x35\x3d\x35\x23\x3c\x0e\x45\xcd\xd8\xde\x5e\x74\x35\x03\xb9\x2c\x3d\xcd\x68\xad\x17\xb2\x66\x48\xf2\x0a\x3f\x35\x83\xb1\xd6\x6a\xf3\x02\x23\xa8\x62\xe0\xa7\x9d\xf0\x64\x2d\x63\x7c\x86\xcf\xba\x15\x39\x3f\xf5\x22\x60\xc8\x8b\x1d\x32\xce\x2d\x6e\x28\x9b\x07\x1b\xd5\x4a\x53\x78\xf1\xf9\x16\x37\x5f\x0e\x57\xa2\x08\xc7\x1a\x5d\x55\x7a\xca\xb0\x08\x6b\x8f\x24\x83\x4a\x0b\x39\x1a\x9e\x81\xfc\xbe\xce\x50\x56\x4f\x90\xcf\x9f\x97\x7b\xd6\xd7\x3f\xcb\x2f\x65\x84\x57\x88\xdf\x59\xef\x36\x34\x8a\x31\x12\x68\x28\x28\xda\xf7\xed\x7f\x05\x00\x00\xff\xff\xfb\x65\x93\x4f\xfc\x22\x00\x00")
+var _call_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x57\x4f\x6f\xdb\xb8\x12\x3f\xcb\x9f\x62\x5e\x0e\xb5\x8d\xba\x56\xd2\x07\xf4\xe0\xd6\x05\xfc\x82\xa4\x35\x90\x97\x04\x8e\xb3\x45\x10\xe4\x40\x5b\x23\x89\x2d\x4d\x0a\x24\x15\xc7\xdb\xfa\xbb\x2f\x86\x94\x64\x49\xb1\xd3\xec\x69\xb1\x39\xc5\xc3\xdf\xfc\x66\x38\xff\x38\x0a\x43\x38\x55\xd9\x46\xf3\x24\xb5\xf0\xfe\xf8\xfd\x09\xcc\x53\x84\x44\xbd\x43\x9b\xa2\xc6\x7c\x05\x93\xdc\xa6\x4a\x9b\x4e\x18\xc2\x3c\xe5\x06\x62\x2e\x10\xb8\x81\x8c\x69\x0b\x2a\x06\xdb\xc2\x0b\xbe\xd0\x4c\x6f\x86\x9d\x30\xf4\x3a\x7b\x8f\x89\x21\xd6\x88\x60\x54\x6c\xd7\x4c\xe3\x08\x36\x2a\x87\x25\x93\xa0\x31\xe2\xc6\x6a\xbe\xc8\x2d\x02\xb7\xc0\x64\x14\x2a\x0d\x2b\x15\xf1\x78\x43\x94\xdc\x42\x2e\x23\xd4\xce\xb4\x45\xbd\x32\xa5\x1f\x5f\x2e\x6f\xe1\x02\x8d\x41\x0d\x5f\x50\xa2\x66\x02\xae\xf3\x85\xe0\x4b\xb8\xe0\x4b\x94\x06\x81\x19\xc8\x48\x62\x52\x8c\x60\xe1\xe8\x48\xf1\x9c\x5c\xb9\x29\x5c\x81\x73\x95\xcb\x88\x59\xae\xe4\x00\x90\x93\xe7\xf0\x88\xda\x70\x25\xe1\xbf\xa5\xa9\x82\x70\x00\x4a\x13\x49\x8f\x59\xba\x80\x06\x95\x91\x5e\x1f\x98\xdc\x80\x60\x76\xa7\xfa\x8a\x80\xec\xee\x1d\x01\x97\xce\x4c\xaa\x32\x04\x9b\x32\x4b\xb7\x5e\x73\x21\x60\x81\x90\x1b\x8c\x73\x31\x20\xb6\x45\x6e\xe1\xdb\x74\xfe\xf5\xea\x76\x0e\x93\xcb\x3b\xf8\x36\x99\xcd\x26\x97\xf3\xbb\x8f\xb0\xe6\x36\x55\xb9\x05\x7c\x44\x4f\xc5\x57\x99\xe0\x18\xc1\x9a\x69\xcd\xa4\xdd\x80\x8a\x89\xe1\xff\x67\xb3\xd3\xaf\x93\xcb\xf9\xe4\x7f\xd3\x8b\xe9\xfc\x0e\x94\x86\xf3\xe9\xfc\xf2\xec\xe6\x06\xce\xaf\x66\x30\x81\xeb\xc9\x6c\x3e\x3d\xbd\xbd\x98\xcc\xe0\xfa\x76\x76\x7d\x75\x73\x36\x84\x1b\x24\xaf\x90\xf4\x7f\x1f\xf3\xd8\x65\x4f\x23\x44\x68\x19\x17\xa6\x8c\xc4\x9d\xca\xc1\xa4\x2a\x17\x11\xa4\xec\x11\x41\xe3\x12\xf9\x23\x46\xc0\x60\xa9\xb2\xcd\xab\x93\x4a\x5c\x4c\x28\x99\xb8\x3b\x1f\x2c\x48\x98\xc6\x20\x95\x1d\x80\x41\x84\x4f\xa9\xb5\xd9\x28\x0c\xd7\xeb\xf5\x30\x91\xf9\x50\xe9\x24\x14\x9e\xce\x84\x9f\x87\x9d\x0e\x91\x2e\x99\x10\xe7\x9a\xad\x70\xae\xd9\x12\x35\xc5\xdd\x38\x7a\x89\x6b\x77\x08\x31\x9d\x82\xd5\x6c\xc9\x65\x02\x2b\xb4\xa9\x8a\x0c\x58\x05\x1a\x33\xa5\x6d\x91\x29\xe0\x32\x56\x7a\xe5\x2a\xca\x39\xbb\xa0\xc4\x70\x69\x51\x4b\x26\x60\x85\xc6\xb0\x04\x5d\x15\x33\x22\x93\x86\x2d\xad\x2b\x99\x9f\x1d\x00\x70\xa6\x8c\x65\xcb\x1f\x23\xb8\xff\xb9\x7d\x18\x38\x61\xcc\x72\x61\x47\x10\xe7\xd2\x61\x7b\x42\x25\x03\x88\x16\x7d\xf0\x3a\xf4\xf7\xc8\x34\x08\x94\x30\x06\x9b\x72\x33\xac\x68\x86\x02\x65\x62\xd3\x0a\xc7\x63\xe8\x11\xee\x33\x9c\xd4\xd5\x4b\x0a\x77\xd3\x67\x1c\x99\xca\x7a\xfd\x06\x96\x68\x9a\xa0\x7b\x81\xf2\xdd\xc9\x83\x17\xc0\x78\x3c\x76\x8d\x1b\x73\x89\x51\xdb\x10\xfd\xbd\xa8\x0c\xf7\x0f\x0d\x85\x6d\xe7\x95\xaa\xc3\x2c\x37\x69\x8f\xfe\xdd\xb9\xeb\x95\xb7\x3e\x92\x1a\x4d\x33\x94\x4b\xfb\xd4\x0e\x65\x18\xc2\xb5\xc6\x8c\xa6\x83\xca\xa9\xab\x8b\xa4\xb9\xd4\x36\x02\xee\xd9\x60\xdc\xba\x9f\xdd\x64\x38\x72\xc9\xb4\x4f\x43\xfa\x31\x68\x1c\xc7\x5a\xad\xdc\xb1\x55\x5f\xf1\x89\x3c\x18\x92\xa8\xdf\x44\x59\x35\x2a\xff\x29\x51\x56\xb5\x30\x8f\x4c\xe4\xce\x52\xf7\xf8\xa9\x0b\x6f\x9d\x3d\x27\x1b\x5a\x75\x63\x35\x97\x49\xef\xe4\x43\x4b\x27\x61\xc6\x13\x17\x3a\x0b\x9e\x4c\xa5\x75\xfc\x09\x33\xfd\x97\x35\x6f\x0d\x46\xa3\xfd\x9a\x74\xf4\x92\x36\x97\x59\x6e\x47\x8d\xfb\x38\x51\x0b\xa6\x72\xeb\x71\x3b\x98\x17\xd5\x70\xdb\x46\x35\xb7\xca\xe1\xb8\xac\xa2\xff\x1c\x2e\x41\x9f\xb7\xaa\xda\x0e\x30\xbc\xda\x1e\x6a\xad\xf4\x2b\xec\x79\xdc\x3e\x7b\xee\x64\x67\x0f\x50\x18\x74\xc6\xe8\xfe\x7f\x97\xbe\xd2\x39\x70\x81\x06\xbc\x41\x0b\x6f\xde\xec\x39\x3e\xc2\x27\x5c\xe6\xd4\x2d\xa0\xf1\x11\xb5\xc5\xe8\x08\x7e\xfd\x2a\xcd\xfa\xf4\x50\xc7\x1f\x1d\
x3f\x1d\xf5\x9b\xae\x45\x28\xd0\x62\x13\x5a\x73\xab\xb3\xbb\x82\xcd\xb5\xf4\x91\x89\xb9\x64\x82\xff\x89\x85\x27\xfd\x7a\xff\x22\x0d\xd2\x5a\xfb\xba\xa1\xdc\x9e\x83\xc5\x10\xdb\xd7\x94\x0e\x3f\x4c\xd0\xce\x37\x19\xf6\xfa\xfb\x1a\xd3\x17\x5e\x05\x3c\xd7\x6a\xd5\xeb\xef\x69\xce\x16\x6e\xae\x9e\xa1\x8a\x92\x6f\x01\xa7\x24\x7d\x86\x75\x6d\xd9\x6c\xac\x4a\xe3\x0b\x33\xbd\x7e\xad\xb7\xba\x27\x1f\xba\x07\xdb\xa1\xd2\xfa\x83\x06\x41\xaf\xdf\x2a\x9c\x66\x50\x28\x52\x7e\x62\x8c\x0f\xd8\x2e\x58\x9a\x9d\xbd\xc7\x74\xfb\xc5\x68\xce\xe1\x32\x7b\x4f\xdc\xb6\x93\x37\xf3\x49\xfe\xe7\x9e\x32\x17\x83\x62\x80\xc1\x78\x5f\x0e\xbc\x8b\x45\x26\x08\xf6\x3c\x1b\xcf\xac\x97\xcd\xd8\x22\x38\x23\xf1\x9e\xb7\xb4\x80\xff\xee\xd5\x74\xbe\x96\x0d\x57\x2f\xac\x9d\x85\x2b\x77\xda\xeb\x37\x6d\x14\x23\xe5\x00\x63\xe9\x6c\x73\x6a\xd4\xfd\x73\x30\x6a\x21\xe7\x63\xf7\x74\x76\x36\x99\x9f\x75\x69\x0a\xec\x3d\x79\xdf\xdd\xe7\x3d\xec\x06\x82\xd7\x52\xcf\x20\xdb\x17\xde\x7d\xca\xf5\xbb\x31\x9c\xfc\xeb\x17\x91\x20\x0c\xa1\x1c\x72\xb4\x09\x6b\x64\x16\x0d\xad\xc2\x54\xb2\x6a\xf1\x1d\x97\xb4\x4e\xd2\x9a\x49\x1b\xa8\x83\x42\x84\x86\x6b\x8c\x20\xe6\x28\x22\x50\xf4\x4d\x44\xcb\xf6\x77\xa3\xa4\x23\x34\xa8\x39\x31\xba\xcd\x73\xe8\xbf\xdf\x38\x91\x4a\xbe\x44\xbb\x81\x18\x99\xcd\x35\xd2\xc2\x9a\x31\x63\x60\x85\x4c\x72\x99\xc4\xb9\x10\x1b\x50\x3a\x42\x22\xf7\x13\xd7\x38\x42\xab\x68\xa5\xd5\x06\xd6\xa9\x82\x48\xc9\x6e\xb1\xc6\x66\x1a\xe9\x0b\x65\x00\xdf\x73\x63\xe9\x3b\x26\x13\x6c\x03\xdc\x0e\x3b\x41\x79\xa9\xfa\x7e\x45\x21\x80\x9f\x9d\x20\xa0\xae\x30\x8a\x5e\x0f\x37\x9b\x83\x20\xd8\xed\x49\x65\x0d\x0d\x48\x5c\xed\x47\x4e\x4c\xbf\x9c\xb8\x5a\x88\x8a\xda\x71\xc2\x6a\x03\xda\x4d\x32\x27\xaf\xb6\x9c\xb2\xbb\x4b\xa9\xdf\x60\xea\x3d\xef\x4e\xaa\xed\xc4\x9d\xb8\x5f\x4e\x5e\xad\x23\xb5\xce\x73\x07\xae\x55\x46\x8d\x06\xf2\x5e\xf2\x55\xfd\x4e\x7c\xe5\xfd\x71\x45\x51\xc1\xdd\x2f\x92\x6f\x3b\x41\x40\x59\xec\x51\x70\x7e\xe0\x86\x3e\x05\x7d\x8c\x7c\xcc\x02\x2a\x6f\x2f\xb8\xff\x81\x9b\x87\xe7\xe5\x1c\x04\x41\x50\xf4\x54\x0d\x47\xe2\x6d\xc1\xbf\xa3\x38\xb4\x18\x05\x35\x27\xf8\xf8\xf8\x23\xf0\x4f\x75\x85\x62\xee\x7e\x04\xfe\xf6\x6d\x69\xb2\x7e\x7e\xcf\x1f\xca\x39\x5b\x3d\xdd\xad\xf3\x7e\xdd\xa1\xe2\xad\xf7\x90\x4e\xb0\xed\x6c\x3b\x7f\x05\x00\x00\xff\xff\x71\x10\x40\x55\x9b\x10\x00\x00")
func call_tracerJsBytes() ([]byte, error) {
return bindataRead(
@@ -133,6 +155,26 @@ func call_tracerJs() (*asset, error) {
}
info := bindataFileInfo{name: "call_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x96, 0x54, 0x29, 0x1, 0x3b, 0x86, 0xea, 0xb2, 0x35, 0xbd, 0x97, 0xb1, 0x17, 0x8c, 0x17, 0x79, 0x1c, 0x4c, 0x8e, 0x7b, 0xe2, 0x5f, 0x11, 0x59, 0xa0, 0x94, 0x35, 0x43, 0xec, 0x18, 0x2a, 0xd9}}
+ return a, nil
+}
+
+var _call_tracer_legacyJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5a\xdf\x6f\x1b\x37\xf2\x7f\x96\xfe\x8a\x89\x1f\x6a\x09\x51\x24\x39\xe9\xb7\x5f\xc0\xae\x7a\x50\x1d\x25\x35\xe0\xc6\x81\xad\x34\x08\x82\x3c\x50\xbb\xb3\x12\x6b\x8a\xdc\x92\x5c\xc9\xba\xd6\xff\xfb\x61\x86\xdc\xd5\xae\x24\x3b\xbe\x5e\x71\xe8\xbd\x69\x97\x33\xc3\xe1\xcc\x67\x7e\x71\x35\x18\xc0\xb9\xc9\x37\x56\xce\x17\x1e\x5e\x0e\x4f\xfe\x1f\xa6\x0b\x84\xb9\x79\x81\x7e\x81\x16\x8b\x25\x8c\x0b\xbf\x30\xd6\xb5\x07\x03\x98\x2e\xa4\x83\x4c\x2a\x04\xe9\x20\x17\xd6\x83\xc9\xc0\xef\xd0\x2b\x39\xb3\xc2\x6e\xfa\xed\xc1\x20\xf0\x1c\x5c\x26\x09\x99\x45\x04\x67\x32\xbf\x16\x16\x4f\x61\x63\x0a\x48\x84\x06\x8b\xa9\x74\xde\xca\x59\xe1\x11\xa4\x07\xa1\xd3\x81\xb1\xb0\x34\xa9\xcc\x36\x24\x52\x7a\x28\x74\x8a\x96\xb7\xf6\x68\x97\xae\xd4\xe3\xed\xbb\x0f\x70\x89\xce\xa1\x85\xb7\xa8\xd1\x0a\x05\xef\x8b\x99\x92\x09\x5c\xca\x04\xb5\x43\x10\x0e\x72\x7a\xe3\x16\x98\xc2\x8c\xc5\x11\xe3\x1b\x52\xe5\x26\xaa\x02\x6f\x4c\xa1\x53\xe1\xa5\xd1\x3d\x40\x49\x9a\xc3\x0a\xad\x93\x46\xc3\xab\x72\xab\x28\xb0\x07\xc6\x92\x90\x8e\xf0\x74\x00\x0b\x26\x27\xbe\x2e\x08\xbd\x01\x25\xfc\x96\xf5\x09\x06\xd9\x9e\x3b\x05\xa9\x79\x9b\x85\xc9\x11\xfc\x42\x78\x3a\xf5\x5a\x2a\x05\x33\x84\xc2\x61\x56\xa8\x1e\x49\x9b\x15\x1e\x3e\x5e\x4c\x7f\xba\xfa\x30\x85\xf1\xbb\x4f\xf0\x71\x7c\x7d\x3d\x7e\x37\xfd\x74\x06\x6b\xe9\x17\xa6\xf0\x80\x2b\x0c\xa2\xe4\x32\x57\x12\x53\x58\x0b\x6b\x85\xf6\x1b\x30\x19\x49\xf8\x79\x72\x7d\xfe\xd3\xf8\xdd\x74\xfc\xe3\xc5\xe5\xc5\xf4\x13\x18\x0b\x6f\x2e\xa6\xef\x26\x37\x37\xf0\xe6\xea\x1a\xc6\xf0\x7e\x7c\x3d\xbd\x38\xff\x70\x39\xbe\x86\xf7\x1f\xae\xdf\x5f\xdd\x4c\xfa\x70\x83\xa4\x15\x12\xff\xd7\x6d\x9e\xb1\xf7\x2c\x42\x8a\x5e\x48\xe5\x4a\x4b\x7c\x32\x05\xb8\x85\x29\x54\x0a\x0b\xb1\x42\xb0\x98\xa0\x5c\x61\x0a\x02\x12\x93\x6f\x9e\xec\x54\x92\x25\x94\xd1\x73\x3e\xf3\x83\x80\x84\x8b\x0c\xb4\xf1\x3d\x70\x88\xf0\xfd\xc2\xfb\xfc\x74\x30\x58\xaf\xd7\xfd\xb9\x2e\xfa\xc6\xce\x07\x2a\x88\x73\x83\x1f\xfa\x6d\x92\x99\x08\xa5\xa6\x56\x24\x68\xc9\x39\x02\xb2\x82\xcc\xaf\xcc\x5a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x77\xc2\x60\x14\x1e\xf0\x8e\x9e\xbc\x23\xd0\x82\xc5\xdc\x58\xfa\xad\x54\x89\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\xb0\x14\x29\xc2\x6c\x03\xa2\x2e\xb0\x57\x3f\x0c\xc1\x28\xb8\x1b\xa4\xce\x8c\x5d\x32\x2c\xfb\xed\xdf\xdb\xad\xa8\xa1\xf3\x22\xb9\x25\x05\x49\x7e\x52\x58\x8b\xda\x93\x29\x0b\xeb\xe4\x0a\x99\x04\x02\x4d\xb4\xe7\xe4\x97\x9f\x01\xef\x30\x29\x82\xa4\x56\x25\xe4\x14\x3e\xff\x7e\xff\xa5\xd7\x66\xd1\x29\xba\x04\x75\x8a\x29\x9f\xef\xd6\xc1\x7a\xc1\x16\x85\x35\x1e\xaf\x10\x7e\x2d\x9c\xaf\xd1\x64\xd6\x2c\x41\x68\x30\x05\x21\xbe\x6e\x1d\xa9\xbd\x61\x81\x82\x7e\x6b\xb4\xac\x51\xbf\xdd\xaa\x98\x4f\x21\x13\xca\x61\xdc\xd7\x79\xcc\xe9\x34\x52\xaf\xcc\x2d\x49\x36\x96\x20\x6c\x37\x60\xf2\xc4\xa4\x31\x18\xe8\x1c\xd5\x31\xd0\xf5\xdb\x2d\xe2\x3b\x85\xac\xd0\xbc\x6d\x47\x99\x79\x0f\xd2\x59\x17\x7e\x6f\xb7\x48\xec\xb9\xc8\x7d\x61\x91\xed\x89\xd6\x1a\xeb\x40\x2e\x97\x98\x4a\xe1\x51\x6d\xda\xad\xd6\x4a\xd8\xb0\x00\x23\x50\x66\xde\x9f\xa3\x9f\xd0\x63\xa7\x7b\xd6\x6e\xb5\x64\x06\x9d\xb0\xfa\x6c\x34\xe2\xec\x93\x49\x8d\x69\x10\xdf\xf2\x0b\xe9\xfa\x99\x28\x94\xaf\xf6\x25\xa6\x96\x45\x5f\x58\x4d\x3f\xef\x83\x16\x1f\x11\x8c\x56\x1b\x48\x28\xcb\x88\x19\x85\xa7\xdb\x38\x8f\xcb\x78\x38\xd7\x83\x4c\x38\x32\xa1\xcc\x60\x8d\x90\x5b\x7c\x91\x2c\x90\x7c\xa7\x13\x8c\x5a\xba\x8d\x63\xa7\x8e\x80\x76\xeb\x9b\xbc\xef\xcd\xbb\x62\x39\x43\xdb\xe9\xc2\x37\x30\xbc\xcb\x86\x5d\x18\x8d\xf8\x47\xa9\x7b\xe4\x89\xfa\x92\x14\x93\xc7\x83\x32\xff\x8d\xb7\x52\xcf\xc3\x59\xa3\xae\x17\x19\x08\xd0\xb8\x86\xc4\x
68\x06\x35\x79\x65\x86\x52\xcf\x21\xb1\x28\x3c\xa6\x3d\x10\x69\x0a\xde\x04\xe4\x55\x38\x6b\x6e\x09\xdf\x7c\x03\x1d\xda\x6c\x04\xc7\xe7\xd7\x93\xf1\x74\x72\x0c\x7f\xfc\x01\xe1\xcd\x51\x78\xf3\xf2\xa8\x5b\xd3\x4c\xea\xab\x2c\x8b\xca\xb1\xc0\x7e\x8e\x78\xdb\x39\xe9\xf6\x57\x42\x15\x78\x95\x05\x35\x23\xed\x44\xa7\x30\x8a\x3c\xcf\x77\x79\x5e\x36\x78\x88\x69\x30\x80\xb1\x73\xb8\x9c\x29\xdc\x0f\xc8\x18\xb1\x1c\xbc\xce\x53\xc6\x22\xf4\x25\x66\x99\x2b\x24\x54\x95\xbb\x46\xf3\xb3\xc6\x2d\xbf\xc9\xf1\x14\x00\xc0\xe4\x3d\x7e\x41\xb1\xc0\x2f\xbc\xf9\x09\xef\xd8\x47\xa5\x09\x09\x55\xe3\x34\xb5\xe8\x5c\xa7\xdb\x0d\xe4\x52\xe7\x85\x3f\x6d\x90\x2f\x71\x69\xec\xa6\xef\x28\x21\x75\xf8\x68\xbd\x70\xd2\x92\x67\x2e\xdc\x85\x26\x9e\x88\xd4\xb7\xc2\x75\xb6\x4b\xe7\xc6\xf9\xd3\x72\x89\x1e\xca\x35\xb6\x05\xb1\x1d\x0f\xef\x8e\xf7\xad\x35\xec\x6e\x91\x70\xf2\x5d\x97\x58\xee\xcf\x2a\x7c\x57\x69\xa2\x9f\x17\x6e\xd1\x61\x38\x6d\x57\xb7\xa9\x60\x04\xde\x16\x78\x10\xfe\x0c\xa9\x7d\x38\x39\x54\x19\xe5\x12\x6f\x8b\x84\x61\x35\x17\x9c\x69\x38\xd2\x05\x65\x5e\x57\xcc\xd8\xe6\xde\x98\x7d\x74\x45\x70\xdd\x4c\x2e\xdf\xbc\x9e\xdc\x4c\xaf\x3f\x9c\x4f\x8f\x6b\x70\x52\x98\x79\x52\xaa\x79\x06\x85\x7a\xee\x17\xac\x3f\x89\x6b\xae\x7e\x26\x9e\x17\x27\x5f\xc2\x1b\x18\x1d\x08\xf9\xd6\xe3\x1c\xf0\xf9\x0b\xcb\xbe\xdf\x37\x5f\x93\x34\x18\xf3\xaf\x41\x92\x37\x4c\x5c\x92\x7b\x53\x12\x3c\xee\xe7\xbf\x18\x54\xe9\x8c\x28\x7e\x14\x4a\xe8\x04\x1f\xd1\x79\x1f\x6b\xf5\xa4\x79\x20\x0f\x2d\xd1\x2f\x4c\xca\x85\x21\x11\xa1\xb6\x94\x08\x4a\x8d\xc6\x7f\x3f\x1b\x8d\x2f\x2f\x6b\xb9\x88\x9f\xcf\xaf\x5e\xd7\xf3\xd3\xf1\xeb\xc9\xe5\xe4\xed\x78\x3a\xd9\xa5\xbd\x99\x8e\xa7\x17\xe7\xfc\xb6\x4c\x5d\x83\x01\xdc\xdc\xca\x9c\x2b\x0c\xe7\x6d\xb3\xcc\xb9\x55\xae\xf4\x75\x3d\xf0\x0b\x43\x4d\xa8\x8d\x05\x34\x13\x3a\x29\x0b\x9b\x2b\x01\xeb\x0d\xc1\xf5\x21\xe7\x9d\xec\x38\xaf\x82\xb0\x74\xef\x2d\xc6\x4d\xd3\x8e\x37\xa5\x5e\x5b\x83\x06\x34\x72\xf2\xe7\x04\xdb\x79\xfa\x21\xe1\x1f\x30\x84\x53\x38\x89\x59\xf4\x91\x34\xfd\x12\x9e\x93\xf8\x3f\x91\xac\x5f\x1d\xe0\xfc\x7b\xa6\xec\xbd\x40\xfb\xef\xa7\x72\x53\xf8\xab\x2c\x3b\x85\x5d\x23\x7e\xbb\x67\xc4\x8a\xfe\x12\xf5\x3e\xfd\xff\xed\xd1\x6f\xd3\x3e\xa1\xca\xe4\xf0\x6c\x0f\x22\x21\xe9\x3e\xdb\x89\x83\x68\x5c\x6e\xef\x58\x1a\x8c\x1e\x28\x34\x2f\x9b\x18\x7e\x28\x53\xfe\x47\x85\xe6\x60\x9b\x4a\xcd\x68\xb3\x11\xed\x81\x45\x6f\x25\xae\x68\xd4\x3c\x76\x2c\x92\x1a\x76\xb3\xa6\xf4\xd5\x87\x8f\x18\x24\x6a\x44\x4e\x2e\xb1\xc1\xa7\xfe\x8c\x7b\x5e\x6a\xd2\xe3\xa8\xc6\x10\x13\xdc\x87\x5b\x84\xa5\xd8\xd0\xa8\x96\x15\xfa\x76\x03\x73\xe1\x20\xdd\x68\xb1\x94\x89\x0b\xf2\xb8\xb9\xb7\x38\x17\x96\xc5\x5a\xfc\xad\x40\x47\x73\x1f\x01\x59\x24\xbe\x10\x4a\x6d\x60\x2e\x69\x78\x23\xee\xce\xcb\x57\xc3\x21\x38\x2f\x73\xd4\x69\x0f\xbe\x7b\x35\xf8\xee\x5b\xb0\x85\xc2\x6e\xbf\x5d\x2b\x61\xd5\x51\xa3\x37\x68\x21\xa2\xe7\x35\xe6\x7e\xd1\xe9\xc2\x0f\x0f\xd4\xc2\x07\x0a\xdb\x41\x5a\x78\x01\x27\x5f\xfa\xa4\xd7\xa8\x81\xdb\xe0\x49\x40\xe5\x30\x4a\xa3\x81\xf7\xea\xf5\x55\xe7\x56\x58\xa1\xc4\x0c\xbb\xa7\x3c\x00\xb3\xad\xd6\x22\x4e\x40\xe4\x14\xc8\x95\x90\x1a\x44\x92\x98\x42\x7b\x32\x7c\x39\xcc\xa8\x0d\xe5\xf7\x63\x5f\xca\xe3\x59\x51\x24\x09\x3a\x57\xa6\x7b\xf6\x1a\xa9\x23\x96\xc4\x0d\x52\x3b\x99\x62\xcd\x2b\x94\x1d\x0c\xa7\xe6\x48\x41\xa3\x74\x29\x70\x69\x1c\x6d\x32\x43\x58\x5b\x1a\xbc\x9c\xd4\x09\xdf\x3c\xa4\x48\xd6\x76\x60\x34\x08\x50\x86\xaf\x3b\x38\xc6\x41\xd8\xb9\xeb\x87\x7c\x4f\xdb\x52\xce\xd1\x66\xdd\x6f\x02\xb9\x0e\x55\x1e\x71\x76\x5a\x21\x0d\x78\x27\x9d\xe7\x8e\x9a\xb4\x94\x0e\x02\x92\xa5\x9e\xf7\x20\x37\x39\xe7\xe9\xaf\x95\xb3\x98\xac\xaf\x27\xbf\x4c\xae\xab\xc6\
xe7\xe9\x4e\x2c\x67\x9e\xa3\x6a\x24\x04\x4b\xf3\x96\xc7\xf4\xe8\xc0\x10\x73\x00\x50\xa3\x07\x00\x45\xf2\xb7\xb5\xf1\x7d\xed\x38\x4a\x38\xbf\x75\xcc\x1c\xc3\x3c\x57\x57\xc0\x15\xca\xbb\x9d\xdc\xbd\x9b\x1c\x4c\x5e\x56\x08\x52\x8a\xd3\x0e\x25\xf6\xdd\x49\xa3\xb1\xb0\x1d\x38\xb6\xf8\xbc\xa8\xd9\x78\xcd\xed\x66\x20\xaa\xa5\x06\x5e\x2f\xfb\x56\x11\xaa\x01\xeb\x6e\x0a\x4f\x70\xa0\xfa\xbd\x4d\x7e\x73\xe1\x3e\x38\xf6\x7a\x4c\x7f\x33\x39\xbf\xd0\xbe\x53\x2e\x5e\x68\x78\x01\xe5\x03\x25\x75\x78\xd1\x88\xa2\x03\xd9\xb1\x95\xa2\x42\x8f\xb0\x15\x71\x06\x3b\xaf\x48\x50\x30\x07\x1b\xcd\xa2\xdf\x2f\xce\xc3\x28\x8d\x0c\xf6\xcc\xa2\xef\xe3\x6f\x85\x50\xae\x33\xac\x9a\x85\x70\x02\x6f\xb8\xbc\x8d\xf6\x3a\x49\xe2\x69\xf6\x8e\x67\x35\xb6\x68\x8d\x92\x2d\x74\x82\xe7\x26\xc5\x47\x25\x44\x11\x31\x6d\x54\xbe\x8c\xc0\x3c\xd4\x7b\xb7\xea\x04\x70\x54\x35\x04\x99\x90\xaa\xb0\x78\x74\x06\x07\xd2\x8e\x2b\x6c\x26\x12\xf6\xa5\x43\xe0\x69\xdd\x81\x33\x4b\x5c\x98\x75\x50\xe0\x50\xf2\xda\x07\x47\x85\x83\x9d\xf2\xc1\xd7\x4e\xc2\x41\xe1\xc4\x1c\x6b\xe0\xa8\x0c\x5e\x3a\xea\xe0\x15\xc2\x9f\x86\xce\xf3\xea\xf1\x09\x28\xba\xff\x6b\xe0\xb1\xe3\xe7\xbd\x3e\xa7\x24\xe2\x6e\xa7\xf6\x50\x2a\x1b\x9a\x91\xbf\x97\xe3\x9f\x1c\x61\xbb\xb4\xe1\x68\x4d\xe2\x70\xc0\x6d\x5f\xf3\x75\xf7\x57\xab\x0f\x79\xfe\xa1\x96\x89\x30\xaa\x7f\xc5\xc4\x6f\x71\xca\x5d\x0e\x3d\xe5\x16\x57\xd2\x14\x54\xc0\xf0\x7f\x69\x1c\xae\x5a\xbe\xfb\x76\xeb\x3e\xde\x0b\xb2\xdf\xea\x17\x83\xeb\x45\xbc\xd7\x0e\xdd\x52\xad\x7c\x18\xae\xad\xf1\xba\x30\x0b\x37\xce\x2d\xe6\x7f\xe4\x82\x30\x06\xba\x37\x39\xb5\x03\xb1\x3a\x29\x8b\x22\xdd\x54\x05\xb1\x17\x1a\x11\x58\x08\x9d\xc6\x61\x44\xa4\xa9\x24\x79\x0c\x42\xd2\x50\xcc\x85\xd4\xed\x83\x66\xfc\x6a\x15\x3e\x84\x8c\xbd\xde\xb6\x5e\x48\xe3\x10\x49\x13\x1f\x6b\xdc\x7e\x42\xc1\xdc\x09\xa2\xdd\xbb\xce\x78\x5d\x6a\xb4\x2b\x96\xdc\x09\x83\x58\x09\xa9\x04\x4d\x5f\xdc\x61\xe9\x14\x12\x85\x42\x87\x2f\x1c\x98\x79\xb3\x42\xeb\xda\x4f\x00\xf9\x9f\xc1\xf8\x4e\x56\x2c\x1f\xa3\x39\x9e\x1e\xb3\x4f\x8d\xd8\x70\xfc\x37\x4a\x78\x1f\xe1\x55\x33\x6f\x88\x2c\xe9\xf9\xe3\x17\x6a\xdf\x7e\x5a\x48\x71\xcf\x44\x34\x3f\xc0\xb0\xd6\x97\xff\x5d\x82\x6c\x1f\x62\x97\x55\x7f\x16\x0f\xef\x8d\xe9\x81\x42\xc1\x53\x52\xf9\x69\xaa\xec\x47\x1f\x1b\xda\xca\xe8\x0d\x1d\xdd\x5e\xf8\xf2\x9d\xde\x02\xcb\x1b\x90\xd0\xda\xcf\x10\x35\x48\x8f\x56\xd0\x3c\x44\xe8\x8a\x5f\x53\x48\x4b\xc7\xe2\xd8\x2f\x92\x82\x2e\x0a\x8e\x9f\x36\xa8\x30\x4b\x3d\xef\xb7\x5b\xe1\x7d\x2d\xde\x13\x7f\xb7\x8d\xf7\x50\x01\x99\x33\xde\x09\x54\x57\x02\x89\xbf\xe3\x6e\x91\xc7\xe6\x9d\x7b\x01\x5a\xa3\x57\x61\xa6\xde\xb9\x05\x60\xc6\x78\x13\xb0\x7b\x27\x46\x6b\xfc\xae\x01\x70\x26\x9d\x0b\x17\xc4\xec\x84\x84\xbf\xdb\x8f\x88\x92\x81\x82\xe1\xf4\x30\x03\x2d\x1d\x60\xda\xb9\x99\x20\x62\x7e\x15\x56\x43\x3d\x3f\xad\xaf\x86\x57\xf1\xa0\x72\x59\xb3\x8d\x5c\xb2\x6d\xee\xcf\x0e\x27\xb9\x61\x89\xc7\xc3\xc9\x8c\x6c\x5e\x01\xf6\x01\xd6\xfa\xac\xb1\x4f\xf2\x58\xaa\x64\xe9\x65\x66\x7b\x80\x95\xa5\xd7\x5a\x0e\x7f\xf7\x74\x91\x15\x71\x5d\xc5\x06\x4d\x43\x08\xdf\x36\xee\x2d\x1f\x9a\xb4\x68\x50\x89\x84\x65\x73\x35\x1a\x1d\x0d\xef\xaa\x0f\x23\x31\x57\x35\x68\x4a\x25\x42\x64\x84\xf3\x72\x54\xc8\x7f\x62\xdc\xb6\x1e\x83\xe5\x12\x58\x0c\x1f\x70\xb8\x9b\xa5\x10\x34\x33\x6e\x20\x0a\x47\xa3\xe8\x36\xb6\x52\x74\xd2\x62\x0a\x99\x44\x95\x82\x49\xd1\xf2\xa0\xfb\xab\x33\x3a\x7c\xaa\x43\x2b\x49\x62\xf8\x24\x19\xfe\x1d\xc0\x1f\x4a\xb5\x4c\xd0\x6f\x20\x43\xc1\xdf\xdc\xbc\x81\x5c\x38\x07\x4b\x14\x34\xda\x66\x85\x52\x1b\x30\x36\x45\x12\x5e\xcd\x7a\x14\xd6\x06\x0a\x87\xd6\xc1\x7a\x61\x62\xa9\xe5\x16\x2f\xa7\x6e\x55\xfa\x5e\xbc\xce\x91\x2e\x57\x62\x03\xd2
\x53\x59\x8f\x87\xaa\x47\x7a\xf5\xa1\x8b\xbf\x96\x19\x32\xf0\x7e\x98\x97\x53\x61\x33\xce\xf9\x35\x3d\x35\x23\x3c\x0e\x45\xcd\xd8\xde\x5e\x74\x35\x03\xb9\x2c\x3d\xcd\x68\xad\x17\xb2\x66\x48\xf2\x0a\x3f\x35\x83\xb1\xd6\x6a\xf3\x02\x23\xa8\x62\xe0\xa7\x9d\xf0\x64\x2d\x63\x7c\x86\xcf\xba\x15\x39\x3f\xf5\x22\x60\xc8\x8b\x1d\x32\xce\x2d\x6e\x28\x9b\x07\x1b\xd5\x4a\x53\x78\xf1\xf9\x16\x37\x5f\x0e\x57\xa2\x08\xc7\x1a\x5d\x55\x7a\xca\xb0\x08\x6b\x8f\x24\x83\x4a\x0b\x39\x1a\x9e\x81\xfc\xbe\xce\x50\x56\x4f\x90\xcf\x9f\x97\x7b\xd6\xd7\x3f\xcb\x2f\x65\x84\x57\x88\xdf\x59\xef\x36\x34\x8a\x31\x12\x68\x28\x28\xda\xf7\xed\x7f\x05\x00\x00\xff\xff\xfb\x65\x93\x4f\xfc\x22\x00\x00")
+
+func call_tracer_legacyJsBytes() ([]byte, error) {
+ return bindataRead(
+ _call_tracer_legacyJs,
+ "call_tracer_legacy.js",
+ )
+}
+
+func call_tracer_legacyJs() (*asset, error) {
+ bytes, err := call_tracer_legacyJsBytes()
+ if err != nil {
+ return nil, err
+ }
+
+ info := bindataFileInfo{name: "call_tracer_legacy.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x46, 0x79, 0xb6, 0xbc, 0xd2, 0xc, 0x25, 0xb1, 0x22, 0x56, 0xef, 0x77, 0xb9, 0x5e, 0x2e, 0xf4, 0xda, 0xb2, 0x2f, 0x53, 0xa4, 0xff, 0xc8, 0xac, 0xbb, 0x75, 0x22, 0x46, 0x59, 0xe3, 0x1d, 0x7d}}
return a, nil
}
@@ -348,15 +390,17 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
- "4byte_tracer.js": _4byte_tracerJs,
- "bigram_tracer.js": bigram_tracerJs,
- "call_tracer.js": call_tracerJs,
- "evmdis_tracer.js": evmdis_tracerJs,
- "noop_tracer.js": noop_tracerJs,
- "opcount_tracer.js": opcount_tracerJs,
- "prestate_tracer.js": prestate_tracerJs,
- "trigram_tracer.js": trigram_tracerJs,
- "unigram_tracer.js": unigram_tracerJs,
+ "4byte_tracer.js": _4byte_tracerJs,
+ "4byte_tracer_legacy.js": _4byte_tracer_legacyJs,
+ "bigram_tracer.js": bigram_tracerJs,
+ "call_tracer.js": call_tracerJs,
+ "call_tracer_legacy.js": call_tracer_legacyJs,
+ "evmdis_tracer.js": evmdis_tracerJs,
+ "noop_tracer.js": noop_tracerJs,
+ "opcount_tracer.js": opcount_tracerJs,
+ "prestate_tracer.js": prestate_tracerJs,
+ "trigram_tracer.js": trigram_tracerJs,
+ "unigram_tracer.js": unigram_tracerJs,
}
// AssetDebug is true if the assets were built with the debug flag enabled.
@@ -403,15 +447,17 @@ type bintree struct {
}
var _bintree = &bintree{nil, map[string]*bintree{
- "4byte_tracer.js": {_4byte_tracerJs, map[string]*bintree{}},
- "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}},
- "call_tracer.js": {call_tracerJs, map[string]*bintree{}},
- "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}},
- "noop_tracer.js": {noop_tracerJs, map[string]*bintree{}},
- "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}},
- "prestate_tracer.js": {prestate_tracerJs, map[string]*bintree{}},
- "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}},
- "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}},
+ "4byte_tracer.js": {_4byte_tracerJs, map[string]*bintree{}},
+ "4byte_tracer_legacy.js": {_4byte_tracer_legacyJs, map[string]*bintree{}},
+ "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}},
+ "call_tracer.js": {call_tracerJs, map[string]*bintree{}},
+ "call_tracer_legacy.js": {call_tracer_legacyJs, map[string]*bintree{}},
+ "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}},
+ "noop_tracer.js": {noop_tracerJs, map[string]*bintree{}},
+ "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}},
+ "prestate_tracer.js": {prestate_tracerJs, map[string]*bintree{}},
+ "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}},
+ "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
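
The bindata hunks above are mechanical: go-bindata re-embeds every tracer and registers the two new *_legacy.js assets in both the _bindata map and the _bintree. As a minimal sketch of how these generated accessors are consumed — assuming the standard go-bindata Asset/AssetNames API that this generated file exports, and noting the internal import path compiles only from within the go-ethereum tree itself:

	package main

	import (
		"fmt"

		// Internal path: this compiles only from within the go-ethereum tree.
		"github.com/ethereum/go-ethereum/eth/tracers/internal/tracers"
	)

	func main() {
		// Asset decompresses and returns the embedded file registered in
		// _bindata; unknown names yield an error rather than a nil slice.
		src, err := tracers.Asset("call_tracer_legacy.js")
		if err != nil {
			panic(err)
		}
		fmt.Printf("call_tracer_legacy.js: %d bytes of JS\n", len(src))

		// AssetNames walks the same table, so both new *_legacy.js entries
		// should be listed next to the tracers they were split from.
		for _, name := range tracers.AssetNames() {
			fmt.Println(name)
		}
	}
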
diff --git a/eth/tracers/internal/tracers/call_tracer.js b/eth/tracers/internal/tracers/call_tracer.js
index 3ca737773..98cfa0e6d 100644
--- a/eth/tracers/internal/tracers/call_tracer.js
+++ b/eth/tracers/internal/tracers/call_tracer.js
@@ -1,4 +1,4 @@
-// Copyright 2017 The go-ethereum Authors
+// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -14,212 +14,81 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// callTracer is a full blown transaction tracer that extracts and reports all
-// the internal calls made by a transaction, along with any useful information.
+
+// callFrameTracer uses the new call frame tracing methods to report useful information
+// about internal messages of a transaction.
{
- // callstack is the current recursive call stack of the EVM execution.
- callstack: [{}],
-
- // descended tracks whether we've just descended from an outer transaction into
- // an inner call.
- descended: false,
-
- // step is invoked for every opcode that the VM executes.
- step: function(log, db) {
- // Capture any errors immediately
- var error = log.getError();
- if (error !== undefined) {
- this.fault(log, db);
- return;
- }
- // We only care about system opcodes, faster if we pre-check once
- var syscall = (log.op.toNumber() & 0xf0) == 0xf0;
- if (syscall) {
- var op = log.op.toString();
- }
- // If a new contract is being created, add to the call stack
- if (syscall && (op == 'CREATE' || op == "CREATE2")) {
- var inOff = log.stack.peek(1).valueOf();
- var inEnd = inOff + log.stack.peek(2).valueOf();
-
- // Assemble the internal call report and store for completion
- var call = {
- type: op,
- from: toHex(log.contract.getAddress()),
- input: toHex(log.memory.slice(inOff, inEnd)),
- gasIn: log.getGas(),
- gasCost: log.getCost(),
- value: '0x' + log.stack.peek(0).toString(16)
- };
- this.callstack.push(call);
- this.descended = true
- return;
- }
- // If a contract is being self destructed, gather that as a subcall too
- if (syscall && op == 'SELFDESTRUCT') {
- var left = this.callstack.length;
- if (this.callstack[left-1].calls === undefined) {
- this.callstack[left-1].calls = [];
- }
- this.callstack[left-1].calls.push({
- type: op,
- from: toHex(log.contract.getAddress()),
- to: toHex(toAddress(log.stack.peek(0).toString(16))),
- gasIn: log.getGas(),
- gasCost: log.getCost(),
- value: '0x' + db.getBalance(log.contract.getAddress()).toString(16)
- });
- return
- }
- // If a new method invocation is being done, add to the call stack
- if (syscall && (op == 'CALL' || op == 'CALLCODE' || op == 'DELEGATECALL' || op == 'STATICCALL')) {
- // Skip any pre-compile invocations, those are just fancy opcodes
- var to = toAddress(log.stack.peek(1).toString(16));
- if (isPrecompiled(to)) {
- return
- }
- var off = (op == 'DELEGATECALL' || op == 'STATICCALL' ? 0 : 1);
-
- var inOff = log.stack.peek(2 + off).valueOf();
- var inEnd = inOff + log.stack.peek(3 + off).valueOf();
-
- // Assemble the internal call report and store for completion
- var call = {
- type: op,
- from: toHex(log.contract.getAddress()),
- to: toHex(to),
- input: toHex(log.memory.slice(inOff, inEnd)),
- gasIn: log.getGas(),
- gasCost: log.getCost(),
- outOff: log.stack.peek(4 + off).valueOf(),
- outLen: log.stack.peek(5 + off).valueOf()
- };
- if (op != 'DELEGATECALL' && op != 'STATICCALL') {
- call.value = '0x' + log.stack.peek(2).toString(16);
- }
- this.callstack.push(call);
- this.descended = true
- return;
- }
- // If we've just descended into an inner call, retrieve it's true allowance. We
- // need to extract if from within the call as there may be funky gas dynamics
- // with regard to requested and actually given gas (2300 stipend, 63/64 rule).
- if (this.descended) {
- if (log.getDepth() >= this.callstack.length) {
- this.callstack[this.callstack.length - 1].gas = log.getGas();
- } else {
- // TODO(karalabe): The call was made to a plain account. We currently don't
- // have access to the true gas amount inside the call and so any amount will
- // mostly be wrong since it depends on a lot of input args. Skip gas for now.
- }
- this.descended = false;
- }
- // If an existing call is returning, pop off the call stack
- if (syscall && op == 'REVERT') {
- this.callstack[this.callstack.length - 1].error = "execution reverted";
- return;
- }
- if (log.getDepth() == this.callstack.length - 1) {
- // Pop off the last call and get the execution results
- var call = this.callstack.pop();
-
- if (call.type == 'CREATE' || call.type == "CREATE2") {
- // If the call was a CREATE, retrieve the contract address and output code
- call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost - log.getGas()).toString(16);
- delete call.gasIn; delete call.gasCost;
-
- var ret = log.stack.peek(0);
- if (!ret.equals(0)) {
- call.to = toHex(toAddress(ret.toString(16)));
- call.output = toHex(db.getCode(toAddress(ret.toString(16))));
- } else if (call.error === undefined) {
- call.error = "internal failure"; // TODO(karalabe): surface these faults somehow
- }
- } else {
- // If the call was a contract call, retrieve the gas usage and output
- if (call.gas !== undefined) {
- call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost + call.gas - log.getGas()).toString(16);
- }
- var ret = log.stack.peek(0);
- if (!ret.equals(0)) {
- call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen));
- } else if (call.error === undefined) {
- call.error = "internal failure"; // TODO(karalabe): surface these faults somehow
- }
- delete call.gasIn; delete call.gasCost;
- delete call.outOff; delete call.outLen;
- }
- if (call.gas !== undefined) {
- call.gas = '0x' + bigInt(call.gas).toString(16);
- }
- // Inject the call into the previous one
- var left = this.callstack.length;
- if (this.callstack[left-1].calls === undefined) {
- this.callstack[left-1].calls = [];
- }
- this.callstack[left-1].calls.push(call);
- }
- },
-
- // fault is invoked when the actual execution of an opcode fails.
- fault: function(log, db) {
- // If the topmost call already reverted, don't handle the additional fault again
- if (this.callstack[this.callstack.length - 1].error !== undefined) {
- return;
- }
- // Pop off the just failed call
- var call = this.callstack.pop();
- call.error = log.getError();
-
- // Consume all available gas and clean any leftovers
- if (call.gas !== undefined) {
- call.gas = '0x' + bigInt(call.gas).toString(16);
- call.gasUsed = call.gas
- }
- delete call.gasIn; delete call.gasCost;
- delete call.outOff; delete call.outLen;
-
- // Flatten the failed call into its parent
- var left = this.callstack.length;
- if (left > 0) {
- if (this.callstack[left-1].calls === undefined) {
- this.callstack[left-1].calls = [];
- }
- this.callstack[left-1].calls.push(call);
- return;
- }
- // Last call failed too, leave it in the stack
- this.callstack.push(call);
- },
-
- // result is invoked when all the opcodes have been iterated over and returns
- // the final result of the tracing.
- result: function(ctx, db) {
- var result = {
- type: ctx.type,
- from: toHex(ctx.from),
- to: toHex(ctx.to),
- value: '0x' + ctx.value.toString(16),
- gas: '0x' + bigInt(ctx.gas).toString(16),
- gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16),
- input: toHex(ctx.input),
- output: toHex(ctx.output),
- time: ctx.time,
- };
- if (this.callstack[0].calls !== undefined) {
- result.calls = this.callstack[0].calls;
- }
- if (this.callstack[0].error !== undefined) {
- result.error = this.callstack[0].error;
- } else if (ctx.error !== undefined) {
- result.error = ctx.error;
- }
- if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) {
- delete result.output;
- }
- return this.finalize(result);
- },
+ callstack: [{}],
+ fault: function(log, db) {
+ var len = this.callstack.length
+ if (len > 1) {
+ var call = this.callstack.pop()
+ len -= 1
+ if (this.callstack[len-1].calls === undefined) {
+ this.callstack[len-1].calls = []
+ }
+ this.callstack[len-1].calls.push(call)
+ }
+ },
+ result: function(ctx, db) {
+ // Prepare outer message info
+ var result = {
+ type: ctx.type,
+ from: toHex(ctx.from),
+ to: toHex(ctx.to),
+ value: '0x' + ctx.value.toString(16),
+ gas: '0x' + bigInt(ctx.gas).toString(16),
+ gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16),
+ input: toHex(ctx.input),
+ output: toHex(ctx.output),
+ }
+ if (this.callstack[0].calls !== undefined) {
+ result.calls = this.callstack[0].calls
+ }
+ if (this.callstack[0].error !== undefined) {
+ result.error = this.callstack[0].error
+ } else if (ctx.error !== undefined) {
+ result.error = ctx.error
+ }
+ if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) {
+ delete result.output
+ }
+ return this.finalize(result)
+ },
+ enter: function(frame) {
+ var call = {
+ type: frame.getType(),
+ from: toHex(frame.getFrom()),
+ to: toHex(frame.getTo()),
+ input: toHex(frame.getInput()),
+ gas: '0x' + bigInt(frame.getGas()).toString('16'),
+ }
+ if (frame.getValue() !== undefined){
+ call.value='0x' + bigInt(frame.getValue()).toString(16)
+ }
+ this.callstack.push(call)
+ },
+ exit: function(frameResult) {
+ var len = this.callstack.length
+ if (len > 1) {
+ var call = this.callstack.pop()
+ call.gasUsed = '0x' + bigInt(frameResult.getGasUsed()).toString('16')
+ var error = frameResult.getError()
+ if (error === undefined) {
+ call.output = toHex(frameResult.getOutput())
+ } else {
+ call.error = error
+ if (call.type === 'CREATE' || call.type === 'CREATE2') {
+ delete call.to
+ }
+ }
+ len -= 1
+ if (this.callstack[len-1].calls === undefined) {
+ this.callstack[len-1].calls = []
+ }
+ this.callstack[len-1].calls.push(call)
+ }
+ },
// finalize recreates a call object using the final desired field order for json
// serialization. This is a nicety feature to pass meaningfully ordered results
// to users who don't interpret it, just display it.
@@ -239,14 +108,14 @@
}
for (var key in sorted) {
if (sorted[key] === undefined) {
- delete sorted[key];
+ delete sorted[key]
}
}
if (sorted.calls !== undefined) {
for (var i=0; i<sorted.calls.length; i++) {
- sorted.calls[i] = this.finalize(sorted.calls[i]);
+ sorted.calls[i] = this.finalize(sorted.calls[i])
}
}
- return sorted;
+ return sorted
}
}
diff --git a/eth/tracers/internal/tracers/call_tracer_legacy.js b/eth/tracers/internal/tracers/call_tracer_legacy.js
new file mode 100644
index 000000000..3ca737773
--- /dev/null
+++ b/eth/tracers/internal/tracers/call_tracer_legacy.js
@@ -0,0 +1,252 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// callTracer is a full blown transaction tracer that extracts and reports all
+// the internal calls made by a transaction, along with any useful information.
+{
+ // callstack is the current recursive call stack of the EVM execution.
+ callstack: [{}],
+
+ // descended tracks whether we've just descended from an outer transaction into
+ // an inner call.
+ descended: false,
+
+ // step is invoked for every opcode that the VM executes.
+ step: function(log, db) {
+ // Capture any errors immediately
+ var error = log.getError();
+ if (error !== undefined) {
+ this.fault(log, db);
+ return;
+ }
+ // We only care about system opcodes, faster if we pre-check once
+ var syscall = (log.op.toNumber() & 0xf0) == 0xf0;
+ if (syscall) {
+ var op = log.op.toString();
+ }
+ // If a new contract is being created, add to the call stack
+ if (syscall && (op == 'CREATE' || op == "CREATE2")) {
+ var inOff = log.stack.peek(1).valueOf();
+ var inEnd = inOff + log.stack.peek(2).valueOf();
+
+ // Assemble the internal call report and store for completion
+ var call = {
+ type: op,
+ from: toHex(log.contract.getAddress()),
+ input: toHex(log.memory.slice(inOff, inEnd)),
+ gasIn: log.getGas(),
+ gasCost: log.getCost(),
+ value: '0x' + log.stack.peek(0).toString(16)
+ };
+ this.callstack.push(call);
+ this.descended = true
+ return;
+ }
+ // If a contract is being self destructed, gather that as a subcall too
+ if (syscall && op == 'SELFDESTRUCT') {
+ var left = this.callstack.length;
+ if (this.callstack[left-1].calls === undefined) {
+ this.callstack[left-1].calls = [];
+ }
+ this.callstack[left-1].calls.push({
+ type: op,
+ from: toHex(log.contract.getAddress()),
+ to: toHex(toAddress(log.stack.peek(0).toString(16))),
+ gasIn: log.getGas(),
+ gasCost: log.getCost(),
+ value: '0x' + db.getBalance(log.contract.getAddress()).toString(16)
+ });
+ return
+ }
+ // If a new method invocation is being done, add to the call stack
+ if (syscall && (op == 'CALL' || op == 'CALLCODE' || op == 'DELEGATECALL' || op == 'STATICCALL')) {
+ // Skip any pre-compile invocations, those are just fancy opcodes
+ var to = toAddress(log.stack.peek(1).toString(16));
+ if (isPrecompiled(to)) {
+ return
+ }
+ var off = (op == 'DELEGATECALL' || op == 'STATICCALL' ? 0 : 1);
+
+ var inOff = log.stack.peek(2 + off).valueOf();
+ var inEnd = inOff + log.stack.peek(3 + off).valueOf();
+
+ // Assemble the internal call report and store for completion
+ var call = {
+ type: op,
+ from: toHex(log.contract.getAddress()),
+ to: toHex(to),
+ input: toHex(log.memory.slice(inOff, inEnd)),
+ gasIn: log.getGas(),
+ gasCost: log.getCost(),
+ outOff: log.stack.peek(4 + off).valueOf(),
+ outLen: log.stack.peek(5 + off).valueOf()
+ };
+ if (op != 'DELEGATECALL' && op != 'STATICCALL') {
+ call.value = '0x' + log.stack.peek(2).toString(16);
+ }
+ this.callstack.push(call);
+ this.descended = true
+ return;
+ }
+ // If we've just descended into an inner call, retrieve its true allowance. We
+ // need to extract it from within the call as there may be funky gas dynamics
+ // with regard to requested and actually given gas (2300 stipend, 63/64 rule).
+ if (this.descended) {
+ if (log.getDepth() >= this.callstack.length) {
+ this.callstack[this.callstack.length - 1].gas = log.getGas();
+ } else {
+ // TODO(karalabe): The call was made to a plain account. We currently don't
+ // have access to the true gas amount inside the call and so any amount will
+ // mostly be wrong since it depends on a lot of input args. Skip gas for now.
+ }
+ this.descended = false;
+ }
+ // If an existing call is returning, pop off the call stack
+ if (syscall && op == 'REVERT') {
+ this.callstack[this.callstack.length - 1].error = "execution reverted";
+ return;
+ }
+ if (log.getDepth() == this.callstack.length - 1) {
+ // Pop off the last call and get the execution results
+ var call = this.callstack.pop();
+
+ if (call.type == 'CREATE' || call.type == "CREATE2") {
+ // If the call was a CREATE, retrieve the contract address and output code
+ call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost - log.getGas()).toString(16);
+ delete call.gasIn; delete call.gasCost;
+
+ var ret = log.stack.peek(0);
+ if (!ret.equals(0)) {
+ call.to = toHex(toAddress(ret.toString(16)));
+ call.output = toHex(db.getCode(toAddress(ret.toString(16))));
+ } else if (call.error === undefined) {
+ call.error = "internal failure"; // TODO(karalabe): surface these faults somehow
+ }
+ } else {
+ // If the call was a contract call, retrieve the gas usage and output
+ if (call.gas !== undefined) {
+ call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost + call.gas - log.getGas()).toString(16);
+ }
+ var ret = log.stack.peek(0);
+ if (!ret.equals(0)) {
+ call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen));
+ } else if (call.error === undefined) {
+ call.error = "internal failure"; // TODO(karalabe): surface these faults somehow
+ }
+ delete call.gasIn; delete call.gasCost;
+ delete call.outOff; delete call.outLen;
+ }
+ if (call.gas !== undefined) {
+ call.gas = '0x' + bigInt(call.gas).toString(16);
+ }
+ // Inject the call into the previous one
+ var left = this.callstack.length;
+ if (this.callstack[left-1].calls === undefined) {
+ this.callstack[left-1].calls = [];
+ }
+ this.callstack[left-1].calls.push(call);
+ }
+ },
+
+ // fault is invoked when the actual execution of an opcode fails.
+ fault: function(log, db) {
+ // If the topmost call already reverted, don't handle the additional fault again
+ if (this.callstack[this.callstack.length - 1].error !== undefined) {
+ return;
+ }
+ // Pop off the just failed call
+ var call = this.callstack.pop();
+ call.error = log.getError();
+
+ // Consume all available gas and clean any leftovers
+ if (call.gas !== undefined) {
+ call.gas = '0x' + bigInt(call.gas).toString(16);
+ call.gasUsed = call.gas
+ }
+ delete call.gasIn; delete call.gasCost;
+ delete call.outOff; delete call.outLen;
+
+ // Flatten the failed call into its parent
+ var left = this.callstack.length;
+ if (left > 0) {
+ if (this.callstack[left-1].calls === undefined) {
+ this.callstack[left-1].calls = [];
+ }
+ this.callstack[left-1].calls.push(call);
+ return;
+ }
+ // Last call failed too, leave it in the stack
+ this.callstack.push(call);
+ },
+
+ // result is invoked when all the opcodes have been iterated over and returns
+ // the final result of the tracing.
+ result: function(ctx, db) {
+ var result = {
+ type: ctx.type,
+ from: toHex(ctx.from),
+ to: toHex(ctx.to),
+ value: '0x' + ctx.value.toString(16),
+ gas: '0x' + bigInt(ctx.gas).toString(16),
+ gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16),
+ input: toHex(ctx.input),
+ output: toHex(ctx.output),
+ time: ctx.time,
+ };
+ if (this.callstack[0].calls !== undefined) {
+ result.calls = this.callstack[0].calls;
+ }
+ if (this.callstack[0].error !== undefined) {
+ result.error = this.callstack[0].error;
+ } else if (ctx.error !== undefined) {
+ result.error = ctx.error;
+ }
+ if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) {
+ delete result.output;
+ }
+ return this.finalize(result);
+ },
+
+ // finalize recreates a call object using the final desired field order for json
+ // serialization. This is a nicety feature to pass meaningfully ordered results
+ // to users who don't interpret it, just display it.
+ finalize: function(call) {
+ var sorted = {
+ type: call.type,
+ from: call.from,
+ to: call.to,
+ value: call.value,
+ gas: call.gas,
+ gasUsed: call.gasUsed,
+ input: call.input,
+ output: call.output,
+ error: call.error,
+ time: call.time,
+ calls: call.calls,
+ }
+ for (var key in sorted) {
+ if (sorted[key] === undefined) {
+ delete sorted[key];
+ }
+ }
+ if (sorted.calls !== undefined) {
+ for (var i=0; i<sorted.calls.length; i++) {
+ sorted.calls[i] = this.finalize(sorted.calls[i]);
+ }
+ }
+ return sorted;
+ }
+}
diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go
--- a/eth/tracers/tracer.go
+++ b/eth/tracers/tracer.go
@@ ... @@ func (jst *Tracer) CaptureState
if atomic.LoadUint32(&jst.interrupt) > 0 {
jst.err = jst.reason
+ env.Cancel()
return
}
jst.opWrapper.op = op
@@ -649,41 +760,70 @@ func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
}
}
+// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct).
+func (jst *Tracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+ if !jst.traceCallFrames {
+ return
+ }
+ if jst.err != nil {
+ return
+ }
+ // If tracing was interrupted, set the error and stop
+ if atomic.LoadUint32(&jst.interrupt) > 0 {
+ jst.err = jst.reason
+ return
+ }
+
+ *jst.frame.typ = typ.String()
+ *jst.frame.from = from
+ *jst.frame.to = to
+ jst.frame.input = common.CopyBytes(input)
+ *jst.frame.gas = uint(gas)
+ jst.frame.value = nil
+ if value != nil {
+ jst.frame.value = new(big.Int).SetBytes(value.Bytes())
+ }
+
+ if _, err := jst.call(true, "enter", "frame"); err != nil {
+ jst.err = wrapError("enter", err)
+ }
+}
+
+// CaptureExit is called when EVM exits a scope, even if the scope didn't
+// execute any code.
+func (jst *Tracer) CaptureExit(output []byte, gasUsed uint64, err error) {
+ if !jst.traceCallFrames {
+ return
+ }
+ if jst.err != nil {
+ return
+ }
+ // If tracing was interrupted, set the error and stop
+ if atomic.LoadUint32(&jst.interrupt) > 0 {
+ jst.err = jst.reason
+ return
+ }
+
+ jst.frameResult.output = common.CopyBytes(output)
+ *jst.frameResult.gasUsed = uint(gasUsed)
+ jst.frameResult.errorValue = nil
+ if err != nil {
+ jst.frameResult.errorValue = new(string)
+ *jst.frameResult.errorValue = err.Error()
+ }
+
+ if _, err := jst.call(true, "exit", "frameResult"); err != nil {
+ jst.err = wrapError("exit", err)
+ }
+}
+
// GetResult calls the Javascript 'result' function and returns its value, or any accumulated error
func (jst *Tracer) GetResult() (json.RawMessage, error) {
// Transform the context into a JavaScript object and inject into the state
obj := jst.vm.PushObject()
for key, val := range jst.ctx {
- switch val := val.(type) {
- case uint64:
- jst.vm.PushUint(uint(val))
-
- case string:
- jst.vm.PushString(val)
-
- case []byte:
- ptr := jst.vm.PushFixedBuffer(len(val))
- copy(makeSlice(ptr, uint(len(val))), val)
-
- case common.Address:
- ptr := jst.vm.PushFixedBuffer(20)
- copy(makeSlice(ptr, 20), val[:])
-
- case *big.Int:
- pushBigInt(val, jst.vm)
-
- case int:
- jst.vm.PushInt(val)
-
- case common.Hash:
- ptr := jst.vm.PushFixedBuffer(32)
- copy(makeSlice(ptr, 32), val[:])
-
- default:
- panic(fmt.Sprintf("unsupported type: %T", val))
- }
- jst.vm.PutPropString(obj, key)
+ jst.addToObj(obj, key, val)
}
jst.vm.PutPropString(jst.stateObject, "ctx")
@@ -698,3 +838,35 @@ func (jst *Tracer) GetResult() (json.RawMessage, error) {
return result, jst.err
}
+
+// addToObj pushes a field to a JS object.
+func (jst *Tracer) addToObj(obj int, key string, val interface{}) {
+ pushValue(jst.vm, val)
+ jst.vm.PutPropString(obj, key)
+}
+
+func pushValue(ctx *duktape.Context, val interface{}) {
+ switch val := val.(type) {
+ case uint64:
+ ctx.PushUint(uint(val))
+ case string:
+ ctx.PushString(val)
+ case []byte:
+ ptr := ctx.PushFixedBuffer(len(val))
+ copy(makeSlice(ptr, uint(len(val))), val)
+ case common.Address:
+ ptr := ctx.PushFixedBuffer(20)
+ copy(makeSlice(ptr, 20), val[:])
+ case *big.Int:
+ pushBigInt(val, ctx)
+ case int:
+ ctx.PushInt(val)
+ case uint:
+ ctx.PushUint(val)
+ case common.Hash:
+ ptr := ctx.PushFixedBuffer(32)
+ copy(makeSlice(ptr, 32), val[:])
+ default:
+ panic(fmt.Sprintf("unsupported type: %T", val))
+ }
+}
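
The GetResult change above is a pure refactor: the inline type switch moves into pushValue so that frame fields (CaptureEnter/CaptureExit) and context fields (GetResult) share one Go-to-JS conversion path, with a uint case added for the new gas/gasUsed wrappers. A minimal sketch of the duktape stack discipline the helper relies on — push a value, then PutPropString pops it into the object — using only go-duktape calls; the keys and values here are illustrative, not taken from the diff:

	package main

	import (
		"fmt"

		duktape "gopkg.in/olebedev/go-duktape.v3"
	)

	func main() {
		ctx := duktape.New()
		defer ctx.DestroyHeap()

		// Mirrors addToObj: each PutPropString pops the value just pushed
		// and attaches it to the object at index obj under the given key.
		obj := ctx.PushObject()
		ctx.PushUint(1000) // e.g. a frame's gas
		ctx.PutPropString(obj, "gas")
		ctx.PushString("CALL") // e.g. a frame's type
		ctx.PutPropString(obj, "type")

		// Serialize the object in place and read it back for inspection.
		ctx.JsonEncode(-1)
		fmt.Println(ctx.GetString(-1)) // {"gas":1000,"type":"CALL"}
	}
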
diff --git a/eth/tracers/tracer_test.go b/eth/tracers/tracer_test.go
index d2d3e57c4..3decca225 100644
--- a/eth/tracers/tracer_test.go
+++ b/eth/tracers/tracer_test.go
@@ -236,3 +236,35 @@ func TestIsPrecompile(t *testing.T) {
t.Errorf("Tracer should consider blake2f as precompile in istanbul")
}
}
+
+func TestEnterExit(t *testing.T) {
+ // test that either both or none of enter() and exit() are defined
+ if _, err := New("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}}", new(Context)); err == nil {
+ t.Fatal("tracer creation should've failed without exit() definition")
+ }
+ if _, err := New("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(Context)); err != nil {
+ t.Fatal(err)
+ }
+
+ // test that the enter and exit methods are correctly invoked and that the values are passed through
+ tracer, err := New("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(Context))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ scope := &vm.ScopeContext{
+ Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0),
+ }
+
+ tracer.CaptureEnter(vm.CALL, scope.Contract.Caller(), scope.Contract.Address(), []byte{}, 1000, new(big.Int))
+ tracer.CaptureExit([]byte{}, 400, nil)
+
+ have, err := tracer.GetResult()
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := `{"enters":1,"exits":1,"enterGas":1000,"gasUsed":400}`
+ if string(have) != want {
+ t.Errorf("Number of invocations of enter() and exit() is wrong. Have %s, want %s\n", have, want)
+ }
+}
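
TestEnterExit above drives the hooks directly; a user-supplied tracer sees the same surface. Below is a hedged sketch of a custom tracer that flattens every call frame into a list, using only accessors shown in this diff (frame.getType/getFrom/getTo/getGas, res.getGasUsed). The wiring via tracers.New and vm.Config mirrors the tests; the flatTracer name and everything else is illustrative:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/eth/tracers"
	)

	// flatTracer records one entry per call frame; a stack of indices keeps
	// enter/exit pairs matched even when calls nest.
	const flatTracer = `{
		calls: [], idx: [],
		step: function(log, db) {},
		fault: function(log, db) {},
		enter: function(frame) {
			this.calls.push({
				type: frame.getType(),
				from: toHex(frame.getFrom()),
				to: toHex(frame.getTo()),
				gas: '0x' + bigInt(frame.getGas()).toString(16)
			});
			this.idx.push(this.calls.length - 1);
		},
		exit: function(res) {
			this.calls[this.idx.pop()].gasUsed = '0x' + bigInt(res.getGasUsed()).toString(16);
		},
		result: function(ctx, db) { return this.calls; }
	}`

	func main() {
		// New rejects tracers defining only one of enter/exit, per TestEnterExit.
		tracer, err := tracers.New(flatTracer, new(tracers.Context))
		if err != nil {
			panic(err)
		}
		fmt.Println("tracer ready:", tracer != nil)
		// In practice the tracer is handed to the EVM via
		// vm.Config{Debug: true, Tracer: tracer} before running a transaction.
	}
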
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index 8fbbf154b..fb817fbc5 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -203,21 +203,25 @@ func TestPrestateTracerCreate2(t *testing.T) {
// Iterates over all the input-output datasets in the tracer test harness and
// runs the JavaScript tracers against them.
-func TestCallTracer(t *testing.T) {
- files, err := ioutil.ReadDir("testdata")
+func TestCallTracerLegacy(t *testing.T) {
+ testCallTracer("callTracerLegacy", "call_tracer_legacy", t)
+}
+
+func testCallTracer(tracer string, dirPath string, t *testing.T) {
+ files, err := ioutil.ReadDir(filepath.Join("testdata", dirPath))
if err != nil {
t.Fatalf("failed to retrieve tracer test suite: %v", err)
}
for _, file := range files {
- if !strings.HasPrefix(file.Name(), "call_tracer_") {
+ if !strings.HasSuffix(file.Name(), ".json") {
continue
}
file := file // capture range variable
- t.Run(camel(strings.TrimSuffix(strings.TrimPrefix(file.Name(), "call_tracer_"), ".json")), func(t *testing.T) {
+ t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
// Call tracer test found, read it from disk
- blob, err := ioutil.ReadFile(filepath.Join("testdata", file.Name()))
+ blob, err := ioutil.ReadFile(filepath.Join("testdata", dirPath, file.Name()))
if err != nil {
t.Fatalf("failed to read testcase: %v", err)
}
@@ -248,7 +252,7 @@ func TestCallTracer(t *testing.T) {
_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
// Create the tracer, the EVM environment and run it
- tracer, err := New("callTracer", new(Context))
+ tracer, err := New(tracer, new(Context))
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
@@ -283,6 +287,10 @@ func TestCallTracer(t *testing.T) {
}
}
+func TestCallTracer(t *testing.T) {
+ testCallTracer("callTracer", "call_tracer", t)
+}
+
// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
// comparison
func jsonEqual(x, y interface{}) bool {
@@ -353,8 +361,8 @@ func BenchmarkTransactionTrace(b *testing.B) {
tracer := vm.NewStructLogger(&vm.LogConfig{
Debug: false,
//DisableStorage: true,
- //DisableMemory: true,
- //DisableReturnData: true,
+ //EnableMemory: false,
+ //EnableReturnData: false,
})
evm := vm.NewEVM(context, txContext, statedb, params.AllEthashProtocolChanges, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer, nil)
@@ -378,3 +386,73 @@ func BenchmarkTransactionTrace(b *testing.B) {
tracer.Reset()
}
}
+
+func BenchmarkTracers(b *testing.B) {
+ files, err := ioutil.ReadDir(filepath.Join("testdata", "call_tracer"))
+ if err != nil {
+ b.Fatalf("failed to retrieve tracer test suite: %v", err)
+ }
+ for _, file := range files {
+ if !strings.HasSuffix(file.Name(), ".json") {
+ continue
+ }
+ file := file // capture range variable
+ b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) {
+ blob, err := ioutil.ReadFile(filepath.Join("testdata", "call_tracer", file.Name()))
+ if err != nil {
+ b.Fatalf("failed to read testcase: %v", err)
+ }
+ test := new(callTracerTest)
+ if err := json.Unmarshal(blob, test); err != nil {
+ b.Fatalf("failed to parse testcase: %v", err)
+ }
+ benchTracer("callTracer", test, b)
+ })
+ }
+}
+
+func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
+ // Configure a blockchain with the given prestate
+ tx := new(types.Transaction)
+ if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
+ b.Fatalf("failed to parse testcase input: %v", err)
+ }
+ signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))
+ msg, err := tx.AsMessage(signer, nil)
+ if err != nil {
+ b.Fatalf("failed to prepare transaction for tracing: %v", err)
+ }
+ origin, _ := signer.Sender(tx)
+ txContext := vm.TxContext{
+ Origin: origin,
+ GasPrice: tx.GasPrice(),
+ }
+ context := vm.BlockContext{
+ CanTransfer: core.CanTransfer,
+ Transfer: core.Transfer,
+ Coinbase: test.Context.Miner,
+ BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
+ Time: new(big.Int).SetUint64(uint64(test.Context.Time)),
+ Difficulty: (*big.Int)(test.Context.Difficulty),
+ GasLimit: uint64(test.Context.GasLimit),
+ }
+ _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+
+ // Create the tracer, the EVM environment and run it
+ tracer, err := New(tracerName, new(Context))
+ if err != nil {
+ b.Fatalf("failed to create call tracer: %v", err)
+ }
+ evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ snap := statedb.Snapshot()
+ st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
+ if _, err = st.TransitionDb(); err != nil {
+ b.Fatalf("failed to execute transaction: %v", err)
+ }
+ statedb.RevertToSnapshot(snap)
+ }
+}
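Because BenchmarkTracers feeds on the same testdata/call_tracer fixtures as the unit test, it can be exercised on its own with stock Go tooling, e.g. go test -run NONE -bench BenchmarkTracers -benchmem ./eth/tracers (command shown for a standard checkout; adjust the package path as needed).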
diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go
index c0b9d0c2d..ca2cb1abd 100644
--- a/ethclient/gethclient/gethclient_test.go
+++ b/ethclient/gethclient/gethclient_test.go
@@ -97,37 +97,40 @@ func TestGethClient(t *testing.T) {
defer backend.Close()
defer client.Close()
- tests := map[string]struct {
+ tests := []struct {
+ name string
test func(t *testing.T)
}{
- "TestAccessList": {
+ {
+ "TestAccessList",
func(t *testing.T) { testAccessList(t, client) },
},
- "TestGetProof": {
+ {
+ "TestGetProof",
func(t *testing.T) { testGetProof(t, client) },
- },
- "TestGCStats": {
+ }, {
+ "TestGCStats",
func(t *testing.T) { testGCStats(t, client) },
- },
- "TestMemStats": {
+ }, {
+ "TestMemStats",
func(t *testing.T) { testMemStats(t, client) },
- },
- "TestGetNodeInfo": {
+ }, {
+ "TestGetNodeInfo",
func(t *testing.T) { testGetNodeInfo(t, client) },
- },
- "TestSetHead": {
+ }, {
+ "TestSetHead",
func(t *testing.T) { testSetHead(t, client) },
- },
- "TestSubscribePendingTxs": {
+ }, {
+ "TestSubscribePendingTxs",
func(t *testing.T) { testSubscribePendingTransactions(t, client) },
- },
- "TestCallContract": {
+ }, {
+ "TestCallContract",
func(t *testing.T) { testCallContract(t, client) },
},
}
t.Parallel()
- for name, tt := range tests {
- t.Run(name, tt.test)
+ for _, tt := range tests {
+ t.Run(tt.name, tt.test)
}
}
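The map-to-slice conversion above is about determinism: Go randomises map iteration order, so the subtests previously ran in a different order on every invocation. The pattern in miniature, as a standalone sketch (not gethclient-specific):

package example

import "testing"

// A slice of name/func pairs keeps subtest execution order stable;
// a map keyed by name cannot guarantee it.
func TestOrdered(t *testing.T) {
	tests := []struct {
		name string
		test func(t *testing.T)
	}{
		{"First", func(t *testing.T) {}},
		{"Second", func(t *testing.T) {}},
	}
	for _, tt := range tests {
		t.Run(tt.name, tt.test)
	}
}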
diff --git a/ethdb/database.go b/ethdb/database.go
index bdc09d5e9..3c6500d1d 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -92,9 +92,10 @@ type AncientReader interface {
// AncientWriter contains the methods required to write to immutable ancient data.
type AncientWriter interface {
- // AppendAncient injects all binary blobs belong to block at the end of the
- // append-only immutable table files.
- AppendAncient(number uint64, hash, header, body, receipt, td []byte) error
+ // ModifyAncients runs a write operation on the ancient store.
+ // If the function returns an error, any changes to the underlying store are reverted.
+ // The integer return value is the total size of the written data.
+ ModifyAncients(func(AncientWriteOp) error) (int64, error)
// TruncateAncients discards all but the first n ancient data from the ancient store.
TruncateAncients(n uint64) error
@@ -103,6 +104,15 @@ type AncientWriter interface {
Sync() error
}
+// AncientWriteOp is given to the function argument of ModifyAncients.
+type AncientWriteOp interface {
+ // Append adds an RLP-encoded item.
+ Append(kind string, number uint64, item interface{}) error
+
+ // AppendRaw adds an item without RLP-encoding it.
+ AppendRaw(kind string, number uint64, item []byte) error
+}
+
// Reader contains the methods required to read data from both key-value as well as
// immutable ancient data.
type Reader interface {
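The interface change turns ancient writes into atomic batches: every append issued inside the callback either commits as a unit or is rolled back when the callback errors, and the returned int64 reports the size of the written data. A caller sketch under the new API; the freezer table names ("hashes", "headers", ...) are illustrative assumptions, not part of the interface:

// writeAncientBlock appends one block's raw components atomically.
func writeAncientBlock(db ethdb.AncientWriter, number uint64, hash, header, body, receipts, td []byte) (int64, error) {
	return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for _, item := range []struct {
			kind string
			blob []byte
		}{
			{"hashes", hash}, {"headers", header}, {"bodies", body},
			{"receipts", receipts}, {"diffs", td},
		} {
			if err := op.AppendRaw(item.kind, number, item.blob); err != nil {
				return err // everything appended so far is reverted
			}
		}
		return nil
	})
}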
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index 5d19cc357..9ff1a2ce1 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build !js
// +build !js
// Package leveldb implements the key-value database layer based on LevelDB.
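The //go:build lines added throughout this change are the Go 1.17 build-constraint syntax: gofmt now emits the new directive alongside the legacy // +build comment, so older toolchains keep working while Go 1.17+ reads the new, less error-prone form. The canonical pairing:

//go:build !js
// +build !js

package leveldb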
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index 148359110..55c0c880f 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -30,12 +30,12 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/eth/downloader"
ethproto "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/les"
@@ -67,7 +67,7 @@ type backend interface {
HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
GetTd(ctx context.Context, hash common.Hash) *big.Int
Stats() (pending int, queued int)
- Downloader() *downloader.Downloader
+ SyncProgress() ethereum.SyncProgress
}
// fullNodeBackend encompasses the functionality necessary for a full node
@@ -777,7 +777,7 @@ func (s *Service) reportStats(conn *connWrapper) error {
mining = fullBackend.Miner().Mining()
hashrate = int(fullBackend.Miner().Hashrate())
- sync := fullBackend.Downloader().Progress()
+ sync := fullBackend.SyncProgress()
syncing = fullBackend.CurrentHeader().Number.Uint64() >= sync.HighestBlock
price, _ := fullBackend.SuggestGasTipCap(context.Background())
@@ -786,7 +786,7 @@ func (s *Service) reportStats(conn *connWrapper) error {
gasprice += int(basefee.Uint64())
}
} else {
- sync := s.backend.Downloader().Progress()
+ sync := s.backend.SyncProgress()
syncing = s.backend.CurrentHeader().Number.Uint64() >= sync.HighestBlock
}
// Assemble the node stats and send it to the server
diff --git a/go.mod b/go.mod
index bab3b302d..85ed22a46 100644
--- a/go.mod
+++ b/go.mod
@@ -36,6 +36,7 @@ require (
github.com/google/uuid v1.1.5
github.com/gorilla/websocket v1.4.2
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29
+ github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.2.0
@@ -62,7 +63,7 @@ require (
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4
github.com/stretchr/testify v1.7.0
- github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954
+ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tklauser/go-sysconf v0.3.5 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
diff --git a/go.sum b/go.sum
index 825f0c892..72b604f79 100644
--- a/go.sum
+++ b/go.sum
@@ -187,7 +187,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -219,6 +218,8 @@ github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0U
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
+github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
+github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
@@ -312,6 +313,10 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
+github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
@@ -398,8 +403,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs=
-github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
+github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4=
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
diff --git a/graphql/graphql.go b/graphql/graphql.go
index d35994234..0da9faa95 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -158,14 +158,14 @@ func (l *Log) Data(ctx context.Context) hexutil.Bytes {
// AccessTuple represents EIP-2930
type AccessTuple struct {
address common.Address
- storageKeys *[]common.Hash
+ storageKeys []common.Hash
}
func (at *AccessTuple) Address(ctx context.Context) common.Address {
return at.address
}
-func (at *AccessTuple) StorageKeys(ctx context.Context) *[]common.Hash {
+func (at *AccessTuple) StorageKeys(ctx context.Context) []common.Hash {
return at.storageKeys
}
@@ -442,7 +442,7 @@ func (t *Transaction) AccessList(ctx context.Context) (*[]*AccessTuple, error) {
for _, al := range accessList {
ret = append(ret, &AccessTuple{
address: al.Address,
- storageKeys: &al.StorageKeys,
+ storageKeys: al.StorageKeys,
})
}
return &ret, nil
@@ -1248,7 +1248,7 @@ func (s *SyncState) KnownStates() *hexutil.Uint64 {
// - pulledStates: number of state entries processed until now
// - knownStates: number of known state entries that still need to be pulled
func (r *Resolver) Syncing() (*SyncState, error) {
- progress := r.backend.Downloader().Progress()
+ progress := r.backend.SyncProgress()
// Return not syncing if the synchronisation already completed
if progress.CurrentBlock >= progress.HighestBlock {
diff --git a/graphql/schema.go b/graphql/schema.go
index 811c11f6c..dfd094a42 100644
--- a/graphql/schema.go
+++ b/graphql/schema.go
@@ -72,7 +72,7 @@ const schema string = `
#EIP-2718
type AccessTuple{
address: Address!
- storageKeys : [Bytes32!]
+ storageKeys : [Bytes32!]!
}
# Transaction is an Ethereum transaction.
diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go
index 82ad9c15b..d75db41e0 100644
--- a/internal/cmdtest/test_cmd.go
+++ b/internal/cmdtest/test_cmd.go
@@ -118,6 +118,13 @@ func (tt *TestCmd) Expect(tplsource string) {
tt.Logf("Matched stdout text:\n%s", want)
}
+// Output reads all output from stdout, and returns the data.
+func (tt *TestCmd) Output() []byte {
+ var buf []byte
+ tt.withKillTimeout(func() { buf, _ = io.ReadAll(tt.stdout) })
+ return buf
+}
+
func (tt *TestCmd) matchExactOutput(want []byte) error {
buf := make([]byte, len(want))
n := 0
diff --git a/internal/debug/api.go b/internal/debug/api.go
index efd862677..1ea0c6377 100644
--- a/internal/debug/api.go
+++ b/internal/debug/api.go
@@ -27,6 +27,7 @@ import (
"os"
"os/user"
"path/filepath"
+ "regexp"
"runtime"
"runtime/debug"
"runtime/pprof"
@@ -35,6 +36,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/log"
+ "github.com/hashicorp/go-bexpr"
)
// Handler is the global debugging handler.
@@ -189,10 +191,44 @@ func (*HandlerT) WriteMemProfile(file string) error {
return writeProfile("heap", file)
}
-// Stacks returns a printed representation of the stacks of all goroutines.
-func (*HandlerT) Stacks() string {
+// Stacks returns a printed representation of the stacks of all goroutines. It
+// also permits the following optional filters to be used:
+// - filter: boolean expression of packages to filter for
+func (*HandlerT) Stacks(filter *string) string {
buf := new(bytes.Buffer)
pprof.Lookup("goroutine").WriteTo(buf, 2)
+
+ // If any filtering was requested, execute it now
+ if filter != nil && len(*filter) > 0 {
+ expanded := *filter
+
+ // The input filter is a logical expression of package names. Transform
+ // it into a proper boolean expression that can be fed into a parser and
+ // interpreter:
+ //
+ // E.g. (eth || snap) && !p2p -> (eth in Value || snap in Value) && p2p not in Value
+ expanded = regexp.MustCompile(`[:/\.A-Za-z0-9_-]+`).ReplaceAllString(expanded, "`$0` in Value")
+ expanded = regexp.MustCompile("!(`[:/\\.A-Za-z0-9_-]+`)").ReplaceAllString(expanded, "$1 not")
+ expanded = strings.Replace(expanded, "||", "or", -1)
+ expanded = strings.Replace(expanded, "&&", "and", -1)
+ log.Info("Expanded filter expression", "filter", *filter, "expanded", expanded)
+
+ expr, err := bexpr.CreateEvaluator(expanded)
+ if err != nil {
+ log.Error("Failed to parse filter expression", "expanded", expanded, "err", err)
+ return ""
+ }
+ // Split the goroutine dump into segments and filter each
+ dump := buf.String()
+ buf.Reset()
+
+ for _, trace := range strings.Split(dump, "\n\n") {
+ if ok, _ := expr.Evaluate(map[string]string{"Value": trace}); ok {
+ buf.WriteString(trace)
+ buf.WriteString("\n\n")
+ }
+ }
+ }
return buf.String()
}
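The expansion step above is self-contained enough to verify in isolation. A standalone sketch reproducing just that rewrite, showing what a typical filter becomes before it reaches bexpr.CreateEvaluator:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// expand rewrites a package filter into a go-bexpr expression exactly
// as Stacks does: identifiers become membership tests, !x flips to
// "not in", and ||/&& become or/and.
func expand(filter string) string {
	expanded := regexp.MustCompile(`[:/\.A-Za-z0-9_-]+`).ReplaceAllString(filter, "`$0` in Value")
	expanded = regexp.MustCompile("!(`[:/\\.A-Za-z0-9_-]+`)").ReplaceAllString(expanded, "$1 not")
	expanded = strings.Replace(expanded, "||", "or", -1)
	expanded = strings.Replace(expanded, "&&", "and", -1)
	return expanded
}

func main() {
	fmt.Println(expand("(eth || snap) && !p2p"))
	// (`eth` in Value or `snap` in Value) and `p2p` not in Value
}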
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index 126ee09a7..3aa990adf 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -90,39 +90,6 @@ var (
Name: "trace",
Usage: "Write execution trace to the given file",
}
- // (Deprecated April 2020)
- legacyPprofPortFlag = cli.IntFlag{
- Name: "pprofport",
- Usage: "pprof HTTP server listening port (deprecated, use --pprof.port)",
- Value: 6060,
- }
- legacyPprofAddrFlag = cli.StringFlag{
- Name: "pprofaddr",
- Usage: "pprof HTTP server listening interface (deprecated, use --pprof.addr)",
- Value: "127.0.0.1",
- }
- legacyMemprofilerateFlag = cli.IntFlag{
- Name: "memprofilerate",
- Usage: "Turn on memory profiling with the given rate (deprecated, use --pprof.memprofilerate)",
- Value: runtime.MemProfileRate,
- }
- legacyBlockprofilerateFlag = cli.IntFlag{
- Name: "blockprofilerate",
- Usage: "Turn on block profiling with the given rate (deprecated, use --pprof.blockprofilerate)",
- }
- legacyCpuprofileFlag = cli.StringFlag{
- Name: "cpuprofile",
- Usage: "Write CPU profile to the given file (deprecated, use --pprof.cpuprofile)",
- }
- legacyBacktraceAtFlag = cli.StringFlag{
- Name: "backtrace",
- Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\") (deprecated, use --log.backtrace)",
- Value: "",
- }
- legacyDebugFlag = cli.BoolFlag{
- Name: "debug",
- Usage: "Prepends log messages with call-site location (file and line number) (deprecated, use --log.debug)",
- }
)
// Flags holds all command-line flags required for debugging.
@@ -141,17 +108,6 @@ var Flags = []cli.Flag{
traceFlag,
}
-// This is the list of deprecated debugging flags.
-var DeprecatedFlags = []cli.Flag{
- legacyPprofPortFlag,
- legacyPprofAddrFlag,
- legacyMemprofilerateFlag,
- legacyBlockprofilerateFlag,
- legacyCpuprofileFlag,
- legacyBacktraceAtFlag,
- legacyDebugFlag,
-}
-
var glogger *log.GlogHandler
func init() {
@@ -183,45 +139,23 @@ func Setup(ctx *cli.Context) error {
glogger.Vmodule(vmodule)
debug := ctx.GlobalBool(debugFlag.Name)
- if ctx.GlobalIsSet(legacyDebugFlag.Name) {
- debug = ctx.GlobalBool(legacyDebugFlag.Name)
- log.Warn("The flag --debug is deprecated and will be removed in the future, please use --log.debug")
- }
if ctx.GlobalIsSet(debugFlag.Name) {
debug = ctx.GlobalBool(debugFlag.Name)
}
log.PrintOrigins(debug)
backtrace := ctx.GlobalString(backtraceAtFlag.Name)
- if b := ctx.GlobalString(legacyBacktraceAtFlag.Name); b != "" {
- backtrace = b
- log.Warn("The flag --backtrace is deprecated and will be removed in the future, please use --log.backtrace")
- }
- if b := ctx.GlobalString(backtraceAtFlag.Name); b != "" {
- backtrace = b
- }
glogger.BacktraceAt(backtrace)
log.Root().SetHandler(glogger)
// profiling, tracing
runtime.MemProfileRate = memprofilerateFlag.Value
- if ctx.GlobalIsSet(legacyMemprofilerateFlag.Name) {
- runtime.MemProfileRate = ctx.GlobalInt(legacyMemprofilerateFlag.Name)
- log.Warn("The flag --memprofilerate is deprecated and will be removed in the future, please use --pprof.memprofilerate")
- }
if ctx.GlobalIsSet(memprofilerateFlag.Name) {
runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
}
- blockProfileRate := blockprofilerateFlag.Value
- if ctx.GlobalIsSet(legacyBlockprofilerateFlag.Name) {
- blockProfileRate = ctx.GlobalInt(legacyBlockprofilerateFlag.Name)
- log.Warn("The flag --blockprofilerate is deprecated and will be removed in the future, please use --pprof.blockprofilerate")
- }
- if ctx.GlobalIsSet(blockprofilerateFlag.Name) {
- blockProfileRate = ctx.GlobalInt(blockprofilerateFlag.Name)
- }
+ blockProfileRate := ctx.GlobalInt(blockprofilerateFlag.Name)
Handler.SetBlockProfileRate(blockProfileRate)
if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" {
diff --git a/internal/debug/loudpanic.go b/internal/debug/loudpanic.go
index 572ebcefa..86e6bc88f 100644
--- a/internal/debug/loudpanic.go
+++ b/internal/debug/loudpanic.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build go1.6
// +build go1.6
package debug
diff --git a/internal/debug/loudpanic_fallback.go b/internal/debug/loudpanic_fallback.go
index 4ce4985da..377490e5b 100644
--- a/internal/debug/loudpanic_fallback.go
+++ b/internal/debug/loudpanic_fallback.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build !go1.6
// +build !go1.6
package debug
diff --git a/internal/debug/trace.go b/internal/debug/trace.go
index cab5deaaf..a273e4a9d 100644
--- a/internal/debug/trace.go
+++ b/internal/debug/trace.go
@@ -14,7 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-//+build go1.5
+//go:build go1.5
+// +build go1.5
package debug
diff --git a/internal/debug/trace_fallback.go b/internal/debug/trace_fallback.go
index 4118ff408..ec07d991e 100644
--- a/internal/debug/trace_fallback.go
+++ b/internal/debug/trace_fallback.go
@@ -14,7 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-//+build !go1.5
+//go:build !go1.5
+// +build !go1.5
// no-op implementation of tracing methods for Go < 1.5.
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 1af98e107..6997f2c82 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -32,7 +32,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
- "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/consensus/misc"
@@ -123,7 +122,7 @@ func (s *PublicEthereumAPI) FeeHistory(ctx context.Context, blockCount rpc.Decim
// - pulledStates: number of state entries processed until now
// - knownStates: number of known state entries that still need to be pulled
func (s *PublicEthereumAPI) Syncing() (interface{}, error) {
- progress := s.b.Downloader().Progress()
+ progress := s.b.SyncProgress()
// Return not syncing if the synchronisation already completed
if progress.CurrentBlock >= progress.HighestBlock {
@@ -1176,8 +1175,7 @@ func FormatLogs(logs []vm.StructLog) []StructLogRes {
}
// RPCMarshalHeader converts the given header to the RPC output.
-func RPCMarshalHeader(head *types.Header, engine consensus.Engine) map[string]interface{} {
- miner, _ := engine.Author(head)
+func RPCMarshalHeader(head *types.Header) map[string]interface{} {
result := map[string]interface{}{
"number": (*hexutil.Big)(head.Number),
"hash": head.Hash(),
@@ -1187,7 +1185,7 @@ func RPCMarshalHeader(head *types.Header, engine consensus.Engine) map[string]in
"sha3Uncles": head.UncleHash,
"logsBloom": head.Bloom,
"stateRoot": head.Root,
- "miner": miner,
+ "miner": head.Coinbase,
"difficulty": (*hexutil.Big)(head.Difficulty),
"extraData": hexutil.Bytes(head.Extra),
"size": hexutil.Uint64(head.Size()),
@@ -1208,8 +1206,8 @@ func RPCMarshalHeader(head *types.Header, engine consensus.Engine) map[string]in
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain
// transaction hashes.
-func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, engine consensus.Engine) (map[string]interface{}, error) {
- fields := RPCMarshalHeader(block.Header(), engine)
+func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
+ fields := RPCMarshalHeader(block.Header())
fields["size"] = hexutil.Uint64(block.Size())
if inclTx {
@@ -1244,7 +1242,7 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, engine consen
// rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires
// a `PublicBlockchainAPI`.
func (s *PublicBlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.Header) map[string]interface{} {
- fields := RPCMarshalHeader(header, s.b.Engine())
+ fields := RPCMarshalHeader(header)
fields["totalDifficulty"] = (*hexutil.Big)(s.b.GetTd(ctx, header.Hash()))
return fields
}
@@ -1252,7 +1250,7 @@ func (s *PublicBlockChainAPI) rpcMarshalHeader(ctx context.Context, header *type
// rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires
// a `PublicBlockchainAPI`.
func (s *PublicBlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) {
- fields, err := RPCMarshalBlock(b, inclTx, fullTx, s.b.Engine())
+ fields, err := RPCMarshalBlock(b, inclTx, fullTx)
if err != nil {
return nil, err
}
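With the consensus.Engine parameter gone, the miner field comes straight from the header's own coinbase, so marshalling needs nothing beyond the header. A minimal caller sketch (illustrative values; assumes the usual common, types, big and fmt imports):

func exampleMarshalHeader() {
	head := &types.Header{
		Number:   big.NewInt(1),
		Coinbase: common.HexToAddress("0x42"),
	}
	fields := RPCMarshalHeader(head)
	fmt.Println(fields["miner"]) // prints head.Coinbase, no engine lookup
}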
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 995454582..1624f4963 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -21,6 +21,7 @@ import (
"context"
"math/big"
+ "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -29,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
@@ -40,7 +40,8 @@ import (
// both full and light clients) with access to necessary functions.
type Backend interface {
// General Ethereum API
- Downloader() *downloader.Downloader
+ SyncProgress() ethereum.SyncProgress
+
SuggestGasTipCap(ctx context.Context) (*big.Int, error)
FeeHistory(ctx context.Context, blockCount int, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error)
ChainDb() ethdb.Database
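For implementers, the Backend change means exposing a point-in-time ethereum.SyncProgress snapshot rather than the downloader object itself (the les backend later in this diff just forwards Downloader().Progress()). A hypothetical stub satisfying the new method, sketching only this one method of the interface:

// offlineBackend is an illustrative stub, not a real geth type.
type offlineBackend struct{}

func (offlineBackend) SyncProgress() ethereum.SyncProgress {
	return ethereum.SyncProgress{} // zero values: current == highest, i.e. not syncing
}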
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index 52811b2a9..2d08d3008 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -146,6 +146,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
if args.Gas == nil {
// These fields are immutable during the estimation, safe to
// pass the pointer directly.
+ data := args.data()
callArgs := TransactionArgs{
From: args.From,
To: args.To,
@@ -153,7 +154,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
MaxFeePerGas: args.MaxFeePerGas,
MaxPriorityFeePerGas: args.MaxPriorityFeePerGas,
Value: args.Value,
- Data: args.Data,
+ Data: (*hexutil.Bytes)(&data),
AccessList: args.AccessList,
}
pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index 927dba189..ad459362a 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -278,7 +278,8 @@ web3._extend({
new web3._extend.Method({
name: 'stacks',
call: 'debug_stacks',
- params: 0,
+ params: 1,
+ inputFormatter: [null],
outputFormatter: console.log
}),
new web3._extend.Method({
@@ -390,6 +391,12 @@ web3._extend({
params: 2,
inputFormatter: [null, null]
}),
+ new web3._extend.Method({
+ name: 'intermediateRoots',
+ call: 'debug_intermediateRoots',
+ params: 2,
+ inputFormatter: [null, null]
+ }),
new web3._extend.Method({
name: 'standardTraceBlockToFile',
call: 'debug_standardTraceBlockToFile',
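On the console side, debug.stacks now forwards one optional argument to debug_stacks; based on the params/inputFormatter change above, an invocation such as debug.stacks("(eth || snap) && !p2p") should apply the goroutine filter, while passing null keeps the unfiltered dump (invocation shape assumed, not verified against the console).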
diff --git a/les/api_backend.go b/les/api_backend.go
index 9c80270da..e12984cb4 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -21,6 +21,7 @@ import (
"errors"
"math/big"
+ "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -30,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -257,8 +257,8 @@ func (b *LesApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEven
return b.eth.blockchain.SubscribeRemovedLogsEvent(ch)
}
-func (b *LesApiBackend) Downloader() *downloader.Downloader {
- return b.eth.Downloader()
+func (b *LesApiBackend) SyncProgress() ethereum.SyncProgress {
+ return b.eth.Downloader().Progress()
}
func (b *LesApiBackend) ProtocolVersion() int {
diff --git a/les/api_test.go b/les/api_test.go
index f7017c5d9..6a19b0fe4 100644
--- a/les/api_test.go
+++ b/les/api_test.go
@@ -32,8 +32,9 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/downloader"
+ ethdownloader "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
+ "github.com/ethereum/go-ethereum/les/downloader"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
@@ -494,14 +495,14 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []
func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
config := ethconfig.Defaults
- config.SyncMode = downloader.LightSync
+ config.SyncMode = (ethdownloader.SyncMode)(downloader.LightSync)
config.Ethash.PowMode = ethash.ModeFake
return New(stack, &config)
}
func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
config := ethconfig.Defaults
- config.SyncMode = downloader.FullSync
+ config.SyncMode = (ethdownloader.SyncMode)(downloader.FullSync)
config.LightServ = testServerCapacity
config.LightPeers = testMaxClients
ethereum, err := eth.New(stack, &config)
diff --git a/les/client.go b/les/client.go
index 1d8a2c6f9..5d07c783e 100644
--- a/les/client.go
+++ b/les/client.go
@@ -30,12 +30,12 @@ import (
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi"
+ "github.com/ethereum/go-ethereum/les/downloader"
"github.com/ethereum/go-ethereum/les/vflux"
vfc "github.com/ethereum/go-ethereum/les/vflux/client"
"github.com/ethereum/go-ethereum/light"
diff --git a/les/client_handler.go b/les/client_handler.go
index e95996c51..9583bd57c 100644
--- a/les/client_handler.go
+++ b/les/client_handler.go
@@ -28,8 +28,8 @@ import (
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
+ "github.com/ethereum/go-ethereum/les/downloader"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
@@ -100,11 +100,11 @@ func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter)
defer peer.close()
h.wg.Add(1)
defer h.wg.Done()
- err := h.handle(peer)
+ err := h.handle(peer, false)
return err
}
-func (h *clientHandler) handle(p *serverPeer) error {
+func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error {
if h.backend.peers.len() >= h.backend.config.LightPeers && !p.Peer.Info().Network.Trusted {
return p2p.DiscTooManyPeers
}
@@ -143,8 +143,11 @@ func (h *clientHandler) handle(p *serverPeer) error {
connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
serverConnectionGauge.Update(int64(h.backend.peers.len()))
}()
- h.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td})
-
+ // The initial announcement is discarded only in testing, which needs
+ // to suppress it to prevent syncing from kicking in.
+ if !noInitAnnounce {
+ h.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td})
+ }
// Mark the peer starts to be served.
atomic.StoreUint32(&p.serving, 1)
defer atomic.StoreUint32(&p.serving, 0)
@@ -472,7 +475,7 @@ func (d *downloaderPeerNotify) registerPeer(p *serverPeer) {
handler: h,
peer: p,
}
- h.downloader.RegisterLightPeer(p.id, eth.ETH65, pc)
+ h.downloader.RegisterLightPeer(p.id, eth.ETH66, pc)
}
func (d *downloaderPeerNotify) unregisterPeer(p *serverPeer) {
diff --git a/les/downloader/api.go b/les/downloader/api.go
new file mode 100644
index 000000000..2024d23de
--- /dev/null
+++ b/les/downloader/api.go
@@ -0,0 +1,166 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "context"
+ "sync"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+// PublicDownloaderAPI provides an API which gives information about the current synchronisation status.
+// It offers only methods that operate on data that can be available to anyone without security risks.
+type PublicDownloaderAPI struct {
+ d *Downloader
+ mux *event.TypeMux
+ installSyncSubscription chan chan interface{}
+ uninstallSyncSubscription chan *uninstallSyncSubscriptionRequest
+}
+
+// NewPublicDownloaderAPI creates a new PublicDownloaderAPI. The API has an internal event loop that
+// listens for events from the downloader through the global event mux. In case it receives one of
+// these events it broadcasts it to all syncing subscriptions that are installed through the
+// installSyncSubscription channel.
+func NewPublicDownloaderAPI(d *Downloader, m *event.TypeMux) *PublicDownloaderAPI {
+ api := &PublicDownloaderAPI{
+ d: d,
+ mux: m,
+ installSyncSubscription: make(chan chan interface{}),
+ uninstallSyncSubscription: make(chan *uninstallSyncSubscriptionRequest),
+ }
+
+ go api.eventLoop()
+
+ return api
+}
+
+// eventLoop runs a loop until the event mux closes. It installs and uninstalls new
+// sync subscriptions and broadcasts sync status updates to the installed sync subscriptions.
+func (api *PublicDownloaderAPI) eventLoop() {
+ var (
+ sub = api.mux.Subscribe(StartEvent{}, DoneEvent{}, FailedEvent{})
+ syncSubscriptions = make(map[chan interface{}]struct{})
+ )
+
+ for {
+ select {
+ case i := <-api.installSyncSubscription:
+ syncSubscriptions[i] = struct{}{}
+ case u := <-api.uninstallSyncSubscription:
+ delete(syncSubscriptions, u.c)
+ close(u.uninstalled)
+ case event := <-sub.Chan():
+ if event == nil {
+ return
+ }
+
+ var notification interface{}
+ switch event.Data.(type) {
+ case StartEvent:
+ notification = &SyncingResult{
+ Syncing: true,
+ Status: api.d.Progress(),
+ }
+ case DoneEvent, FailedEvent:
+ notification = false
+ }
+ // broadcast
+ for c := range syncSubscriptions {
+ c <- notification
+ }
+ }
+ }
+}
+
+// Syncing provides information when this node starts synchronising with the Ethereum network and when it's finished.
+func (api *PublicDownloaderAPI) Syncing(ctx context.Context) (*rpc.Subscription, error) {
+ notifier, supported := rpc.NotifierFromContext(ctx)
+ if !supported {
+ return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
+ }
+
+ rpcSub := notifier.CreateSubscription()
+
+ go func() {
+ statuses := make(chan interface{})
+ sub := api.SubscribeSyncStatus(statuses)
+
+ for {
+ select {
+ case status := <-statuses:
+ notifier.Notify(rpcSub.ID, status)
+ case <-rpcSub.Err():
+ sub.Unsubscribe()
+ return
+ case <-notifier.Closed():
+ sub.Unsubscribe()
+ return
+ }
+ }
+ }()
+
+ return rpcSub, nil
+}
+
+// SyncingResult provides information about the current synchronisation status for this node.
+type SyncingResult struct {
+ Syncing bool `json:"syncing"`
+ Status ethereum.SyncProgress `json:"status"`
+}
+
+// uninstallSyncSubscriptionRequest uninstalls a syncing subscription in the API event loop.
+type uninstallSyncSubscriptionRequest struct {
+ c chan interface{}
+ uninstalled chan interface{}
+}
+
+// SyncStatusSubscription represents a syncing subscription.
+type SyncStatusSubscription struct {
+ api *PublicDownloaderAPI // register subscription in event loop of this api instance
+ c chan interface{} // channel where events are broadcasted to
+ unsubOnce sync.Once // make sure unsubscribe logic is executed once
+}
+
+// Unsubscribe uninstalls the subscription from the PublicDownloaderAPI event loop.
+// The status channel that was passed to SubscribeSyncStatus isn't used anymore
+// after this method returns.
+func (s *SyncStatusSubscription) Unsubscribe() {
+ s.unsubOnce.Do(func() {
+ req := uninstallSyncSubscriptionRequest{s.c, make(chan interface{})}
+ s.api.uninstallSyncSubscription <- &req
+
+ for {
+ select {
+ case <-s.c:
+ // drop new status events until uninstall confirmation
+ continue
+ case <-req.uninstalled:
+ return
+ }
+ }
+ })
+}
+
+// SubscribeSyncStatus creates a subscription that will broadcast new synchronisation updates.
+// The given channel must receive interface values; the result can either be a
+// *SyncingResult (while a sync is running) or false (when the sync completes or fails).
+func (api *PublicDownloaderAPI) SubscribeSyncStatus(status chan interface{}) *SyncStatusSubscription {
+ api.installSyncSubscription <- status
+ return &SyncStatusSubscription{api: api, c: status}
+}
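A sketch of consuming this subscription API directly, as a hypothetical helper inside the same package (assumes an fmt import; api comes from NewPublicDownloaderAPI):

func watchSync(api *PublicDownloaderAPI, done <-chan struct{}) {
	statuses := make(chan interface{})
	sub := api.SubscribeSyncStatus(statuses)
	defer sub.Unsubscribe()

	for {
		select {
		case status := <-statuses:
			// Either *SyncingResult (sync running) or false (sync done/failed).
			fmt.Printf("sync update: %v\n", status)
		case <-done:
			return
		}
	}
}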
diff --git a/les/downloader/downloader.go b/les/downloader/downloader.go
new file mode 100644
index 000000000..e7dfc4158
--- /dev/null
+++ b/les/downloader/downloader.go
@@ -0,0 +1,2014 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// This is a temporary package whilst working on the eth/66 blocking refactors.
+// After that work is done, les needs to be refactored to use the new package,
+// or alternatively use a stripped down version of it. Either way, we need to
+// keep the changes scoped so duplicating temporarily seems the sanest.
+package downloader
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth/protocols/eth"
+ "github.com/ethereum/go-ethereum/eth/protocols/snap"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+var (
+ MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request
+ MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request
+ MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
+ MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
+ MaxStateFetch = 384 // Amount of node state values to allow fetching per request
+
+ maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
+ maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
+ maxResultsProcess = 2048 // Number of content download results to import at once into the chain
+ fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
+ lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
+
+ reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
+ reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
+
+ fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
+ fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
+ fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it
+ fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
+ fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync
+)
+
+var (
+ errBusy = errors.New("busy")
+ errUnknownPeer = errors.New("peer is unknown or unhealthy")
+ errBadPeer = errors.New("action from bad peer ignored")
+ errStallingPeer = errors.New("peer is stalling")
+ errUnsyncedPeer = errors.New("unsynced peer")
+ errNoPeers = errors.New("no peers to keep download active")
+ errTimeout = errors.New("timeout")
+ errEmptyHeaderSet = errors.New("empty header set by peer")
+ errPeersUnavailable = errors.New("no peers available or all tried for download")
+ errInvalidAncestor = errors.New("retrieved ancestor is invalid")
+ errInvalidChain = errors.New("retrieved hash chain is invalid")
+ errInvalidBody = errors.New("retrieved block body is invalid")
+ errInvalidReceipt = errors.New("retrieved receipt is invalid")
+ errCancelStateFetch = errors.New("state data download canceled (requested)")
+ errCancelContentProcessing = errors.New("content processing canceled (requested)")
+ errCanceled = errors.New("syncing canceled (requested)")
+ errNoSyncActive = errors.New("no sync active")
+ errTooOld = errors.New("peer's protocol version too old")
+ errNoAncestorFound = errors.New("no common ancestor found")
+)
+
+type Downloader struct {
+ mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
+ mux *event.TypeMux // Event multiplexer to announce sync operation events
+
+ checkpoint uint64 // Checkpoint block number to enforce head against (e.g. fast sync)
+ genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT)
+ queue *queue // Scheduler for selecting the hashes to download
+ peers *peerSet // Set of active peers from which download can proceed
+
+ stateDB ethdb.Database // Database to state sync into (and deduplicate via)
+ stateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks
+
+ // Statistics
+ syncStatsChainOrigin uint64 // Origin block number where syncing started at
+ syncStatsChainHeight uint64 // Highest block number known when syncing started
+ syncStatsState stateSyncStats
+ syncStatsLock sync.RWMutex // Lock protecting the sync stats fields
+
+ lightchain LightChain
+ blockchain BlockChain
+
+ // Callbacks
+ dropPeer peerDropFn // Drops a peer for misbehaving
+
+ // Status
+ synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
+ synchronising int32
+ notified int32
+ committed int32
+ ancientLimit uint64 // The maximum block number which can be regarded as ancient data.
+
+ // Channels
+ headerCh chan dataPack // Channel receiving inbound block headers
+ bodyCh chan dataPack // Channel receiving inbound block bodies
+ receiptCh chan dataPack // Channel receiving inbound receipts
+ bodyWakeCh chan bool // Channel to signal the block body fetcher of new tasks
+ receiptWakeCh chan bool // Channel to signal the receipt fetcher of new tasks
+ headerProcCh chan []*types.Header // Channel to feed the header processor new tasks
+
+ // State sync
+ pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
+ pivotLock sync.RWMutex // Lock protecting pivot header reads from updates
+
+ snapSync bool // Whether to run state sync over the snap protocol
+ SnapSyncer *snap.Syncer // TODO(karalabe): make private! hack for now
+ stateSyncStart chan *stateSync
+ trackStateReq chan *stateReq
+ stateCh chan dataPack // Channel receiving inbound node state data
+
+ // Cancellation and termination
+ cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop)
+ cancelCh chan struct{} // Channel to cancel mid-flight syncs
+ cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers
+ cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited.
+
+ quitCh chan struct{} // Quit channel to signal termination
+ quitLock sync.Mutex // Lock to prevent double closes
+
+ // Testing hooks
+ syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run
+ bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch
+ receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
+ chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
+}
+
+// LightChain encapsulates functions required to synchronise a light chain.
+type LightChain interface {
+ // HasHeader verifies a header's presence in the local chain.
+ HasHeader(common.Hash, uint64) bool
+
+ // GetHeaderByHash retrieves a header from the local chain.
+ GetHeaderByHash(common.Hash) *types.Header
+
+ // CurrentHeader retrieves the head header from the local chain.
+ CurrentHeader() *types.Header
+
+ // GetTd returns the total difficulty of a local block.
+ GetTd(common.Hash, uint64) *big.Int
+
+ // InsertHeaderChain inserts a batch of headers into the local chain.
+ InsertHeaderChain([]*types.Header, int) (int, error)
+
+ // SetHead rewinds the local chain to a new head.
+ SetHead(uint64) error
+}
+
+// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
+type BlockChain interface {
+ LightChain
+
+ // HasBlock verifies a block's presence in the local chain.
+ HasBlock(common.Hash, uint64) bool
+
+ // HasFastBlock verifies a fast block's presence in the local chain.
+ HasFastBlock(common.Hash, uint64) bool
+
+ // GetBlockByHash retrieves a block from the local chain.
+ GetBlockByHash(common.Hash) *types.Block
+
+ // CurrentBlock retrieves the head block from the local chain.
+ CurrentBlock() *types.Block
+
+ // CurrentFastBlock retrieves the head fast block from the local chain.
+ CurrentFastBlock() *types.Block
+
+ // FastSyncCommitHead directly commits the head block to a certain entity.
+ FastSyncCommitHead(common.Hash) error
+
+ // InsertChain inserts a batch of blocks into the local chain.
+ InsertChain(types.Blocks) (int, error)
+
+ // InsertReceiptChain inserts a batch of receipts into the local chain.
+ InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)
+
+ // Snapshots returns the blockchain snapshot tree to pause it during sync.
+ Snapshots() *snapshot.Tree
+}
+
+// New creates a new downloader to fetch hashes and blocks from remote peers.
+func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
+ if lightchain == nil {
+ lightchain = chain
+ }
+ dl := &Downloader{
+ stateDB: stateDb,
+ stateBloom: stateBloom,
+ mux: mux,
+ checkpoint: checkpoint,
+ queue: newQueue(blockCacheMaxItems, blockCacheInitialItems),
+ peers: newPeerSet(),
+ blockchain: chain,
+ lightchain: lightchain,
+ dropPeer: dropPeer,
+ headerCh: make(chan dataPack, 1),
+ bodyCh: make(chan dataPack, 1),
+ receiptCh: make(chan dataPack, 1),
+ bodyWakeCh: make(chan bool, 1),
+ receiptWakeCh: make(chan bool, 1),
+ headerProcCh: make(chan []*types.Header, 1),
+ quitCh: make(chan struct{}),
+ stateCh: make(chan dataPack),
+ SnapSyncer: snap.NewSyncer(stateDb),
+ stateSyncStart: make(chan *stateSync),
+ syncStatsState: stateSyncStats{
+ processed: rawdb.ReadFastTrieProgress(stateDb),
+ },
+ trackStateReq: make(chan *stateReq),
+ }
+ go dl.stateFetcher()
+ return dl
+}
+
+// Progress retrieves the synchronisation boundaries, specifically the origin
+// block where synchronisation started (it may have failed or been suspended);
+// the block or header the sync is currently at; and the latest known block
+// which the sync targets.
+//
+// In addition, during the state download phase of fast synchronisation the number
+// of processed and the total number of known states are also returned. Otherwise
+// these are zero.
+func (d *Downloader) Progress() ethereum.SyncProgress {
+ // Lock the current stats and return the progress
+ d.syncStatsLock.RLock()
+ defer d.syncStatsLock.RUnlock()
+
+ current := uint64(0)
+ mode := d.getMode()
+ switch {
+ case d.blockchain != nil && mode == FullSync:
+ current = d.blockchain.CurrentBlock().NumberU64()
+ case d.blockchain != nil && mode == FastSync:
+ current = d.blockchain.CurrentFastBlock().NumberU64()
+ case d.lightchain != nil:
+ current = d.lightchain.CurrentHeader().Number.Uint64()
+ default:
+ log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode)
+ }
+ return ethereum.SyncProgress{
+ StartingBlock: d.syncStatsChainOrigin,
+ CurrentBlock: current,
+ HighestBlock: d.syncStatsChainHeight,
+ PulledStates: d.syncStatsState.processed,
+ KnownStates: d.syncStatsState.processed + d.syncStatsState.pending,
+ }
+}
+
+// Synchronising returns whether the downloader is currently retrieving blocks.
+func (d *Downloader) Synchronising() bool {
+ return atomic.LoadInt32(&d.synchronising) > 0
+}
+
+// RegisterPeer injects a new download peer into the set of block sources to be
+// used for fetching hashes and blocks from.
+func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
+ var logger log.Logger
+ if len(id) < 16 {
+ // Tests use short IDs, don't choke on them
+ logger = log.New("peer", id)
+ } else {
+ logger = log.New("peer", id[:8])
+ }
+ logger.Trace("Registering sync peer")
+ if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
+ logger.Error("Failed to register sync peer", "err", err)
+ return err
+ }
+ return nil
+}
+
+// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
+func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
+ return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
+}
+
+// UnregisterPeer removes a peer from the known list, preventing any action from
+// the specified peer. An effort is also made to return any pending fetches into
+// the queue.
+func (d *Downloader) UnregisterPeer(id string) error {
+ // Unregister the peer from the active peer set and revoke any fetch tasks
+ var logger log.Logger
+ if len(id) < 16 {
+ // Tests use short IDs, don't choke on them
+ logger = log.New("peer", id)
+ } else {
+ logger = log.New("peer", id[:8])
+ }
+ logger.Trace("Unregistering sync peer")
+ if err := d.peers.Unregister(id); err != nil {
+ logger.Error("Failed to unregister sync peer", "err", err)
+ return err
+ }
+ d.queue.Revoke(id)
+
+ return nil
+}
+
+// Synchronise tries to sync up our local block chain with a remote peer, adding
+// various sanity checks as well as wrapping it with various log entries.
+func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
+ err := d.synchronise(id, head, td, mode)
+
+ switch err {
+ case nil, errBusy, errCanceled:
+ return err
+ }
+ if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) ||
+ errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) ||
+ errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) {
+ log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
+ if d.dropPeer == nil {
+ // The dropPeer method is nil when `--copydb` is used for a local copy.
+ // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
+ log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
+ } else {
+ d.dropPeer(id)
+ }
+ return err
+ }
+ log.Warn("Synchronisation failed, retrying", "err", err)
+ return err
+}
+
+// synchronise will select the peer and use it for synchronising. If an empty string is given
+// it will use the best peer possible and synchronise if its TD is higher than our own. If any of the
+// checks fail an error will be returned. This method is synchronous.
+func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
+ // Mock out the synchronisation if testing
+ if d.synchroniseMock != nil {
+ return d.synchroniseMock(id, hash)
+ }
+ // Make sure only one goroutine is ever allowed past this point at once
+ if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
+ return errBusy
+ }
+ defer atomic.StoreInt32(&d.synchronising, 0)
+
+ // Post a user notification of the sync (only once per session)
+ if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
+ log.Info("Block synchronisation started")
+ }
+ // If we are already full syncing, but have a fast-sync bloom filter lying
+ // around, make sure it doesn't use memory anymore. This is a special case
+ // when the user attempts to fast sync a new empty network.
+ if mode == FullSync && d.stateBloom != nil {
+ d.stateBloom.Close()
+ }
+ // If snap sync was requested, create the snap scheduler and switch to fast
+ // sync mode. Long term we could drop fast sync or merge the two together,
+ // but until snap becomes prevalent, we should support both. TODO(karalabe).
+ if mode == SnapSync {
+ if !d.snapSync {
+ // Snap sync uses the snapshot namespace to store potentially flaky data until
+ // sync completely heals and finishes. Pause snapshot maintenance in the
+ // meantime to prevent access.
+ if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
+ snapshots.Disable()
+ }
+ log.Warn("Enabling snapshot sync prototype")
+ d.snapSync = true
+ }
+ mode = FastSync
+ }
+ // Reset the queue, peer set and wake channels to clean any internal leftover state
+ d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
+ d.peers.Reset()
+
+ for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
+ select {
+ case <-ch:
+ default:
+ }
+ }
+ for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
+ for empty := false; !empty; {
+ select {
+ case <-ch:
+ default:
+ empty = true
+ }
+ }
+ }
+ for empty := false; !empty; {
+ select {
+ case <-d.headerProcCh:
+ default:
+ empty = true
+ }
+ }
+ // Create cancel channel for aborting mid-flight and mark the master peer
+ d.cancelLock.Lock()
+ d.cancelCh = make(chan struct{})
+ d.cancelPeer = id
+ d.cancelLock.Unlock()
+
+ defer d.Cancel() // No matter what, we can't leave the cancel channel open
+
+ // Atomically set the requested sync mode
+ atomic.StoreUint32(&d.mode, uint32(mode))
+
+ // Retrieve the origin peer and initiate the downloading process
+ p := d.peers.Peer(id)
+ if p == nil {
+ return errUnknownPeer
+ }
+ return d.syncWithPeer(p, hash, td)
+}
+
+func (d *Downloader) getMode() SyncMode {
+ return SyncMode(atomic.LoadUint32(&d.mode))
+}
+
+// syncWithPeer starts a block synchronization based on the hash chain from the
+// specified peer and head hash.
+func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
+ d.mux.Post(StartEvent{})
+ defer func() {
+ // reset on error
+ if err != nil {
+ d.mux.Post(FailedEvent{err})
+ } else {
+ latest := d.lightchain.CurrentHeader()
+ d.mux.Post(DoneEvent{latest})
+ }
+ }()
+ if p.version < eth.ETH66 {
+ return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, eth.ETH66)
+ }
+ mode := d.getMode()
+
+ log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode)
+ defer func(start time.Time) {
+ log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
+ }(time.Now())
+
+ // Look up the sync boundaries: the common ancestor and the target block
+ latest, pivot, err := d.fetchHead(p)
+ if err != nil {
+ return err
+ }
+ if mode == FastSync && pivot == nil {
+ // If no pivot block was returned, the head is below the min full block
+ // threshold (i.e. new chain). In that case we won't really fast sync
+ // anyway, but still need a valid pivot block to avoid some code paths
+ // hitting nil panics on access.
+ pivot = d.blockchain.CurrentBlock().Header()
+ }
+ height := latest.Number.Uint64()
+
+ origin, err := d.findAncestor(p, latest)
+ if err != nil {
+ return err
+ }
+ d.syncStatsLock.Lock()
+ if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
+ d.syncStatsChainOrigin = origin
+ }
+ d.syncStatsChainHeight = height
+ d.syncStatsLock.Unlock()
+
+ // Ensure our origin point is below any fast sync pivot point
+ if mode == FastSync {
+ if height <= uint64(fsMinFullBlocks) {
+ origin = 0
+ } else {
+ pivotNumber := pivot.Number.Uint64()
+ if pivotNumber <= origin {
+ origin = pivotNumber - 1
+ }
+ // Write out the pivot into the database so a rollback beyond it will
+ // reenable fast sync
+ rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
+ }
+ }
+ d.committed = 1
+ if mode == FastSync && pivot.Number.Uint64() != 0 {
+ d.committed = 0
+ }
+ if mode == FastSync {
+ // Set the ancient data limitation.
+ // If we are running fast sync, all block data older than ancientLimit will be
+ // written to the ancient store. More recent data will be written to the active
+ // database and will wait for the freezer to migrate.
+ //
+ // If there is a checkpoint available, then calculate the ancientLimit through
+ // that. Otherwise calculate the ancient limit through the advertised height
+ // of the remote peer.
+ //
+ // The reason for picking checkpoint first is that a malicious peer can give us
+ // a fake (very high) height, forcing the ancient limit to also be very high.
+ // The peer would start to feed us valid blocks until head, resulting in all of
+ // the blocks being written into the ancient store. A following mini-reorg
+ // could cause issues.
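+ //
+ // As an illustrative example (with the default fullMaxForkAncestry of
+ // 90000): with no usable checkpoint and a remote height of 500000, the
+ // ancient limit becomes 500000 - 90000 - 1 = 409999, so only blocks up to
+ // that number are written directly into the freezer.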
+ if d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 {
+ d.ancientLimit = d.checkpoint
+ } else if height > fullMaxForkAncestry+1 {
+ d.ancientLimit = height - fullMaxForkAncestry - 1
+ } else {
+ d.ancientLimit = 0
+ }
+ frozen, _ := d.stateDB.Ancients() // Ignore the error here since the light client can also hit this path.
+
+ // If a part of blockchain data has already been written into the active store,
+ // disable the ancient style insertion explicitly.
+ if origin >= frozen && frozen != 0 {
+ d.ancientLimit = 0
+ log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
+ } else if d.ancientLimit > 0 {
+ log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
+ }
+ // Rewind the ancient store and blockchain if reorg happens.
+ if origin+1 < frozen {
+ if err := d.lightchain.SetHead(origin + 1); err != nil {
+ return err
+ }
+ }
+ }
+ // Initiate the sync using a concurrent header and content retrieval algorithm
+ d.queue.Prepare(origin+1, mode)
+ if d.syncInitHook != nil {
+ d.syncInitHook(origin, height)
+ }
+ fetchers := []func() error{
+ func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved
+ func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync
+ func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
+ func() error { return d.processHeaders(origin+1, td) },
+ }
+ if mode == FastSync {
+ d.pivotLock.Lock()
+ d.pivotHeader = pivot
+ d.pivotLock.Unlock()
+
+ fetchers = append(fetchers, func() error { return d.processFastSyncContent() })
+ } else if mode == FullSync {
+ fetchers = append(fetchers, d.processFullSyncContent)
+ }
+ return d.spawnSync(fetchers)
+}
+
+// spawnSync runs all of the given fetcher functions to completion in separate
+// goroutines, returning the first error that appears.
+func (d *Downloader) spawnSync(fetchers []func() error) error {
+ errc := make(chan error, len(fetchers))
+ d.cancelWg.Add(len(fetchers))
+ for _, fn := range fetchers {
+ fn := fn
+ go func() { defer d.cancelWg.Done(); errc <- fn() }()
+ }
+ // Wait for the first error, then terminate the others.
+ var err error
+ for i := 0; i < len(fetchers); i++ {
+ if i == len(fetchers)-1 {
+ // Close the queue when all fetchers have exited.
+ // This will cause the block processor to end when
+ // it has processed the queue.
+ d.queue.Close()
+ }
+ if err = <-errc; err != nil && err != errCanceled {
+ break
+ }
+ }
+ d.queue.Close()
+ d.Cancel()
+ return err
+}
+
+// cancel aborts all of the operations and resets the queue. However, cancel does
+// not wait for the running download goroutines to finish. This method should be
+// used when cancelling the downloads from inside the downloader.
+func (d *Downloader) cancel() {
+ // Close the current cancel channel
+ d.cancelLock.Lock()
+ defer d.cancelLock.Unlock()
+
+ if d.cancelCh != nil {
+ select {
+ case <-d.cancelCh:
+ // Channel was already closed
+ default:
+ close(d.cancelCh)
+ }
+ }
+}
+
+// Cancel aborts all of the operations and waits for all download goroutines to
+// finish before returning.
+func (d *Downloader) Cancel() {
+ d.cancel()
+ d.cancelWg.Wait()
+}
+
+// Terminate interrupts the downloader, canceling all pending operations.
+// The downloader cannot be reused after calling Terminate.
+func (d *Downloader) Terminate() {
+ // Close the termination channel (make sure double close is allowed)
+ d.quitLock.Lock()
+ select {
+ case <-d.quitCh:
+ default:
+ close(d.quitCh)
+ }
+ if d.stateBloom != nil {
+ d.stateBloom.Close()
+ }
+ d.quitLock.Unlock()
+
+ // Cancel any pending download requests
+ d.Cancel()
+}
+
+// fetchHead retrieves the head header and prior pivot block (if available) from
+// a remote peer.
+func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) {
+ p.log.Debug("Retrieving remote chain head")
+ mode := d.getMode()
+
+ // Request the advertised remote head block and wait for the response
+ latest, _ := p.peer.Head()
+ fetch := 1
+ if mode == FastSync {
+ fetch = 2 // head + pivot headers
+ }
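+ // Note: with reverse=true and a skip of fsMinFullBlocks-1 (63 by default),
+ // the two headers returned are the advertised head and the one exactly
+ // fsMinFullBlocks below it, which fast sync adopts as the pivot.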
+ go p.peer.RequestHeadersByHash(latest, fetch, fsMinFullBlocks-1, true)
+
+ ttl := d.peers.rates.TargetTimeout()
+ timeout := time.After(ttl)
+ for {
+ select {
+ case <-d.cancelCh:
+ return nil, nil, errCanceled
+
+ case packet := <-d.headerCh:
+ // Discard anything not from the origin peer
+ if packet.PeerId() != p.id {
+ log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
+ break
+ }
+ // Make sure the peer gave us at least one and at most the requested headers
+ headers := packet.(*headerPack).headers
+ if len(headers) == 0 || len(headers) > fetch {
+ return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch)
+ }
+ // The first header needs to be the head, validate it against the checkpoint
+ // and the request. If only 1 header was returned, make sure no pivot was
+ // needed or none was requested.
+ head := headers[0]
+ if (mode == FastSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint {
+ return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint)
+ }
+ if len(headers) == 1 {
+ if mode == FastSync && head.Number.Uint64() > uint64(fsMinFullBlocks) {
+ return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer)
+ }
+ p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", head.Hash())
+ return head, nil, nil
+ }
+ // At this point we have 2 headers in total and the first is the
+ // validated head of the chain. Check the pivot number and return.
+ pivot := headers[1]
+ if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) {
+ return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks))
+ }
+ return head, pivot, nil
+
+ case <-timeout:
+ p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
+ return nil, nil, errTimeout
+
+ case <-d.bodyCh:
+ case <-d.receiptCh:
+ // Out of bounds delivery, ignore
+ }
+ }
+}
+
+// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
+// common ancestor.
+// It returns parameters to be used for peer.RequestHeadersByNumber:
+// from - starting block number
+// count - number of headers to request
+// skip - number of headers to skip
+// and also returns 'max', the last block which is expected to be returned by the remote peer,
+// given the (from, count, skip) parameters.
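+//
+// As an illustrative worked example (with the default MaxHeaderFetch = 192, so
+// MaxCount = 12): for remoteHeight = 1500 and localHeight = 1000, the total span
+// is 1499 - 999 = 500, the span clamps to 16 and the count to 12, yielding
+// from = 1323, count = 12, skip = 15 and max = 1499, i.e. a request for the
+// headers 1323, 1339, ..., 1499.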
+func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
+ var (
+ from int
+ count int
+ MaxCount = MaxHeaderFetch / 16
+ )
+ // requestHead is the highest block that we will ask for. If requestHead is not offset,
+ // the highest block that we will get is 16 blocks back from head, which means we
+ // would fetch 14 or 15 blocks unnecessarily in the common case where the
+ // height difference between us and the peer is only 1-2 blocks.
+ requestHead := int(remoteHeight) - 1
+ if requestHead < 0 {
+ requestHead = 0
+ }
+ // requestBottom is the lowest block we want included in the query
+ // Ideally, we want to include the one just below our own head
+ requestBottom := int(localHeight - 1)
+ if requestBottom < 0 {
+ requestBottom = 0
+ }
+ totalSpan := requestHead - requestBottom
+ span := 1 + totalSpan/MaxCount
+ if span < 2 {
+ span = 2
+ }
+ if span > 16 {
+ span = 16
+ }
+
+ count = 1 + totalSpan/span
+ if count > MaxCount {
+ count = MaxCount
+ }
+ if count < 2 {
+ count = 2
+ }
+ from = requestHead - (count-1)*span
+ if from < 0 {
+ from = 0
+ }
+ max := from + (count-1)*span
+ return int64(from), count, span - 1, uint64(max)
+}
+
+// findAncestor tries to locate the common ancestor link of the local chain and
+// a remote peer's blockchain. In the general case when our node was in sync and
+// on the correct chain, checking the top N links should already get us a match.
+// In the rare scenario when we ended up on a long reorganisation (i.e. none of
+// the head links match), we do a binary search to find the common ancestor.
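+//
+// As an illustrative note (with the default fullMaxForkAncestry of 90000): for
+// a local height of 200000 the search floor is 110000, so any ancestor reported
+// at or below that depth is rejected as an invalid (too deep) reorg.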
+func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
+ // Figure out the valid ancestor range to prevent rewrite attacks
+ var (
+ floor = int64(-1)
+ localHeight uint64
+ remoteHeight = remoteHeader.Number.Uint64()
+ )
+ mode := d.getMode()
+ switch mode {
+ case FullSync:
+ localHeight = d.blockchain.CurrentBlock().NumberU64()
+ case FastSync:
+ localHeight = d.blockchain.CurrentFastBlock().NumberU64()
+ default:
+ localHeight = d.lightchain.CurrentHeader().Number.Uint64()
+ }
+ p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
+
+ // Recap floor value for binary search
+ maxForkAncestry := fullMaxForkAncestry
+ if d.getMode() == LightSync {
+ maxForkAncestry = lightMaxForkAncestry
+ }
+ if localHeight >= maxForkAncestry {
+ // We're above the max reorg threshold, find the earliest fork point
+ floor = int64(localHeight - maxForkAncestry)
+ }
+ // If we're doing a light sync, ensure the floor doesn't go below the CHT, as
+ // all headers before that point will be missing.
+ if mode == LightSync {
+ // If we don't know the current CHT position, find it
+ if d.genesis == 0 {
+ header := d.lightchain.CurrentHeader()
+ for header != nil {
+ d.genesis = header.Number.Uint64()
+ if floor >= int64(d.genesis)-1 {
+ break
+ }
+ header = d.lightchain.GetHeaderByHash(header.ParentHash)
+ }
+ }
+ // We already know the "genesis" block number, cap floor to that
+ if floor < int64(d.genesis)-1 {
+ floor = int64(d.genesis) - 1
+ }
+ }
+
+ ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor)
+ if err == nil {
+ return ancestor, nil
+ }
+ // The span search failed. If the failure was anything other than a missing
+ // common ancestor, return the error. If no common ancestor was found, fall
+ // back to a binary search, where the error value will be reassigned.
+ if !errors.Is(err, errNoAncestorFound) {
+ return 0, err
+ }
+
+ ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor)
+ if err != nil {
+ return 0, err
+ }
+ return ancestor, nil
+}
+
+func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (commonAncestor uint64, err error) {
+ from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
+
+ p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
+ go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)
+
+ // Wait for the remote response to the head fetch
+ number, hash := uint64(0), common.Hash{}
+
+ ttl := d.peers.rates.TargetTimeout()
+ timeout := time.After(ttl)
+
+ for finished := false; !finished; {
+ select {
+ case <-d.cancelCh:
+ return 0, errCanceled
+
+ case packet := <-d.headerCh:
+ // Discard anything not from the origin peer
+ if packet.PeerId() != p.id {
+ log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
+ break
+ }
+ // Make sure the peer actually gave something valid
+ headers := packet.(*headerPack).headers
+ if len(headers) == 0 {
+ p.log.Warn("Empty head header set")
+ return 0, errEmptyHeaderSet
+ }
+ // Make sure the peer's reply conforms to the request
+ for i, header := range headers {
+ expectNumber := from + int64(i)*int64(skip+1)
+ if number := header.Number.Int64(); number != expectNumber {
+ p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
+ return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering"))
+ }
+ }
+ // Check if a common ancestor was found
+ finished = true
+ for i := len(headers) - 1; i >= 0; i-- {
+ // Skip any headers that underflow/overflow our requested set
+ if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
+ continue
+ }
+ // Otherwise check if we already know the header or not
+ h := headers[i].Hash()
+ n := headers[i].Number.Uint64()
+
+ var known bool
+ switch mode {
+ case FullSync:
+ known = d.blockchain.HasBlock(h, n)
+ case FastSync:
+ known = d.blockchain.HasFastBlock(h, n)
+ default:
+ known = d.lightchain.HasHeader(h, n)
+ }
+ if known {
+ number, hash = n, h
+ break
+ }
+ }
+
+ case <-timeout:
+ p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
+ return 0, errTimeout
+
+ case <-d.bodyCh:
+ case <-d.receiptCh:
+ // Out of bounds delivery, ignore
+ }
+ }
+ // If the head fetch already found an ancestor, return
+ if hash != (common.Hash{}) {
+ if int64(number) <= floor {
+ p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
+ return 0, errInvalidAncestor
+ }
+ p.log.Debug("Found common ancestor", "number", number, "hash", hash)
+ return number, nil
+ }
+ return 0, errNoAncestorFound
+}
+
+func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (commonAncestor uint64, err error) {
+ hash := common.Hash{}
+
+ // Ancestor not found, we need to binary search over our chain
+ start, end := uint64(0), remoteHeight
+ if floor > 0 {
+ start = uint64(floor)
+ }
+ p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)
+
+ for start+1 < end {
+ // Split our chain interval in two, and request the hash to cross check
+ check := (start + end) / 2
+
+ ttl := d.peers.rates.TargetTimeout()
+ timeout := time.After(ttl)
+
+ go p.peer.RequestHeadersByNumber(check, 1, 0, false)
+
+ // Wait until a reply arrives to this request
+ for arrived := false; !arrived; {
+ select {
+ case <-d.cancelCh:
+ return 0, errCanceled
+
+ case packet := <-d.headerCh:
+ // Discard anything not from the origin peer
+ if packet.PeerId() != p.id {
+ log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
+ break
+ }
+ // Make sure the peer actually gave something valid
+ headers := packet.(*headerPack).headers
+ if len(headers) != 1 {
+ p.log.Warn("Multiple headers for single request", "headers", len(headers))
+ return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
+ }
+ arrived = true
+
+ // Modify the search interval based on the response
+ h := headers[0].Hash()
+ n := headers[0].Number.Uint64()
+
+ var known bool
+ switch mode {
+ case FullSync:
+ known = d.blockchain.HasBlock(h, n)
+ case FastSync:
+ known = d.blockchain.HasFastBlock(h, n)
+ default:
+ known = d.lightchain.HasHeader(h, n)
+ }
+ if !known {
+ end = check
+ break
+ }
+ header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
+ if header.Number.Uint64() != check {
+ p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
+ return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number)
+ }
+ start = check
+ hash = h
+
+ case <-timeout:
+ p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
+ return 0, errTimeout
+
+ case <-d.bodyCh:
+ case <-d.receiptCh:
+ // Out of bounds delivery, ignore
+ }
+ }
+ }
+ // Ensure valid ancestry and return
+ if int64(start) <= floor {
+ p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
+ return 0, errInvalidAncestor
+ }
+ p.log.Debug("Found common ancestor", "number", start, "hash", hash)
+ return start, nil
+}
+
+// fetchHeaders keeps retrieving headers concurrently from the number
+// requested, until no more are returned, potentially throttling on the way. To
+// facilitate concurrency but still protect against malicious nodes sending bad
+// headers, we construct a header chain skeleton using the "origin" peer we are
+// syncing with, and fill in the missing headers using anyone else. Headers from
+// other peers are only accepted if they map cleanly to the skeleton. If no one
+// can fill in the skeleton - not even the origin peer - it's assumed invalid and
+// the origin is dropped.
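+//
+// As an illustrative example (with the defaults MaxHeaderFetch = 192 and
+// MaxSkeletonSize = 128): a skeleton request starting at from = 1000 asks the
+// origin peer for up to 128 headers at 1191, 1383, 1575, ..., and the 191-header
+// gaps between those anchors are then filled concurrently from any other peers.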
+func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
+ p.log.Debug("Directing header downloads", "origin", from)
+ defer p.log.Debug("Header download terminated")
+
+ // Create a timeout timer, and the associated header fetcher
+ skeleton := true // Skeleton assembly phase or finishing up
+ pivoting := false // Whether the next request is pivot verification
+ request := time.Now() // time of the last skeleton fetch request
+ timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
+ <-timeout.C // timeout channel should be initially empty
+ defer timeout.Stop()
+
+ var ttl time.Duration
+ getHeaders := func(from uint64) {
+ request = time.Now()
+
+ ttl = d.peers.rates.TargetTimeout()
+ timeout.Reset(ttl)
+
+ if skeleton {
+ p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
+ go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
+ } else {
+ p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
+ go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
+ }
+ }
+ getNextPivot := func() {
+ pivoting = true
+ request = time.Now()
+
+ ttl = d.peers.rates.TargetTimeout()
+ timeout.Reset(ttl)
+
+ d.pivotLock.RLock()
+ pivot := d.pivotHeader.Number.Uint64()
+ d.pivotLock.RUnlock()
+
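+ // With the default fsMinFullBlocks = 64, the request below asks for the
+ // headers at pivot+64 and pivot+2*64-8, matching the staleness checks
+ // applied when the response arrives.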
+ p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks))
+ go p.peer.RequestHeadersByNumber(pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep
+ }
+ // Start pulling the header chain skeleton until all is done
+ ancestor := from
+ getHeaders(from)
+
+ mode := d.getMode()
+ for {
+ select {
+ case <-d.cancelCh:
+ return errCanceled
+
+ case packet := <-d.headerCh:
+ // Make sure the active peer is giving us the skeleton headers
+ if packet.PeerId() != p.id {
+ log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
+ break
+ }
+ headerReqTimer.UpdateSince(request)
+ timeout.Stop()
+
+ // If the pivot is being checked, move it if it became stale and resume the real retrieval
+ var pivot uint64
+
+ d.pivotLock.RLock()
+ if d.pivotHeader != nil {
+ pivot = d.pivotHeader.Number.Uint64()
+ }
+ d.pivotLock.RUnlock()
+
+ if pivoting {
+ if packet.Items() == 2 {
+ // Retrieve the headers and do some sanity checks, just in case
+ headers := packet.(*headerPack).headers
+
+ if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want {
+ log.Warn("Peer sent invalid next pivot", "have", have, "want", want)
+ return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want)
+ }
+ if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want {
+ log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want)
+ return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want)
+ }
+ log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number)
+ pivot = headers[0].Number.Uint64()
+
+ d.pivotLock.Lock()
+ d.pivotHeader = headers[0]
+ d.pivotLock.Unlock()
+
+ // Write out the pivot into the database so a rollback beyond
+ // it will reenable fast sync and update the state root that
+ // the state syncer will be downloading.
+ rawdb.WriteLastPivotNumber(d.stateDB, pivot)
+ }
+ pivoting = false
+ getHeaders(from)
+ continue
+ }
+ // If the skeleton's finished, pull any remaining head headers directly from the origin
+ if skeleton && packet.Items() == 0 {
+ skeleton = false
+ getHeaders(from)
+ continue
+ }
+ // If no more headers are inbound, notify the content fetchers and return
+ if packet.Items() == 0 {
+ // Don't abort header fetches while the pivot is downloading
+ if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
+ p.log.Debug("No headers, waiting for pivot commit")
+ select {
+ case <-time.After(fsHeaderContCheck):
+ getHeaders(from)
+ continue
+ case <-d.cancelCh:
+ return errCanceled
+ }
+ }
+ // Pivot done (or not in fast sync) and no more headers, terminate the process
+ p.log.Debug("No more headers available")
+ select {
+ case d.headerProcCh <- nil:
+ return nil
+ case <-d.cancelCh:
+ return errCanceled
+ }
+ }
+ headers := packet.(*headerPack).headers
+
+ // If we received a skeleton batch, resolve internals concurrently
+ if skeleton {
+ filled, proced, err := d.fillHeaderSkeleton(from, headers)
+ if err != nil {
+ p.log.Debug("Skeleton chain invalid", "err", err)
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
+ }
+ headers = filled[proced:]
+ from += uint64(proced)
+ } else {
+ // If we're closing in on the chain head, but haven't yet reached it, delay
+ // the last few headers so mini reorgs on the head don't cause invalid hash
+ // chain errors.
+ if n := len(headers); n > 0 {
+ // Retrieve the current head we're at
+ var head uint64
+ if mode == LightSync {
+ head = d.lightchain.CurrentHeader().Number.Uint64()
+ } else {
+ head = d.blockchain.CurrentFastBlock().NumberU64()
+ if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
+ head = full
+ }
+ }
+ // If the head is below the common ancestor, we're actually deduplicating
+ // already existing chain segments, so use the ancestor as the fake head.
+ // Otherwise we might end up delaying header deliveries pointlessly.
+ if head < ancestor {
+ head = ancestor
+ }
+ // If the head is way older than this batch, delay the last few headers
+ if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
+ delay := reorgProtHeaderDelay
+ if delay > n {
+ delay = n
+ }
+ headers = headers[:n-delay]
+ }
+ }
+ }
+ // Insert all the new headers and fetch the next batch
+ if len(headers) > 0 {
+ p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
+ select {
+ case d.headerProcCh <- headers:
+ case <-d.cancelCh:
+ return errCanceled
+ }
+ from += uint64(len(headers))
+
+ // If we're still in the skeleton-filling phase of fast sync, check pivot
+ // staleness before continuing to the next skeleton fill
+ if skeleton && pivot > 0 {
+ getNextPivot()
+ } else {
+ getHeaders(from)
+ }
+ } else {
+ // No headers delivered, or all of them were delayed, sleep a bit and retry
+ p.log.Trace("All headers delayed, waiting")
+ select {
+ case <-time.After(fsHeaderContCheck):
+ getHeaders(from)
+ continue
+ case <-d.cancelCh:
+ return errCanceled
+ }
+ }
+
+ case <-timeout.C:
+ if d.dropPeer == nil {
+ // The dropPeer method is nil when `--copydb` is used for a local copy.
+ // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
+ p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
+ break
+ }
+ // Header retrieval timed out, consider the peer bad and drop
+ p.log.Debug("Header request timed out", "elapsed", ttl)
+ headerTimeoutMeter.Mark(1)
+ d.dropPeer(p.id)
+
+ // Finish the sync gracefully instead of dumping the gathered data though
+ for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
+ select {
+ case ch <- false:
+ case <-d.cancelCh:
+ }
+ }
+ select {
+ case d.headerProcCh <- nil:
+ case <-d.cancelCh:
+ }
+ return fmt.Errorf("%w: header request timed out", errBadPeer)
+ }
+ }
+}
+
+// fillHeaderSkeleton concurrently retrieves headers from all our available peers
+// and maps them to the provided skeleton header chain.
+//
+// Any partial results from the beginning of the skeleton are (if possible) forwarded
+// immediately to the header processor to keep the rest of the pipeline full even
+// in the case of header stalls.
+//
+// The method returns the entire filled skeleton and also the number of headers
+// already forwarded for processing.
+func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
+ log.Debug("Filling up skeleton", "from", from)
+ d.queue.ScheduleSkeleton(from, skeleton)
+
+ var (
+ deliver = func(packet dataPack) (int, error) {
+ pack := packet.(*headerPack)
+ return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
+ }
+ expire = func() map[string]int { return d.queue.ExpireHeaders(d.peers.rates.TargetTimeout()) }
+ reserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) {
+ return d.queue.ReserveHeaders(p, count), false, false
+ }
+ fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
+ capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.peers.rates.TargetRoundTrip()) }
+ setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) {
+ p.SetHeadersIdle(accepted, deliveryTime)
+ }
+ )
+ err := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire,
+ d.queue.PendingHeaders, d.queue.InFlightHeaders, reserve,
+ nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
+
+ log.Debug("Skeleton fill terminated", "err", err)
+
+ filled, proced := d.queue.RetrieveHeaders()
+ return filled, proced, err
+}
+
+// fetchBodies iteratively downloads the scheduled block bodies, taking any
+// available peers, reserving a chunk of blocks for each, waiting for delivery
+// and also periodically checking for timeouts.
+func (d *Downloader) fetchBodies(from uint64) error {
+ log.Debug("Downloading block bodies", "origin", from)
+
+ var (
+ deliver = func(packet dataPack) (int, error) {
+ pack := packet.(*bodyPack)
+ return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
+ }
+ expire = func() map[string]int { return d.queue.ExpireBodies(d.peers.rates.TargetTimeout()) }
+ fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
+ capacity = func(p *peerConnection) int { return p.BlockCapacity(d.peers.rates.TargetRoundTrip()) }
+ setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) }
+ )
+ err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire,
+ d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ReserveBodies,
+ d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
+
+ log.Debug("Block body download terminated", "err", err)
+ return err
+}
+
+// fetchReceipts iteratively downloads the scheduled block receipts, taking any
+// available peers, reserving a chunk of receipts for each, waiting for delivery
+// and also periodically checking for timeouts.
+func (d *Downloader) fetchReceipts(from uint64) error {
+ log.Debug("Downloading transaction receipts", "origin", from)
+
+ var (
+ deliver = func(packet dataPack) (int, error) {
+ pack := packet.(*receiptPack)
+ return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
+ }
+ expire = func() map[string]int { return d.queue.ExpireReceipts(d.peers.rates.TargetTimeout()) }
+ fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
+ capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.peers.rates.TargetRoundTrip()) }
+ setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) {
+ p.SetReceiptsIdle(accepted, deliveryTime)
+ }
+ )
+ err := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire,
+ d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ReserveReceipts,
+ d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
+
+ log.Debug("Transaction receipt download terminated", "err", err)
+ return err
+}
+
+// fetchParts iteratively downloads scheduled block parts, taking any available
+// peers, reserving a chunk of fetch requests for each, waiting for delivery and
+// also periodically checking for timeouts.
+//
+// As the scheduling/timeout logic mostly is the same for all downloaded data
+// types, this method is used by each for data gathering and is instrumented with
+// various callbacks to handle the slight differences between processing them.
+//
+// The instrumentation parameters:
+// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
+// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
+// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
+// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
+// - pending: task callback for the number of requests still needing download (detect completion/non-completability)
+// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
+// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
+// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
+// - fetch: network callback to actually send a particular download request to a physical remote peer
+// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
+// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
+// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
+// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
+// - kind: textual label of the type being downloaded to display in log messages
+func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
+ expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
+ fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
+ idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int, time.Time), kind string) error {
+
+ // Create a ticker to detect expired retrieval tasks
+ ticker := time.NewTicker(100 * time.Millisecond)
+ defer ticker.Stop()
+
+ update := make(chan struct{}, 1)
+
+ // Prepare the queue and fetch block parts until the block header fetcher's done
+ finished := false
+ for {
+ select {
+ case <-d.cancelCh:
+ return errCanceled
+
+ case packet := <-deliveryCh:
+ deliveryTime := time.Now()
+ // If the peer was previously banned and failed to deliver its pack
+ // in a reasonable time frame, ignore its message.
+ if peer := d.peers.Peer(packet.PeerId()); peer != nil {
+ // Deliver the received chunk of data and check chain validity
+ accepted, err := deliver(packet)
+ if errors.Is(err, errInvalidChain) {
+ return err
+ }
+ // Unless a peer delivered something completely different from what was
+ // requested (usually caused by a timed out request which came through in
+ // the end), set it to idle. If the delivery's stale, the peer should have
+ // already been idled.
+ if !errors.Is(err, errStaleDelivery) {
+ setIdle(peer, accepted, deliveryTime)
+ }
+ // Issue a log to the user to see what's going on
+ switch {
+ case err == nil && packet.Items() == 0:
+ peer.log.Trace("Requested data not delivered", "type", kind)
+ case err == nil:
+ peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
+ default:
+ peer.log.Debug("Failed to deliver retrieved data", "type", kind, "err", err)
+ }
+ }
+ // Blocks assembled, try to update the progress
+ select {
+ case update <- struct{}{}:
+ default:
+ }
+
+ case cont := <-wakeCh:
+ // The header fetcher sent a continuation flag, check if it's done
+ if !cont {
+ finished = true
+ }
+ // Headers arrive, try to update the progress
+ select {
+ case update <- struct{}{}:
+ default:
+ }
+
+ case <-ticker.C:
+ // Sanity check update the progress
+ select {
+ case update <- struct{}{}:
+ default:
+ }
+
+ case <-update:
+ // Short circuit if we lost all our peers
+ if d.peers.Len() == 0 {
+ return errNoPeers
+ }
+ // Check for fetch request timeouts and demote the responsible peers
+ for pid, fails := range expire() {
+ if peer := d.peers.Peer(pid); peer != nil {
+ // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
+ // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times
+ // out, then sync-wise we need to get rid of the peer.
+ //
+ // The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
+ // and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
+ // how response times react, so it always requests one more than the minimum (i.e. min 2).
+ if fails > 2 {
+ peer.log.Trace("Data delivery timed out", "type", kind)
+ setIdle(peer, 0, time.Now())
+ } else {
+ peer.log.Debug("Stalling delivery, dropping", "type", kind)
+
+ if d.dropPeer == nil {
+ // The dropPeer method is nil when `--copydb` is used for a local copy.
+ // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
+ peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
+ } else {
+ d.dropPeer(pid)
+
+ // If this peer was the master peer, abort sync immediately
+ d.cancelLock.RLock()
+ master := pid == d.cancelPeer
+ d.cancelLock.RUnlock()
+
+ if master {
+ d.cancel()
+ return errTimeout
+ }
+ }
+ }
+ }
+ }
+ // If there's nothing more to fetch, wait or terminate
+ if pending() == 0 {
+ if !inFlight() && finished {
+ log.Debug("Data fetching completed", "type", kind)
+ return nil
+ }
+ break
+ }
+ // Send a download request to all idle peers, until throttled
+ progressed, throttled, running := false, false, inFlight()
+ idles, total := idle()
+ pendCount := pending()
+ for _, peer := range idles {
+ // Short circuit if throttling activated
+ if throttled {
+ break
+ }
+ // Short circuit if there are no more available tasks.
+ if pendCount = pending(); pendCount == 0 {
+ break
+ }
+ // Reserve a chunk of fetches for a peer. A nil can mean either that
+ // no more headers are available, or that the peer is known not to
+ // have them.
+ request, progress, throttle := reserve(peer, capacity(peer))
+ if progress {
+ progressed = true
+ }
+ if throttle {
+ throttled = true
+ throttleCounter.Inc(1)
+ }
+ if request == nil {
+ continue
+ }
+ if request.From > 0 {
+ peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
+ } else {
+ peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
+ }
+ // Fetch the chunk and make sure any errors return the hashes to the queue
+ if fetchHook != nil {
+ fetchHook(request.Headers)
+ }
+ if err := fetch(peer, request); err != nil {
+ // Although we could try and make an attempt to fix this, this error really
+ // means that we've double allocated a fetch task to a peer. If that is the
+ // case, the internal state of the downloader and the queue is very wrong so
+ // better hard crash and note the error instead of silently accumulating into
+ // a much bigger issue.
+ panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
+ }
+ running = true
+ }
+ // Make sure that we have peers available for fetching. If all peers have been tried
+ // and all failed, throw an error.
+ if !progressed && !throttled && !running && len(idles) == total && pendCount > 0 {
+ return errPeersUnavailable
+ }
+ }
+ }
+}
+
+// processHeaders takes batches of retrieved headers from an input channel and
+// keeps processing and scheduling them into the header chain and downloader's
+// queue until the stream ends or a failure occurs.
+func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
+ // Keep a count of uncertain headers to roll back
+ var (
+ rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis)
+ rollbackErr error
+ mode = d.getMode()
+ )
+ defer func() {
+ if rollback > 0 {
+ lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
+ if mode != LightSync {
+ lastFastBlock = d.blockchain.CurrentFastBlock().Number()
+ lastBlock = d.blockchain.CurrentBlock().Number()
+ }
+ if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
+ // We're already unwinding the stack, only print the error to make it more visible
+ log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
+ }
+ curFastBlock, curBlock := common.Big0, common.Big0
+ if mode != LightSync {
+ curFastBlock = d.blockchain.CurrentFastBlock().Number()
+ curBlock = d.blockchain.CurrentBlock().Number()
+ }
+ log.Warn("Rolled back chain segment",
+ "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
+ "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
+ "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
+ }
+ }()
+ // Wait for batches of headers to process
+ gotHeaders := false
+
+ for {
+ select {
+ case <-d.cancelCh:
+ rollbackErr = errCanceled
+ return errCanceled
+
+ case headers := <-d.headerProcCh:
+ // Terminate header processing if we synced up
+ if len(headers) == 0 {
+ // Notify everyone that headers are fully processed
+ for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
+ select {
+ case ch <- false:
+ case <-d.cancelCh:
+ }
+ }
+ // If no headers were retrieved at all, the peer violated its TD promise that it had a
+ // better chain compared to ours. The only exception is if its promised blocks were
+ // already imported by other means (e.g. fetcher):
+ //
+ // R , L : Both at block 10
+ // R: Mine block 11, and propagate it to L
+ // L: Queue block 11 for import
+ // L: Notice that R's head and TD increased compared to ours, start sync
+ // L: Import of block 11 finishes
+ // L: Sync begins, and finds common ancestor at 11
+ // L: Request new headers up from 11 (R's TD was higher, it must have something)
+ // R: Nothing to give
+ if mode != LightSync {
+ head := d.blockchain.CurrentBlock()
+ if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
+ return errStallingPeer
+ }
+ }
+ // If fast or light syncing, ensure promised headers are indeed delivered. This is
+ // needed to detect scenarios where an attacker feeds a bad pivot and then bails out
+ // of delivering the post-pivot blocks that would flag the invalid content.
+ //
+ // This check cannot be executed "as is" for full imports, since blocks may still be
+ // queued for processing when the header download completes. However, as long as the
+ // peer gave us something useful, we're already happy/progressed (above check).
+ if mode == FastSync || mode == LightSync {
+ head := d.lightchain.CurrentHeader()
+ if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
+ return errStallingPeer
+ }
+ }
+ // Disable any rollback and return
+ rollback = 0
+ return nil
+ }
+ // Otherwise split the chunk of headers into batches and process them
+ gotHeaders = true
+ for len(headers) > 0 {
+ // Terminate if something failed in between processing chunks
+ select {
+ case <-d.cancelCh:
+ rollbackErr = errCanceled
+ return errCanceled
+ default:
+ }
+ // Select the next chunk of headers to import
+ limit := maxHeadersProcess
+ if limit > len(headers) {
+ limit = len(headers)
+ }
+ chunk := headers[:limit]
+
+ // In case of header only syncing, validate the chunk immediately
+ if mode == FastSync || mode == LightSync {
+ // If we're importing pure headers, verify based on their recentness
+ var pivot uint64
+
+ d.pivotLock.RLock()
+ if d.pivotHeader != nil {
+ pivot = d.pivotHeader.Number.Uint64()
+ }
+ d.pivotLock.RUnlock()
+
+ frequency := fsHeaderCheckFrequency
+ if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
+ frequency = 1
+ }
+ if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
+ rollbackErr = err
+
+ // If some headers were inserted, track them as uncertain
+ if (mode == FastSync || frequency > 1) && n > 0 && rollback == 0 {
+ rollback = chunk[0].Number.Uint64()
+ }
+ log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
+ }
+ // All verifications passed, track all headers within the allotted limits
+ if mode == FastSync {
+ head := chunk[len(chunk)-1].Number.Uint64()
+ if head-rollback > uint64(fsHeaderSafetyNet) {
+ rollback = head - uint64(fsHeaderSafetyNet)
+ } else {
+ rollback = 1
+ }
+ }
+ }
+ // Unless we're doing light chains, schedule the headers for associated content retrieval
+ if mode == FullSync || mode == FastSync {
+ // If we've reached the allowed number of pending headers, stall a bit
+ for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
+ select {
+ case <-d.cancelCh:
+ rollbackErr = errCanceled
+ return errCanceled
+ case <-time.After(time.Second):
+ }
+ }
+ // Otherwise insert the headers for content retrieval
+ inserts := d.queue.Schedule(chunk, origin)
+ if len(inserts) != len(chunk) {
+ rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk))
+ return fmt.Errorf("%w: stale headers", errBadPeer)
+ }
+ }
+ headers = headers[limit:]
+ origin += uint64(limit)
+ }
+ // Update the highest block number we know if a higher one is found.
+ d.syncStatsLock.Lock()
+ if d.syncStatsChainHeight < origin {
+ d.syncStatsChainHeight = origin - 1
+ }
+ d.syncStatsLock.Unlock()
+
+ // Signal the content downloaders of the availability of new tasks
+ for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
+ select {
+ case ch <- true:
+ default:
+ }
+ }
+ }
+ }
+}
+
+// processFullSyncContent takes fetch results from the queue and imports them into the chain.
+func (d *Downloader) processFullSyncContent() error {
+ for {
+ results := d.queue.Results(true)
+ if len(results) == 0 {
+ return nil
+ }
+ if d.chainInsertHook != nil {
+ d.chainInsertHook(results)
+ }
+ if err := d.importBlockResults(results); err != nil {
+ return err
+ }
+ }
+}
+
+func (d *Downloader) importBlockResults(results []*fetchResult) error {
+ // Check for any early termination requests
+ if len(results) == 0 {
+ return nil
+ }
+ select {
+ case <-d.quitCh:
+ return errCancelContentProcessing
+ default:
+ }
+ // Retrieve a batch of results to import
+ first, last := results[0].Header, results[len(results)-1].Header
+ log.Debug("Inserting downloaded chain", "items", len(results),
+ "firstnum", first.Number, "firsthash", first.Hash(),
+ "lastnum", last.Number, "lasthash", last.Hash(),
+ )
+ blocks := make([]*types.Block, len(results))
+ for i, result := range results {
+ blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
+ }
+ if index, err := d.blockchain.InsertChain(blocks); err != nil {
+ if index < len(results) {
+ log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
+ } else {
+ // The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
+ // when it needs to preprocess blocks to import a sidechain.
+ // The importer will put together a new list of blocks to import, which is a superset
+ // of the blocks delivered from the downloader, and the indexing will be off.
+ log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
+ }
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
+ }
+ return nil
+}
+
+// processFastSyncContent takes fetch results from the queue and writes them to the
+// database. It also controls the synchronisation of state nodes of the pivot block.
+func (d *Downloader) processFastSyncContent() error {
+ // Start syncing state of the reported head block. This should get us most of
+ // the state of the pivot block.
+ d.pivotLock.RLock()
+ sync := d.syncState(d.pivotHeader.Root)
+ d.pivotLock.RUnlock()
+
+ defer func() {
+ // The `sync` object is replaced every time the pivot moves. We need to
+ // defer closing the very last active one, hence the lazy evaluation
+ // here instead of a direct `defer sync.Cancel()`.
+ sync.Cancel()
+ }()
+
+ closeOnErr := func(s *stateSync) {
+ if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
+ d.queue.Close() // wake up Results
+ }
+ }
+ go closeOnErr(sync)
+
+ // To cater for moving pivot points, track the pivot block and subsequently
+ // accumulated download results separately.
+ var (
+ oldPivot *fetchResult // Locked in pivot block, might change eventually
+ oldTail []*fetchResult // Downloaded content after the pivot
+ )
+ for {
+ // Wait for the next batch of downloaded data to be available, and if the pivot
+ // block became stale, move the goalpost
+ results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
+ if len(results) == 0 {
+ // If pivot sync is done, stop
+ if oldPivot == nil {
+ return sync.Cancel()
+ }
+ // If sync failed, stop
+ select {
+ case <-d.cancelCh:
+ sync.Cancel()
+ return errCanceled
+ default:
+ }
+ }
+ if d.chainInsertHook != nil {
+ d.chainInsertHook(results)
+ }
+ // If we haven't downloaded the pivot block yet, check pivot staleness
+ // notifications from the header downloader
+ d.pivotLock.RLock()
+ pivot := d.pivotHeader
+ d.pivotLock.RUnlock()
+
+ if oldPivot == nil {
+ if pivot.Root != sync.root {
+ sync.Cancel()
+ sync = d.syncState(pivot.Root)
+
+ go closeOnErr(sync)
+ }
+ } else {
+ results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
+ }
+ // Split around the pivot block and process the two sides via fast/full sync
+ if atomic.LoadInt32(&d.committed) == 0 {
+ latest := results[len(results)-1].Header
+ // If the height is above the pivot block by 2 sets, it means the pivot
+ // became stale in the network and was garbage collected, so move to a
+ // new pivot.
+ //
+ // Note, we have `reorgProtHeaderDelay` blocks withheld. Those need to be
+ // taken into account, otherwise we're detecting the pivot move late and
+ // will drop peers due to unavailable state!
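+ //
+ // With the defaults fsMinFullBlocks = 64 and reorgProtHeaderDelay = 2,
+ // the pivot is considered stale once the head reaches pivot+126, and the
+ // new pivot is then picked 62 blocks below that head.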
+ if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
+ log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
+ pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted
+
+ d.pivotLock.Lock()
+ d.pivotHeader = pivot
+ d.pivotLock.Unlock()
+
+ // Write out the pivot into the database so a rollback beyond it will
+ // reenable fast sync
+ rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
+ }
+ }
+ P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
+ if err := d.commitFastSyncData(beforeP, sync); err != nil {
+ return err
+ }
+ if P != nil {
+ // If new pivot block found, cancel old state retrieval and restart
+ if oldPivot != P {
+ sync.Cancel()
+ sync = d.syncState(P.Header.Root)
+
+ go closeOnErr(sync)
+ oldPivot = P
+ }
+ // Wait for completion, occasionally checking for pivot staleness
+ select {
+ case <-sync.done:
+ if sync.err != nil {
+ return sync.err
+ }
+ if err := d.commitPivotBlock(P); err != nil {
+ return err
+ }
+ oldPivot = nil
+
+ case <-time.After(time.Second):
+ oldTail = afterP
+ continue
+ }
+ }
+ // Fast sync done, pivot commit done, full import
+ if err := d.importBlockResults(afterP); err != nil {
+ return err
+ }
+ }
+}
+
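+// splitAroundPivot partitions the fetch results into the blocks before the
+// pivot, the pivot block itself and the blocks after it. As an illustrative
+// example: with pivot = 100 and results covering blocks 98-103, it returns
+// p = 100, before = [98, 99] and after = [101, 102, 103].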
+func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
+ if len(results) == 0 {
+ return nil, nil, nil
+ }
+ if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
+ // the pivot is somewhere in the future
+ return nil, results, nil
+ }
+ // This could also be optimized, but it only happens very seldom
+ for _, result := range results {
+ num := result.Header.Number.Uint64()
+ switch {
+ case num < pivot:
+ before = append(before, result)
+ case num == pivot:
+ p = result
+ default:
+ after = append(after, result)
+ }
+ }
+ return p, before, after
+}
+
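+// commitFastSyncData imports a batch of pre-pivot fetch results (blocks and
+// receipts) into the receipt chain, bailing out early if the sync was
+// terminated or the pivot state sync has already failed.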
+func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
+ // Check for any early termination requests
+ if len(results) == 0 {
+ return nil
+ }
+ select {
+ case <-d.quitCh:
+ return errCancelContentProcessing
+ case <-stateSync.done:
+ if err := stateSync.Wait(); err != nil {
+ return err
+ }
+ default:
+ }
+ // Retrieve a batch of results to import
+ first, last := results[0].Header, results[len(results)-1].Header
+ log.Debug("Inserting fast-sync blocks", "items", len(results),
+ "firstnum", first.Number, "firsthash", first.Hash(),
+ "lastnumn", last.Number, "lasthash", last.Hash(),
+ )
+ blocks := make([]*types.Block, len(results))
+ receipts := make([]types.Receipts, len(results))
+ for i, result := range results {
+ blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
+ receipts[i] = result.Receipts
+ }
+ if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
+ log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
+ }
+ return nil
+}
+
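+// commitPivotBlock writes the pivot block and its receipts to the database,
+// commits it as the new chain head and marks the downloader as committed,
+// requiring full block imports from here on.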
+func (d *Downloader) commitPivotBlock(result *fetchResult) error {
+ block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
+ log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
+
+ // Commit the pivot block as the new head, will require full sync from here on
+ if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
+ return err
+ }
+ if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
+ return err
+ }
+ atomic.StoreInt32(&d.committed, 1)
+
+ // If we had a bloom filter for the state sync, deallocate it now. Note, we only
+ // deallocate internally, but keep the empty wrapper. This ensures that if we do
+ // a rollback after committing the pivot and restarting fast sync, we don't end
+ // up using a nil bloom. Empty bloom is fine, it just returns that it does not
+ // have the info we need, so reach down to the database instead.
+ if d.stateBloom != nil {
+ d.stateBloom.Close()
+ }
+ return nil
+}
+
+// DeliverHeaders injects a new batch of block headers received from a remote
+// node into the download schedule.
+func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {
+ return d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
+}
+
+// DeliverBodies injects a new batch of block bodies received from a remote node.
+func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error {
+ return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
+}
+
+// DeliverReceipts injects a new batch of receipts received from a remote node.
+func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) error {
+ return d.deliver(d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
+}
+
+// DeliverNodeData injects a new batch of node state data received from a remote node.
+func (d *Downloader) DeliverNodeData(id string, data [][]byte) error {
+ return d.deliver(d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
+}
+
+// DeliverSnapPacket is invoked from a peer's message handler when it transmits a
+// data packet for the local node to consume.
+func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
+ switch packet := packet.(type) {
+ case *snap.AccountRangePacket:
+ hashes, accounts, err := packet.Unpack()
+ if err != nil {
+ return err
+ }
+ return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)
+
+ case *snap.StorageRangesPacket:
+ hashset, slotset := packet.Unpack()
+ return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)
+
+ case *snap.ByteCodesPacket:
+ return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)
+
+ case *snap.TrieNodesPacket:
+ return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)
+
+ default:
+ return fmt.Errorf("unexpected snap packet type: %T", packet)
+ }
+}
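+
+// A minimal caller-side sketch (assumed wiring, not part of this change): the
+// snap protocol handler would route every inbound packet through the type
+// switch above, e.g.
+//
+//	if err := downloader.DeliverSnapPacket(peer, packet); err != nil {
+//		log.Debug("Failed to deliver snap packet", "err", err)
+//	}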
+
+// deliver injects a new batch of data received from a remote node.
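+// The cancel channel is snapshotted under cancelLock so that a concurrent
+// Cancel, which closes and replaces d.cancelCh, cannot race the send below;
+// a nil snapshot means no sync is currently active.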
+func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
+ // Update the delivery metrics for both good and failed deliveries
+ inMeter.Mark(int64(packet.Items()))
+ defer func() {
+ if err != nil {
+ dropMeter.Mark(int64(packet.Items()))
+ }
+ }()
+ // Deliver or abort if the sync is canceled while queuing
+ d.cancelLock.RLock()
+ cancel := d.cancelCh
+ d.cancelLock.RUnlock()
+ if cancel == nil {
+ return errNoSyncActive
+ }
+ select {
+ case destCh <- packet:
+ return nil
+ case <-cancel:
+ return errNoSyncActive
+ }
+}
diff --git a/les/downloader/downloader_test.go b/les/downloader/downloader_test.go
new file mode 100644
index 000000000..17cd3630c
--- /dev/null
+++ b/les/downloader/downloader_test.go
@@ -0,0 +1,1622 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth/protocols/eth"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+// Reduce some of the parameters to make the tester faster.
+func init() {
+ fullMaxForkAncestry = 10000
+ lightMaxForkAncestry = 10000
+ blockCacheMaxItems = 1024
+ fsHeaderContCheck = 500 * time.Millisecond
+}
+
+// downloadTester is a test simulator for mocking out the local block chain.
+type downloadTester struct {
+ downloader *Downloader
+
+ genesis *types.Block // Genesis block used by the tester and peers
+ stateDb ethdb.Database // Database used by the tester for syncing from peers
+ peerDb ethdb.Database // Database of the peers containing all data
+ peers map[string]*downloadTesterPeer
+
+ ownHashes []common.Hash // Hash chain belonging to the tester
+ ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester
+ ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester
+ ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
+ ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain
+
+ ancientHeaders map[common.Hash]*types.Header // Ancient headers belonging to the tester
+ ancientBlocks map[common.Hash]*types.Block // Ancient blocks belonging to the tester
+ ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester
+ ancientChainTd map[common.Hash]*big.Int // Ancient total difficulties of the blocks in the local chain
+
+ lock sync.RWMutex
+}
+
+// newTester creates a new downloader test mocker.
+func newTester() *downloadTester {
+ tester := &downloadTester{
+ genesis: testGenesis,
+ peerDb: testDB,
+ peers: make(map[string]*downloadTesterPeer),
+ ownHashes: []common.Hash{testGenesis.Hash()},
+ ownHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
+ ownBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
+ ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
+ ownChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
+
+ // Initialize ancient store with test genesis block
+ ancientHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},
+ ancientBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},
+ ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},
+ ancientChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},
+ }
+ tester.stateDb = rawdb.NewMemoryDatabase()
+ tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
+
+ tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
+ return tester
+}
+
+// terminate aborts any operations on the embedded downloader and releases all
+// held resources.
+func (dl *downloadTester) terminate() {
+ dl.downloader.Terminate()
+}
+
+// sync starts synchronizing with a remote peer, blocking until it completes.
+func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
+ dl.lock.RLock()
+ hash := dl.peers[id].chain.headBlock().Hash()
+ // If no particular TD was requested, load from the peer's blockchain
+ if td == nil {
+ td = dl.peers[id].chain.td(hash)
+ }
+ dl.lock.RUnlock()
+
+ // Synchronise with the chosen peer and ensure proper cleanup afterwards
+ err := dl.downloader.synchronise(id, hash, td, mode)
+ select {
+ case <-dl.downloader.cancelCh:
+ // Ok, downloader fully cancelled after sync cycle
+ default:
+ // Downloader is still accepting packets, which can tie a peer up
+ panic("downloader active post sync cycle") // panic will be caught by tester
+ }
+ return err
+}
+
+// HasHeader checks if a header is present in the tester's canonical chain.
+func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {
+ return dl.GetHeaderByHash(hash) != nil
+}
+
+// HasBlock checks if a block is present in the tester's canonical chain.
+func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {
+ return dl.GetBlockByHash(hash) != nil
+}
+
+// HasFastBlock checks if a block (with its receipts) is present in the tester's canonical chain.
+func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if _, ok := dl.ancientReceipts[hash]; ok {
+ return true
+ }
+ _, ok := dl.ownReceipts[hash]
+ return ok
+}
+
+// GetHeaderByHash retrieves a header from the tester's canonical chain.
+func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+ return dl.getHeaderByHash(hash)
+}
+
+// getHeaderByHash returns the header if found, either within ancients or own blocks.
+// This method assumes that the caller holds at least the read-lock (dl.lock).
+func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
+ header := dl.ancientHeaders[hash]
+ if header != nil {
+ return header
+ }
+ return dl.ownHeaders[hash]
+}
+
+// GetBlockByHash retrieves a block from the tester's canonical chain.
+func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ block := dl.ancientBlocks[hash]
+ if block != nil {
+ return block
+ }
+ return dl.ownBlocks[hash]
+}
+
+// CurrentHeader retrieves the current head header from the canonical chain.
+func (dl *downloadTester) CurrentHeader() *types.Header {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ for i := len(dl.ownHashes) - 1; i >= 0; i-- {
+ if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {
+ return header
+ }
+ if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {
+ return header
+ }
+ }
+ return dl.genesis.Header()
+}
+
+// CurrentBlock retrieves the current head block from the canonical chain.
+func (dl *downloadTester) CurrentBlock() *types.Block {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ for i := len(dl.ownHashes) - 1; i >= 0; i-- {
+ if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
+ // Ancient blocks are returned regardless of whether their state is still
+ // present: by the time a block is frozen, its state is not expected to
+ // live in the active database anyway.
+ return block
+ }
+ if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
+ if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {
+ return block
+ }
+ }
+ }
+ return dl.genesis
+}
+
+// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.
+func (dl *downloadTester) CurrentFastBlock() *types.Block {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ for i := len(dl.ownHashes) - 1; i >= 0; i-- {
+ if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {
+ return block
+ }
+ if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {
+ return block
+ }
+ }
+ return dl.genesis
+}
+
+// FastSyncCommitHead manually sets the head block to a given hash.
+func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
+ // For now only check that the state trie is correct
+ if block := dl.GetBlockByHash(hash); block != nil {
+ _, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
+ return err
+ }
+ return fmt.Errorf("non existent block: %x", hash[:4])
+}
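+
+// The trie.NewSecure call above doubles as the actual state check: it fails
+// if the block's state root cannot be resolved from dl.stateDb.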
+
+// GetTd retrieves the block's total difficulty from the canonical chain.
+func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.getTd(hash)
+}
+
+// getTd retrieves the block's total difficulty if found, either within
+// ancients or own blocks.
+// This method assumes that the caller holds at least the read-lock (dl.lock).
+func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
+ if td := dl.ancientChainTd[hash]; td != nil {
+ return td
+ }
+ return dl.ownChainTd[hash]
+}
+
+// InsertHeaderChain injects a new batch of headers into the simulated chain.
+func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+ // Do a quick check, as blockchain.InsertHeaderChain doesn't insert anything in case of errors
+ if dl.getHeaderByHash(headers[0].ParentHash) == nil {
+ return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
+ }
+ var hashes []common.Hash
+ for i := 1; i < len(headers); i++ {
+ hash := headers[i-1].Hash()
+ if headers[i].ParentHash != headers[i-1].Hash() {
+ return i, fmt.Errorf("non-contiguous import at position %d", i)
+ }
+ hashes = append(hashes, hash)
+ }
+ hashes = append(hashes, headers[len(headers)-1].Hash())
+ // Do a full insert if pre-checks passed
+ for i, header := range headers {
+ hash := hashes[i]
+ if dl.getHeaderByHash(hash) != nil {
+ continue
+ }
+ if dl.getHeaderByHash(header.ParentHash) == nil {
+ // This _should_ be impossible, due to precheck and induction
+ return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
+ }
+ dl.ownHashes = append(dl.ownHashes, hash)
+ dl.ownHeaders[hash] = header
+
+ td := dl.getTd(header.ParentHash)
+ dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty)
+ }
+ return len(headers), nil
+}
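+
+// An example of the contiguity precheck (hypothetical values): for headers
+// numbered [5, 6, 8], headers[2].ParentHash is the hash of block 7, which does
+// not match headers[1].Hash(), so the insert aborts at position 2 with a
+// non-contiguous import error, mirroring blockchain.InsertHeaderChain.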
+
+// InsertChain injects a new batch of blocks into the simulated chain.
+func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+ for i, block := range blocks {
+ if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
+ return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
+ } else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
+ return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
+ }
+ if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {
+ dl.ownHashes = append(dl.ownHashes, block.Hash())
+ dl.ownHeaders[block.Hash()] = block.Header()
+ }
+ dl.ownBlocks[block.Hash()] = block
+ dl.ownReceipts[block.Hash()] = make(types.Receipts, 0)
+ dl.stateDb.Put(block.Root().Bytes(), []byte{0x00})
+ td := dl.getTd(block.ParentHash())
+ dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())
+ }
+ return len(blocks), nil
+}
+
+// InsertReceiptChain injects a new batch of receipts into the simulated chain.
+func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ for i := 0; i < len(blocks) && i < len(receipts); i++ {
+ if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
+ return i, errors.New("unknown owner")
+ }
+ if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
+ if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
+ return i, errors.New("InsertReceiptChain: unknown parent")
+ }
+ }
+ if blocks[i].NumberU64() <= ancientLimit {
+ dl.ancientBlocks[blocks[i].Hash()] = blocks[i]
+ dl.ancientReceipts[blocks[i].Hash()] = receipts[i]
+
+ // Migrate from active db to ancient db
+ dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()
+ dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())
+ delete(dl.ownHeaders, blocks[i].Hash())
+ delete(dl.ownChainTd, blocks[i].Hash())
+ } else {
+ dl.ownBlocks[blocks[i].Hash()] = blocks[i]
+ dl.ownReceipts[blocks[i].Hash()] = receipts[i]
+ }
+ }
+ return len(blocks), nil
+}
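+
+// An illustration of the ancient split (hypothetical numbers): with
+// ancientLimit=100, block #90 and its receipts migrate into the ancient* maps
+// (and its header/TD entries leave the own* maps), while block #150 stays in
+// ownBlocks/ownReceipts.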
+
+// SetHead rewinds the local chain to a new head.
+func (dl *downloadTester) SetHead(head uint64) error {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ // Find the hash of the head to reset to
+ var hash common.Hash
+ for h, header := range dl.ownHeaders {
+ if header.Number.Uint64() == head {
+ hash = h
+ }
+ }
+ for h, header := range dl.ancientHeaders {
+ if header.Number.Uint64() == head {
+ hash = h
+ }
+ }
+ if hash == (common.Hash{}) {
+ return fmt.Errorf("unknown head to set: %d", head)
+ }
+ // Find the offset in the header chain
+ var offset int
+ for o, h := range dl.ownHashes {
+ if h == hash {
+ offset = o
+ break
+ }
+ }
+ // Remove all the hashes and associated data afterwards
+ for i := offset + 1; i < len(dl.ownHashes); i++ {
+ delete(dl.ownChainTd, dl.ownHashes[i])
+ delete(dl.ownHeaders, dl.ownHashes[i])
+ delete(dl.ownReceipts, dl.ownHashes[i])
+ delete(dl.ownBlocks, dl.ownHashes[i])
+
+ delete(dl.ancientChainTd, dl.ownHashes[i])
+ delete(dl.ancientHeaders, dl.ownHashes[i])
+ delete(dl.ancientReceipts, dl.ownHashes[i])
+ delete(dl.ancientBlocks, dl.ownHashes[i])
+ }
+ dl.ownHashes = dl.ownHashes[:offset+1]
+ return nil
+}
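+
+// Note that the rewind deletes from both the own* and ancient* maps for every
+// discarded hash: dl.ownHashes indexes the canonical chain regardless of which
+// store a given block currently lives in.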
+
+// Rollback removes some recently added elements from the chain.
+func (dl *downloadTester) Rollback(hashes []common.Hash) {
+}
+
+// newPeer registers a new block download source into the downloader.
+func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ peer := &downloadTesterPeer{dl: dl, id: id, chain: chain}
+ dl.peers[id] = peer
+ return dl.downloader.RegisterPeer(id, version, peer)
+}
+
+// dropPeer simulates a hard peer removal from the connection pool.
+func (dl *downloadTester) dropPeer(id string) {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ delete(dl.peers, id)
+ dl.downloader.UnregisterPeer(id)
+}
+
+// Snapshots implements the BlockChain interface for the downloader, but is a noop.
+func (dl *downloadTester) Snapshots() *snapshot.Tree {
+ return nil
+}
+
+type downloadTesterPeer struct {
+ dl *downloadTester
+ id string
+ chain *testChain
+ missingStates map[common.Hash]bool // State entries that fast sync should not return
+}
+
+// Head retrieves the peer's current head hash and total difficulty.
+func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
+ b := dlp.chain.headBlock()
+ return b.Hash(), dlp.chain.td(b.Hash())
+}
+
+// RequestHeadersByHash implements a GetBlockHeaders request based on a hashed
+// origin, associated with a particular peer in the download tester. The matching
+// headers are delivered to the downloader asynchronously.
+func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
+ result := dlp.chain.headersByHash(origin, amount, skip, reverse)
+ go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
+ return nil
+}
+
+// RequestHeadersByNumber implements a GetBlockHeaders request based on a numbered
+// origin, associated with a particular peer in the download tester. The matching
+// headers are delivered to the downloader asynchronously.
+func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
+ result := dlp.chain.headersByNumber(origin, amount, skip, reverse)
+ go dlp.dl.downloader.DeliverHeaders(dlp.id, result)
+ return nil
+}
+
+// RequestBodies implements a getBlockBodies request associated with a particular
+// peer in the download tester, delivering the requested block bodies asynchronously.
+func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {
+ txs, uncles := dlp.chain.bodies(hashes)
+ go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)
+ return nil
+}
+
+// RequestReceipts implements a getReceipts request associated with a particular
+// peer in the download tester, delivering the requested block receipts asynchronously.
+func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {
+ receipts := dlp.chain.receipts(hashes)
+ go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)
+ return nil
+}
+
+// RequestNodeData implements a getNodeData request associated with a particular
+// peer in the download tester, delivering the requested node state data asynchronously.
+func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {
+ dlp.dl.lock.RLock()
+ defer dlp.dl.lock.RUnlock()
+
+ results := make([][]byte, 0, len(hashes))
+ for _, hash := range hashes {
+ if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {
+ if !dlp.missingStates[hash] {
+ results = append(results, data)
+ }
+ }
+ }
+ go dlp.dl.downloader.DeliverNodeData(dlp.id, results)
+ return nil
+}
+
+// assertOwnChain checks if the local chain contains the correct number of items
+// of the various chain components.
+func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
+ // Mark this method as a helper to report errors at callsite, not in here
+ t.Helper()
+
+ assertOwnForkedChain(t, tester, 1, []int{length})
+}
+
+// assertOwnForkedChain checks if the local forked chain contains the correct
+// number of items of the various chain components.
+func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {
+ // Mark this method as a helper to report errors at callsite, not in here
+ t.Helper()
+
+ // Initialize the counters for the first fork
+ headers, blocks, receipts := lengths[0], lengths[0], lengths[0]
+
+ // Update the counters for each subsequent fork
+ for _, length := range lengths[1:] {
+ headers += length - common
+ blocks += length - common
+ receipts += length - common
+ }
+ if tester.downloader.getMode() == LightSync {
+ blocks, receipts = 1, 1
+ }
+ if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {
+ t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
+ }
+ if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {
+ t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
+ }
+ if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {
+ t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
+ }
+}
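+
+// A worked example of the counting (hypothetical lengths): common=10 and
+// lengths={15, 18} give headers = blocks = receipts = 15 + (18-10) = 23. For
+// LightSync the block and receipt expectations collapse to 1, since only
+// headers are fetched; the -1 in the checks above discounts the genesis.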
+
+func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) }
+func TestCanonicalSynchronisation66Fast(t *testing.T) { testCanonSync(t, eth.ETH66, FastSync) }
+func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) }
+
+func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ // Create a small enough block chain to download
+ chain := testChainBase.shorten(blockCacheMaxItems - 15)
+ tester.newPeer("peer", protocol, chain)
+
+ // Synchronise with the peer and make sure all relevant data was retrieved
+ if err := tester.sync("peer", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chain.len())
+}
+
+// Tests that if a large batch of blocks is being downloaded, it is throttled
+// until the cached blocks are retrieved.
+func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) }
+func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) }
+
+func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+ tester := newTester()
+
+ // Create a long block chain to download and the tester
+ targetBlocks := testChainBase.len() - 1
+ tester.newPeer("peer", protocol, testChainBase)
+
+ // Wrap the importer to allow stepping
+ blocked, proceed := uint32(0), make(chan struct{})
+ tester.downloader.chainInsertHook = func(results []*fetchResult) {
+ atomic.StoreUint32(&blocked, uint32(len(results)))
+ <-proceed
+ }
+ // Start a synchronisation concurrently
+ errc := make(chan error, 1)
+ go func() {
+ errc <- tester.sync("peer", nil, mode)
+ }()
+ // Iteratively take some blocks, always checking the retrieval count
+ for {
+ // Check the retrieval count synchronously (the reason for this ugly block)
+ tester.lock.RLock()
+ retrieved := len(tester.ownBlocks)
+ tester.lock.RUnlock()
+ if retrieved >= targetBlocks+1 {
+ break
+ }
+ // Wait a bit for sync to throttle itself
+ var cached, frozen int
+ for start := time.Now(); time.Since(start) < 3*time.Second; {
+ time.Sleep(25 * time.Millisecond)
+
+ tester.lock.Lock()
+ tester.downloader.queue.lock.Lock()
+ tester.downloader.queue.resultCache.lock.Lock()
+ {
+ cached = tester.downloader.queue.resultCache.countCompleted()
+ frozen = int(atomic.LoadUint32(&blocked))
+ retrieved = len(tester.ownBlocks)
+ }
+ tester.downloader.queue.resultCache.lock.Unlock()
+ tester.downloader.queue.lock.Unlock()
+ tester.lock.Unlock()
+
+ if cached == blockCacheMaxItems ||
+ cached == blockCacheMaxItems-reorgProtHeaderDelay ||
+ retrieved+cached+frozen == targetBlocks+1 ||
+ retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
+ break
+ }
+ }
+ // Make sure we filled up the cache, then exhaust it
+ time.Sleep(25 * time.Millisecond) // give it a chance to screw up
+ tester.lock.RLock()
+ retrieved = len(tester.ownBlocks)
+ tester.lock.RUnlock()
+ if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
+ t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
+ }
+
+ // Permit the blocked blocks to import
+ if atomic.LoadUint32(&blocked) > 0 {
+ atomic.StoreUint32(&blocked, uint32(0))
+ proceed <- struct{}{}
+ }
+ }
+ // Check that we haven't pulled more blocks than available
+ assertOwnChain(t, tester, targetBlocks+1)
+ if err := <-errc; err != nil {
+ t.Fatalf("block synchronization failed: %v", err)
+ }
+ tester.terminate()
+}
+
+// Tests that simple synchronization against a forked chain works correctly. In
+// this test common ancestor lookup should *not* be short circuited, and a full
+// binary search should be executed.
+func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) }
+func TestForkedSync66Fast(t *testing.T) { testForkedSync(t, eth.ETH66, FastSync) }
+func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) }
+
+func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
+ chainB := testChainForkLightB.shorten(testChainBase.len() + 80)
+ tester.newPeer("fork A", protocol, chainA)
+ tester.newPeer("fork B", protocol, chainB)
+ // Synchronise with the peer and make sure all blocks were retrieved
+ if err := tester.sync("fork A", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chainA.len())
+
+ // Synchronise with the second peer and make sure that fork is pulled too
+ if err := tester.sync("fork B", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
+}
+
+// Tests that synchronising against a much shorter but much heavier fork works
+// correctly and is not dropped.
+func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) }
+func TestHeavyForkedSync66Fast(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FastSync) }
+func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }
+
+func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ chainA := testChainForkLightA.shorten(testChainBase.len() + 80)
+ chainB := testChainForkHeavy.shorten(testChainBase.len() + 80)
+ tester.newPeer("light", protocol, chainA)
+ tester.newPeer("heavy", protocol, chainB)
+
+ // Synchronise with the peer and make sure all blocks were retrieved
+ if err := tester.sync("light", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chainA.len())
+
+ // Synchronise with the second peer and make sure that fork is pulled too
+ if err := tester.sync("heavy", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})
+}
+
+// Tests that chain forks are contained within a certain interval of the current
+// chain head, ensuring that malicious peers cannot waste resources by feeding
+// long dead chains.
+func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) }
+func TestBoundedForkedSync66Fast(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FastSync) }
+func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) }
+
+func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ chainA := testChainForkLightA
+ chainB := testChainForkLightB
+ tester.newPeer("original", protocol, chainA)
+ tester.newPeer("rewriter", protocol, chainB)
+
+ // Synchronise with the peer and make sure all blocks were retrieved
+ if err := tester.sync("original", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chainA.len())
+
+ // Synchronise with the second peer and ensure that the fork is rejected for being too old
+ if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor {
+ t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
+ }
+}
+
+// Tests that chain forks are contained within a certain interval of the current
+// chain head for short but heavy forks too. These are a bit special because they
+// take different ancestor lookup paths.
+func TestBoundedHeavyForkedSync66Full(t *testing.T) {
+ testBoundedHeavyForkedSync(t, eth.ETH66, FullSync)
+}
+func TestBoundedHeavyForkedSync66Fast(t *testing.T) {
+ testBoundedHeavyForkedSync(t, eth.ETH66, FastSync)
+}
+func TestBoundedHeavyForkedSync66Light(t *testing.T) {
+ testBoundedHeavyForkedSync(t, eth.ETH66, LightSync)
+}
+
+func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+ tester := newTester()
+
+ // Create a long enough forked chain
+ chainA := testChainForkLightA
+ chainB := testChainForkHeavy
+ tester.newPeer("original", protocol, chainA)
+
+ // Synchronise with the peer and make sure all blocks were retrieved
+ if err := tester.sync("original", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chainA.len())
+
+ tester.newPeer("heavy-rewriter", protocol, chainB)
+ // Synchronise with the second peer and ensure that the fork is rejected for being too old
+ if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor {
+ t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor)
+ }
+ tester.terminate()
+}
+
+// Tests that an inactive downloader will not accept incoming block headers,
+// bodies and receipts.
+func TestInactiveDownloader63(t *testing.T) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ // Check that neither block headers, bodies nor receipts are accepted
+ if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive {
+ t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
+ }
+ if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {
+ t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
+ }
+ if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive {
+ t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
+ }
+}
+
+// Tests that a canceled download wipes all previously accumulated state.
+func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) }
+func TestCancel66Fast(t *testing.T) { testCancel(t, eth.ETH66, FastSync) }
+func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) }
+
+func testCancel(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ chain := testChainBase.shorten(MaxHeaderFetch)
+ tester.newPeer("peer", protocol, chain)
+
+ // Make sure canceling works with a pristine downloader
+ tester.downloader.Cancel()
+ if !tester.downloader.queue.Idle() {
+ t.Errorf("download queue not idle")
+ }
+ // Synchronise with the peer, but cancel afterwards
+ if err := tester.sync("peer", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ tester.downloader.Cancel()
+ if !tester.downloader.queue.Idle() {
+ t.Errorf("download queue not idle")
+ }
+}
+
+// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
+func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) }
+func TestMultiSynchronisation66Fast(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FastSync) }
+func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) }
+
+func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ // Create various peers with various parts of the chain
+ targetPeers := 8
+ chain := testChainBase.shorten(targetPeers * 100)
+
+ for i := 0; i < targetPeers; i++ {
+ id := fmt.Sprintf("peer #%d", i)
+ tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))
+ }
+ if err := tester.sync("peer #0", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chain.len())
+}
+
+// Tests that synchronisations behave well in multi-version protocol environments
+// and do not wreak havoc on other nodes in the network.
+func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) }
+func TestMultiProtoSynchronisation66Fast(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FastSync) }
+func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) }
+
+func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ // Create a small enough block chain to download
+ chain := testChainBase.shorten(blockCacheMaxItems - 15)
+
+ // Create peers of every type
+ tester.newPeer("peer 66", eth.ETH66, chain)
+ //tester.newPeer("peer 65", eth.ETH67, chain)
+
+ // Synchronise with the requested peer and make sure all blocks were retrieved
+ if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chain.len())
+
+ // Check that no peers have been dropped off
+ for _, version := range []int{66} {
+ peer := fmt.Sprintf("peer %d", version)
+ if _, ok := tester.peers[peer]; !ok {
+ t.Errorf("%s dropped", peer)
+ }
+ }
+}
+
+// Tests that if a block is empty (e.g. header only), no body request should be
+// made, and instead the header should be assembled into a whole block by itself.
+func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) }
+func TestEmptyShortCircuit66Fast(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FastSync) }
+func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) }
+
+func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ // Create a block chain to download
+ chain := testChainBase
+ tester.newPeer("peer", protocol, chain)
+
+ // Instrument the downloader to signal body requests
+ bodiesHave, receiptsHave := int32(0), int32(0)
+ tester.downloader.bodyFetchHook = func(headers []*types.Header) {
+ atomic.AddInt32(&bodiesHave, int32(len(headers)))
+ }
+ tester.downloader.receiptFetchHook = func(headers []*types.Header) {
+ atomic.AddInt32(&receiptsHave, int32(len(headers)))
+ }
+ // Synchronise with the peer and make sure all blocks were retrieved
+ if err := tester.sync("peer", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chain.len())
+
+ // Validate the number of block bodies that should have been requested
+ bodiesNeeded, receiptsNeeded := 0, 0
+ for _, block := range chain.blockm {
+ if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
+ bodiesNeeded++
+ }
+ }
+ for _, receipt := range chain.receiptm {
+ if mode == FastSync && len(receipt) > 0 {
+ receiptsNeeded++
+ }
+ }
+ if int(bodiesHave) != bodiesNeeded {
+ t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
+ }
+ if int(receiptsHave) != receiptsNeeded {
+ t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
+ }
+}
+
+// Tests that headers are enqueued continuously, preventing malicious nodes from
+// stalling the downloader by feeding gapped header chains.
+func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) }
+func TestMissingHeaderAttack66Fast(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FastSync) }
+func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) }
+
+func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ chain := testChainBase.shorten(blockCacheMaxItems - 15)
+ brokenChain := chain.shorten(chain.len())
+ delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])
+ tester.newPeer("attack", protocol, brokenChain)
+
+ if err := tester.sync("attack", nil, mode); err == nil {
+ t.Fatalf("succeeded attacker synchronisation")
+ }
+ // Synchronise with the valid peer and make sure sync succeeds
+ tester.newPeer("valid", protocol, chain)
+ if err := tester.sync("valid", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chain.len())
+}
+
+// Tests that if requested headers are shifted (i.e. first is missing), the queue
+// detects the invalid numbering.
+func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) }
+func TestShiftedHeaderAttack66Fast(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FastSync) }
+func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) }
+
+func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+
+ chain := testChainBase.shorten(blockCacheMaxItems - 15)
+
+ // Attempt a full sync with an attacker feeding shifted headers
+ brokenChain := chain.shorten(chain.len())
+ delete(brokenChain.headerm, brokenChain.chain[1])
+ delete(brokenChain.blockm, brokenChain.chain[1])
+ delete(brokenChain.receiptm, brokenChain.chain[1])
+ tester.newPeer("attack", protocol, brokenChain)
+ if err := tester.sync("attack", nil, mode); err == nil {
+ t.Fatalf("succeeded attacker synchronisation")
+ }
+
+ // Synchronise with the valid peer and make sure sync succeeds
+ tester.newPeer("valid", protocol, chain)
+ if err := tester.sync("valid", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ assertOwnChain(t, tester, chain.len())
+}
+
+// Tests that upon detecting an invalid header, the recent ones are rolled back
+// for various failure scenarios. Afterwards a full sync is attempted to make
+// sure no state was corrupted.
+func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) }
+
+func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+
+ // Create a small enough block chain to download
+ targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks
+ chain := testChainBase.shorten(targetBlocks)
+
+ // Attempt to sync with an attacker that feeds junk during the fast sync phase.
+ // This should result in the last fsHeaderSafetyNet headers being rolled back.
+ missing := fsHeaderSafetyNet + MaxHeaderFetch + 1
+ fastAttackChain := chain.shorten(chain.len())
+ delete(fastAttackChain.headerm, fastAttackChain.chain[missing])
+ tester.newPeer("fast-attack", protocol, fastAttackChain)
+
+ if err := tester.sync("fast-attack", nil, mode); err == nil {
+ t.Fatalf("succeeded fast attacker synchronisation")
+ }
+ if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {
+ t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch)
+ }
+
+ // Attempt to sync with an attacker that feeds junk during the block import phase.
+ // This should result in both the last fsHeaderSafetyNet number of headers being
+ // rolled back, and also the pivot point being reverted to a non-block status.
+ missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1
+ blockAttackChain := chain.shorten(chain.len())
+ delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in
+ delete(blockAttackChain.headerm, blockAttackChain.chain[missing])
+ tester.newPeer("block-attack", protocol, blockAttackChain)
+
+ if err := tester.sync("block-attack", nil, mode); err == nil {
+ t.Fatalf("succeeded block attacker synchronisation")
+ }
+ if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
+ t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
+ }
+ if mode == FastSync {
+ if head := tester.CurrentBlock().NumberU64(); head != 0 {
+ t.Errorf("fast sync pivot block #%d not rolled back", head)
+ }
+ }
+
+ // Attempt to sync with an attacker that withholds promised blocks after the
+ // fast sync pivot point. This could be an attempt to leave the node with a bad
+ // but already imported pivot block.
+ withholdAttackChain := chain.shorten(chain.len())
+ tester.newPeer("withhold-attack", protocol, withholdAttackChain)
+ tester.downloader.syncInitHook = func(uint64, uint64) {
+ for i := missing; i < withholdAttackChain.len(); i++ {
+ delete(withholdAttackChain.headerm, withholdAttackChain.chain[i])
+ }
+ tester.downloader.syncInitHook = nil
+ }
+ if err := tester.sync("withhold-attack", nil, mode); err == nil {
+ t.Fatalf("succeeded withholding attacker synchronisation")
+ }
+ if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {
+ t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
+ }
+ if mode == FastSync {
+ if head := tester.CurrentBlock().NumberU64(); head != 0 {
+ t.Errorf("fast sync pivot block #%d not rolled back", head)
+ }
+ }
+
+ // Synchronise with the valid peer and make sure sync succeeds. Since the last rollback
+ // should also disable fast syncing for this process, verify that we did a fresh full
+ // sync. Note, we can't assert anything about the receipts since we won't purge the
+ // database of them, hence we can't use assertOwnChain.
+ tester.newPeer("valid", protocol, chain)
+ if err := tester.sync("valid", nil, mode); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
+ }
+ if hs := len(tester.ownHeaders); hs != chain.len() {
+ t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len())
+ }
+ if mode != LightSync {
+ if bs := len(tester.ownBlocks); bs != chain.len() {
+ t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len())
+ }
+ }
+ tester.terminate()
+}
+
+// Tests that a peer advertising a high TD doesn't get to stall the downloader
+// afterwards by not sending any useful hashes.
+func TestHighTDStarvationAttack66Full(t *testing.T) {
+ testHighTDStarvationAttack(t, eth.ETH66, FullSync)
+}
+func TestHighTDStarvationAttack66Fast(t *testing.T) {
+ testHighTDStarvationAttack(t, eth.ETH66, FastSync)
+}
+func TestHighTDStarvationAttack66Light(t *testing.T) {
+ testHighTDStarvationAttack(t, eth.ETH66, LightSync)
+}
+
+func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+
+ chain := testChainBase.shorten(1)
+ tester.newPeer("attack", protocol, chain)
+ if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
+ t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
+ }
+ tester.terminate()
+}
+
+// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
+func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) }
+
+func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
+ t.Parallel()
+
+ // Define the disconnection requirement for individual hash fetch errors
+ tests := []struct {
+ result error
+ drop bool
+ }{
+ {nil, false}, // Sync succeeded, all is well
+ {errBusy, false}, // Sync is already in progress, no problem
+ {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop
+ {errBadPeer, true}, // Peer was deemed bad for some reason, drop it
+ {errStallingPeer, true}, // Peer was detected to be stalling, drop it
+ {errUnsyncedPeer, true}, // Peer was detected to be unsynced, drop it
+ {errNoPeers, false}, // No peers to download from, soft race, no issue
+ {errTimeout, true}, // No hashes received in due time, drop the peer
+ {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end
+ {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser
+ {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter
+ {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop
+ {errInvalidBody, false}, // A bad peer was detected, but not the sync origin
+ {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin
+ {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop
+ }
+ // Run the tests and check disconnection status
+ tester := newTester()
+ defer tester.terminate()
+ chain := testChainBase.shorten(1)
+
+ for i, tt := range tests {
+ // Register a new peer and ensure its presence
+ id := fmt.Sprintf("test %d", i)
+ if err := tester.newPeer(id, protocol, chain); err != nil {
+ t.Fatalf("test %d: failed to register new peer: %v", i, err)
+ }
+ if _, ok := tester.peers[id]; !ok {
+ t.Fatalf("test %d: registered peer not found", i)
+ }
+ // Simulate a synchronisation and check the required result
+ tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
+
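+ // The synchroniseMock hook short-circuits the actual sync, so each table
+ // entry exercises only the error-to-drop handling inside Synchronise.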
+ tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
+ if _, ok := tester.peers[id]; !ok != tt.drop {
+ t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
+ }
+ }
+}
+
+// Tests that synchronisation progress (origin block number, current block number
+// and highest block number) is tracked and updated correctly.
+func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) }
+func TestSyncProgress66Fast(t *testing.T) { testSyncProgress(t, eth.ETH66, FastSync) }
+func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) }
+
+func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+ chain := testChainBase.shorten(blockCacheMaxItems - 15)
+
+ // Set a sync init hook to catch progress changes
+ starting := make(chan struct{})
+ progress := make(chan struct{})
+
+ tester.downloader.syncInitHook = func(origin, latest uint64) {
+ starting <- struct{}{}
+ <-progress
+ }
+ checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
+
+ // Synchronise half the blocks and check initial progress
+ tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2))
+ pending := new(sync.WaitGroup)
+ pending.Add(1)
+
+ go func() {
+ defer pending.Done()
+ if err := tester.sync("peer-half", nil, mode); err != nil {
+ panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
+ }
+ }()
+ <-starting
+ checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
+ HighestBlock: uint64(chain.len()/2 - 1),
+ })
+ progress <- struct{}{}
+ pending.Wait()
+
+ // Synchronise all the blocks and check continuation progress
+ tester.newPeer("peer-full", protocol, chain)
+ pending.Add(1)
+ go func() {
+ defer pending.Done()
+ if err := tester.sync("peer-full", nil, mode); err != nil {
+ panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
+ }
+ }()
+ <-starting
+ checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
+ StartingBlock: uint64(chain.len()/2 - 1),
+ CurrentBlock: uint64(chain.len()/2 - 1),
+ HighestBlock: uint64(chain.len() - 1),
+ })
+
+ // Check final progress after successful sync
+ progress <- struct{}{}
+ pending.Wait()
+ checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
+ StartingBlock: uint64(chain.len()/2 - 1),
+ CurrentBlock: uint64(chain.len() - 1),
+ HighestBlock: uint64(chain.len() - 1),
+ })
+}
+
+func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {
+ // Mark this method as a helper to report errors at callsite, not in here
+ t.Helper()
+
+ p := d.Progress()
+ p.KnownStates, p.PulledStates = 0, 0
+ want.KnownStates, want.PulledStates = 0, 0
+ if p != want {
+ t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
+ }
+}
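+
+// KnownStates and PulledStates are zeroed on both sides above, most likely
+// because state sync counters are timing dependent and would make the
+// comparison flaky.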
+
+// Tests that synchronisation progress (origin block number and highest block
+// number) is tracked and updated correctly in case of a fork (or manual head
+// reversal).
+func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) }
+func TestForkedSyncProgress66Fast(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FastSync) }
+func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) }
+
+func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+ chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch)
+ chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch)
+
+ // Set a sync init hook to catch progress changes
+ starting := make(chan struct{})
+ progress := make(chan struct{})
+
+ tester.downloader.syncInitHook = func(origin, latest uint64) {
+ starting <- struct{}{}
+ <-progress
+ }
+ checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
+
+ // Synchronise with one of the forks and check progress
+ tester.newPeer("fork A", protocol, chainA)
+ pending := new(sync.WaitGroup)
+ pending.Add(1)
+ go func() {
+ defer pending.Done()
+ if err := tester.sync("fork A", nil, mode); err != nil {
+ panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
+ }
+ }()
+ <-starting
+
+ checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
+ HighestBlock: uint64(chainA.len() - 1),
+ })
+ progress <- struct{}{}
+ pending.Wait()
+
+ // Simulate a successful sync above the fork
+ tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight
+
+ // Synchronise with the second fork and check progress resets
+ tester.newPeer("fork B", protocol, chainB)
+ pending.Add(1)
+ go func() {
+ defer pending.Done()
+ if err := tester.sync("fork B", nil, mode); err != nil {
+ panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
+ }
+ }()
+ <-starting
+ checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{
+ StartingBlock: uint64(testChainBase.len()) - 1,
+ CurrentBlock: uint64(chainA.len() - 1),
+ HighestBlock: uint64(chainB.len() - 1),
+ })
+
+ // Check final progress after successful sync
+ progress <- struct{}{}
+ pending.Wait()
+ checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
+ StartingBlock: uint64(testChainBase.len()) - 1,
+ CurrentBlock: uint64(chainB.len() - 1),
+ HighestBlock: uint64(chainB.len() - 1),
+ })
+}
+
+// Tests that if synchronisation is aborted due to some failure, then the progress
+// origin is not updated in the next sync cycle, as it should be considered the
+// continuation of the previous sync and not a new instance.
+func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) }
+func TestFailedSyncProgress66Fast(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FastSync) }
+func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) }
+
+func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+ chain := testChainBase.shorten(blockCacheMaxItems - 15)
+
+ // Set a sync init hook to catch progress changes
+ starting := make(chan struct{})
+ progress := make(chan struct{})
+
+ tester.downloader.syncInitHook = func(origin, latest uint64) {
+ starting <- struct{}{}
+ <-progress
+ }
+ checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
+
+ // Attempt a full sync with a faulty peer
+ brokenChain := chain.shorten(chain.len())
+ missing := brokenChain.len() / 2
+ delete(brokenChain.headerm, brokenChain.chain[missing])
+ delete(brokenChain.blockm, brokenChain.chain[missing])
+ delete(brokenChain.receiptm, brokenChain.chain[missing])
+ tester.newPeer("faulty", protocol, brokenChain)
+
+ pending := new(sync.WaitGroup)
+ pending.Add(1)
+ go func() {
+ defer pending.Done()
+ if err := tester.sync("faulty", nil, mode); err == nil {
+ panic("succeeded faulty synchronisation")
+ }
+ }()
+ <-starting
+ checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
+ HighestBlock: uint64(brokenChain.len() - 1),
+ })
+ progress <- struct{}{}
+ pending.Wait()
+ afterFailedSync := tester.downloader.Progress()
+
+ // Synchronise with a good peer and check that the progress origin remains the
+ // same after a failure
+ tester.newPeer("valid", protocol, chain)
+ pending.Add(1)
+ go func() {
+ defer pending.Done()
+ if err := tester.sync("valid", nil, mode); err != nil {
+ panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
+ }
+ }()
+ <-starting
+ checkProgress(t, tester.downloader, "completing", afterFailedSync)
+
+ // Check final progress after successful sync
+ progress <- struct{}{}
+ pending.Wait()
+ checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
+ CurrentBlock: uint64(chain.len() - 1),
+ HighestBlock: uint64(chain.len() - 1),
+ })
+}
+
+// Tests that if an attacker fakes a chain height, after the attack is detected,
+// the progress height is successfully reduced at the next sync invocation.
+func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) }
+func TestFakedSyncProgress66Fast(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FastSync) }
+func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) }
+
+func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ tester := newTester()
+ defer tester.terminate()
+ chain := testChainBase.shorten(blockCacheMaxItems - 15)
+
+ // Set a sync init hook to catch progress changes
+ starting := make(chan struct{})
+ progress := make(chan struct{})
+ tester.downloader.syncInitHook = func(origin, latest uint64) {
+ starting <- struct{}{}
+ <-progress
+ }
+ checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{})
+
+ // Create and sync with an attacker that promises a higher chain than available.
+ brokenChain := chain.shorten(chain.len())
+ numMissing := 5
+ for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {
+ delete(brokenChain.headerm, brokenChain.chain[i])
+ }
+ tester.newPeer("attack", protocol, brokenChain)
+
+ pending := new(sync.WaitGroup)
+ pending.Add(1)
+ go func() {
+ defer pending.Done()
+ if err := tester.sync("attack", nil, mode); err == nil {
+ panic("succeeded attacker synchronisation")
+ }
+ }()
+ <-starting
+ checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{
+ HighestBlock: uint64(brokenChain.len() - 1),
+ })
+ progress <- struct{}{}
+ pending.Wait()
+ afterFailedSync := tester.downloader.Progress()
+
+ // Synchronise with a good peer and check that the progress height has been reduced to
+ // the true value.
+ validChain := chain.shorten(chain.len() - numMissing)
+ tester.newPeer("valid", protocol, validChain)
+ pending.Add(1)
+
+ go func() {
+ defer pending.Done()
+ if err := tester.sync("valid", nil, mode); err != nil {
+ panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
+ }
+ }()
+ <-starting
+ checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{
+ CurrentBlock: afterFailedSync.CurrentBlock,
+ HighestBlock: uint64(validChain.len() - 1),
+ })
+
+ // Check final progress after successful sync.
+ progress <- struct{}{}
+ pending.Wait()
+ checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{
+ CurrentBlock: uint64(validChain.len() - 1),
+ HighestBlock: uint64(validChain.len() - 1),
+ })
+}
+
+// This test reproduces an issue where unexpected deliveries would
+// block indefinitely if they arrived at the right time.
+func TestDeliverHeadersHang66Full(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FullSync) }
+func TestDeliverHeadersHang66Fast(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FastSync) }
+func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) }
+
+func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ master := newTester()
+ defer master.terminate()
+ chain := testChainBase.shorten(15)
+
+ for i := 0; i < 200; i++ {
+ tester := newTester()
+ tester.peerDb = master.peerDb
+ tester.newPeer("peer", protocol, chain)
+
+ // Whenever the downloader requests headers, flood it with
+ // a lot of unrequested header deliveries.
+ tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{
+ peer: tester.downloader.peers.peers["peer"].peer,
+ tester: tester,
+ }
+ if err := tester.sync("peer", nil, mode); err != nil {
+ t.Errorf("test %d: sync failed: %v", i, err)
+ }
+ tester.terminate()
+ }
+}
+
+type floodingTestPeer struct {
+ peer Peer
+ tester *downloadTester
+}
+
+func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }
+func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {
+ return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)
+}
+func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {
+ return ftp.peer.RequestBodies(hashes)
+}
+func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {
+ return ftp.peer.RequestReceipts(hashes)
+}
+func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {
+ return ftp.peer.RequestNodeData(hashes)
+}
+
+func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {
+ deliveriesDone := make(chan struct{}, 500)
+ for i := 0; i < cap(deliveriesDone)-1; i++ {
+ peer := fmt.Sprintf("fake-peer%d", i)
+ go func() {
+ ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})
+ deliveriesDone <- struct{}{}
+ }()
+ }
+
+ // None of the extra deliveries should block.
+ timeout := time.After(60 * time.Second)
+ launched := false
+ for i := 0; i < cap(deliveriesDone); i++ {
+ select {
+ case <-deliveriesDone:
+ if !launched {
+ // Start delivering the requested headers
+ // after one of the flooding responses has arrived.
+ go func() {
+ ftp.peer.RequestHeadersByNumber(from, count, skip, reverse)
+ deliveriesDone <- struct{}{}
+ }()
+ launched = true
+ }
+ case <-timeout:
+ panic("blocked")
+ }
+ }
+ return nil
+}
+
+func TestRemoteHeaderRequestSpan(t *testing.T) {
+ testCases := []struct {
+ remoteHeight uint64
+ localHeight uint64
+ expected []int
+ }{
+ // Remote is way higher. We should ask for the remote head and go backwards
+ {1500, 1000,
+ []int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},
+ },
+ {15000, 13006,
+ []int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},
+ },
+ // Remote is pretty close to us. We don't have to fetch as many
+ {1200, 1150,
+ []int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},
+ },
+ // Remote is equal to us (so on a fork with higher td)
+ // We should get the closest couple of ancestors
+ {1500, 1500,
+ []int{1497, 1499},
+ },
+ // We're higher than the remote! Odd
+ {1000, 1500,
+ []int{997, 999},
+ },
+ // Check some weird edge cases, ensuring it behaves somewhat rationally
+ {0, 1500,
+ []int{0, 2},
+ },
+ {6000000, 0,
+ []int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},
+ },
+ {0, 0,
+ []int{0, 2},
+ },
+ }
+ reqs := func(from, count, span int) []int {
+ var r []int
+ num := from
+ for len(r) < count {
+ r = append(r, num)
+ num += span + 1
+ }
+ return r
+ }
+ for i, tt := range testCases {
+ from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)
+ data := reqs(int(from), count, span)
+
+ if max != uint64(data[len(data)-1]) {
+ t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max)
+ }
+ failed := false
+ if len(data) != len(tt.expected) {
+ failed = true
+ t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data))
+ } else {
+ for j, n := range data {
+ if n != tt.expected[j] {
+ failed = true
+ break
+ }
+ }
+ }
+ if failed {
+ res := strings.Replace(fmt.Sprint(data), " ", ",", -1)
+ exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1)
+ t.Logf("got: %v\n", res)
+ t.Logf("exp: %v\n", exp)
+ t.Errorf("test %d: wrong values", i)
+ }
+ }
+}
+
+// Tests that peers below a pre-configured checkpoint block are prevented from
+// being fast-synced from, avoiding potential cheap eclipse attacks.
+func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) }
+func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) }
+func TestCheckpointEnforcement66Light(t *testing.T) {
+ testCheckpointEnforcement(t, eth.ETH66, LightSync)
+}
+
+func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {
+ t.Parallel()
+
+ // Create a new tester with a particular hard coded checkpoint block
+ tester := newTester()
+ defer tester.terminate()
+
+ tester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256
+ chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)
+
+ // Attempt to sync with the peer and validate the result
+ tester.newPeer("peer", protocol, chain)
+
+ var expect error
+ if mode == FastSync || mode == LightSync {
+ expect = errUnsyncedPeer
+ }
+ if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) {
+ t.Fatalf("block sync error mismatch: have %v, want %v", err, expect)
+ }
+ if mode == FastSync || mode == LightSync {
+ assertOwnChain(t, tester, 1)
+ } else {
+ assertOwnChain(t, tester, chain.len())
+ }
+}
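
The span table in TestRemoteHeaderRequestSpan above can be sanity-checked by hand. Below is a minimal standalone sketch mirroring the test's reqs helper; the concrete numbers are taken from the first table entry, and anything outside the visible test code is an illustrative assumption:

```go
package main

import "fmt"

// reqs mirrors the helper in TestRemoteHeaderRequestSpan: it expands the
// (from, count, span) triple returned by calculateRequestSpan into the
// absolute header numbers that will be requested.
func reqs(from, count, span int) []int {
	var r []int
	for num := from; len(r) < count; num += span + 1 {
		r = append(r, num)
	}
	return r
}

func main() {
	// First table entry: remote at 1500, local at 1000. Twelve requests,
	// 16 blocks apart, ending one below the remote height (1499).
	fmt.Println(reqs(1323, 12, 15))
	// Output: [1323 1339 1355 1371 1387 1403 1419 1435 1451 1467 1483 1499]
}
```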
diff --git a/les/downloader/events.go b/les/downloader/events.go
new file mode 100644
index 000000000..25255a3a7
--- /dev/null
+++ b/les/downloader/events.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import "github.com/ethereum/go-ethereum/core/types"
+
+type DoneEvent struct {
+ Latest *types.Header
+}
+type StartEvent struct{}
+type FailedEvent struct{ Err error }
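
These three event types mirror the ones in eth/downloader, where they are broadcast over an event.TypeMux. A minimal sketch of a consumer, assuming the same wiring is used here (the mux plumbing itself is not part of this file):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/les/downloader"
)

func main() {
	mux := new(event.TypeMux)
	sub := mux.Subscribe(downloader.StartEvent{}, downloader.DoneEvent{}, downloader.FailedEvent{})
	defer sub.Unsubscribe()

	// Simulate a sync lifecycle; in practice the downloader posts these.
	go func() {
		mux.Post(downloader.StartEvent{})
		mux.Post(downloader.DoneEvent{Latest: &types.Header{Number: big.NewInt(42)}})
	}()
	for i := 0; i < 2; i++ {
		switch data := (<-sub.Chan()).Data.(type) {
		case downloader.StartEvent:
			fmt.Println("sync started")
		case downloader.DoneEvent:
			fmt.Println("sync finished at block", data.Latest.Number)
		case downloader.FailedEvent:
			fmt.Println("sync failed:", data.Err)
		}
	}
}
```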
diff --git a/les/downloader/metrics.go b/les/downloader/metrics.go
new file mode 100644
index 000000000..c38732043
--- /dev/null
+++ b/les/downloader/metrics.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Contains the metrics collected by the downloader.
+
+package downloader
+
+import (
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+var (
+ headerInMeter = metrics.NewRegisteredMeter("eth/downloader/headers/in", nil)
+ headerReqTimer = metrics.NewRegisteredTimer("eth/downloader/headers/req", nil)
+ headerDropMeter = metrics.NewRegisteredMeter("eth/downloader/headers/drop", nil)
+ headerTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/headers/timeout", nil)
+
+ bodyInMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/in", nil)
+ bodyReqTimer = metrics.NewRegisteredTimer("eth/downloader/bodies/req", nil)
+ bodyDropMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/drop", nil)
+ bodyTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/timeout", nil)
+
+ receiptInMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/in", nil)
+ receiptReqTimer = metrics.NewRegisteredTimer("eth/downloader/receipts/req", nil)
+ receiptDropMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil)
+ receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil)
+
+ stateInMeter = metrics.NewRegisteredMeter("eth/downloader/states/in", nil)
+ stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil)
+
+ throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil)
+)
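
For orientation: the meters count delivered items, the timers record request round-trip latency, and the drop/timeout meters tally failures. A hypothetical package-internal helper (the function name is invented; the calls match how the queue below drives these instruments, e.g. headerReqTimer.UpdateSince in DeliverHeaders):

```go
// recordHeaderDelivery sketches how the instruments above are updated on a
// successful header delivery. Assumes the surrounding package and a "time"
// import.
func recordHeaderDelivery(count int, requestedAt time.Time) {
	headerInMeter.Mark(int64(count))        // number of headers received
	headerReqTimer.UpdateSince(requestedAt) // round-trip time of the request
}
```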
diff --git a/les/downloader/modes.go b/les/downloader/modes.go
new file mode 100644
index 000000000..3ea14d22d
--- /dev/null
+++ b/les/downloader/modes.go
@@ -0,0 +1,81 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import "fmt"
+
+// SyncMode represents the synchronisation mode of the downloader.
+// It is a uint32 as it is used with atomic operations.
+type SyncMode uint32
+
+const (
+ FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks
+ FastSync // Quickly download the headers, full sync only at the chain head
+ SnapSync // Download the chain and the state via compact snapshots
+ LightSync // Download only the headers and terminate afterwards
+)
+
+func (mode SyncMode) IsValid() bool {
+ return mode >= FullSync && mode <= LightSync
+}
+
+// String implements the stringer interface.
+func (mode SyncMode) String() string {
+ switch mode {
+ case FullSync:
+ return "full"
+ case FastSync:
+ return "fast"
+ case SnapSync:
+ return "snap"
+ case LightSync:
+ return "light"
+ default:
+ return "unknown"
+ }
+}
+
+func (mode SyncMode) MarshalText() ([]byte, error) {
+ switch mode {
+ case FullSync:
+ return []byte("full"), nil
+ case FastSync:
+ return []byte("fast"), nil
+ case SnapSync:
+ return []byte("snap"), nil
+ case LightSync:
+ return []byte("light"), nil
+ default:
+ return nil, fmt.Errorf("unknown sync mode %d", mode)
+ }
+}
+
+func (mode *SyncMode) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *mode = FullSync
+ case "fast":
+ *mode = FastSync
+ case "snap":
+ *mode = SnapSync
+ case "light":
+ *mode = LightSync
+ default:
+ return fmt.Errorf(`unknown sync mode %q, want "full", "fast", "snap" or "light"`, text)
+ }
+ return nil
+}
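
Because SyncMode implements encoding.TextMarshaler and encoding.TextUnmarshaler, it can be used directly in JSON (and TOML) configuration files. A small usage sketch, assuming the package import path from this diff:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/les/downloader"
)

type config struct {
	Mode downloader.SyncMode `json:"syncmode"`
}

func main() {
	// encoding/json falls back to UnmarshalText for quoted string values.
	var c config
	if err := json.Unmarshal([]byte(`{"syncmode":"snap"}`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Mode == downloader.SnapSync) // true

	// MarshalText is used symmetrically when encoding.
	out, _ := json.Marshal(config{Mode: downloader.LightSync})
	fmt.Println(string(out)) // {"syncmode":"light"}
}
```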
diff --git a/les/downloader/peer.go b/les/downloader/peer.go
new file mode 100644
index 000000000..863294832
--- /dev/null
+++ b/les/downloader/peer.go
@@ -0,0 +1,501 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Contains the active peer-set of the downloader, maintaining both failures
+// as well as reputation metrics to prioritize the block retrievals.
+
+package downloader
+
+import (
+ "errors"
+ "math/big"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/eth/protocols/eth"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/msgrate"
+)
+
+const (
+ maxLackingHashes = 4096 // Maximum number of entries allowed on the list of lacking items
+)
+
+var (
+ errAlreadyFetching = errors.New("already fetching blocks from peer")
+ errAlreadyRegistered = errors.New("peer is already registered")
+ errNotRegistered = errors.New("peer is not registered")
+)
+
+// peerConnection represents an active peer from which hashes and blocks are retrieved.
+type peerConnection struct {
+ id string // Unique identifier of the peer
+
+ headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1)
+ blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1)
+ receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
+ stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1)
+
+ headerStarted time.Time // Time instance when the last header fetch was started
+ blockStarted time.Time // Time instance when the last block (body) fetch was started
+ receiptStarted time.Time // Time instance when the last receipt fetch was started
+ stateStarted time.Time // Time instance when the last node data fetch was started
+
+ rates *msgrate.Tracker // Tracker to home in on the number of items retrievable per second
+ lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)
+
+ peer Peer
+
+ version uint // Eth protocol version number to switch strategies
+ log log.Logger // Contextual logger to add extra infos to peer logs
+ lock sync.RWMutex
+}
+
+// LightPeer encapsulates the methods required to synchronise with a remote light peer.
+type LightPeer interface {
+ Head() (common.Hash, *big.Int)
+ RequestHeadersByHash(common.Hash, int, int, bool) error
+ RequestHeadersByNumber(uint64, int, int, bool) error
+}
+
+// Peer encapsulates the methods required to synchronise with a remote full peer.
+type Peer interface {
+ LightPeer
+ RequestBodies([]common.Hash) error
+ RequestReceipts([]common.Hash) error
+ RequestNodeData([]common.Hash) error
+}
+
+// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.
+type lightPeerWrapper struct {
+ peer LightPeer
+}
+
+func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() }
+func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error {
+ return w.peer.RequestHeadersByHash(h, amount, skip, reverse)
+}
+func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error {
+ return w.peer.RequestHeadersByNumber(i, amount, skip, reverse)
+}
+func (w *lightPeerWrapper) RequestBodies([]common.Hash) error {
+ panic("RequestBodies not supported in light client mode sync")
+}
+func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error {
+ panic("RequestReceipts not supported in light client mode sync")
+}
+func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error {
+ panic("RequestNodeData not supported in light client mode sync")
+}
+
+// newPeerConnection creates a new downloader peer.
+func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection {
+ return &peerConnection{
+ id: id,
+ lacking: make(map[common.Hash]struct{}),
+ peer: peer,
+ version: version,
+ log: logger,
+ }
+}
+
+// Reset clears the internal state of a peer entity.
+func (p *peerConnection) Reset() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ atomic.StoreInt32(&p.headerIdle, 0)
+ atomic.StoreInt32(&p.blockIdle, 0)
+ atomic.StoreInt32(&p.receiptIdle, 0)
+ atomic.StoreInt32(&p.stateIdle, 0)
+
+ p.lacking = make(map[common.Hash]struct{})
+}
+
+// FetchHeaders sends a header retrieval request to the remote peer.
+func (p *peerConnection) FetchHeaders(from uint64, count int) error {
+ // Short circuit if the peer is already fetching
+ if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
+ return errAlreadyFetching
+ }
+ p.headerStarted = time.Now()
+
+ // Issue the header retrieval request (absolute upwards without gaps)
+ go p.peer.RequestHeadersByNumber(from, count, 0, false)
+
+ return nil
+}
+
+// FetchBodies sends a block body retrieval request to the remote peer.
+func (p *peerConnection) FetchBodies(request *fetchRequest) error {
+ // Short circuit if the peer is already fetching
+ if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
+ return errAlreadyFetching
+ }
+ p.blockStarted = time.Now()
+
+ go func() {
+ // Convert the header set to a retrievable slice
+ hashes := make([]common.Hash, 0, len(request.Headers))
+ for _, header := range request.Headers {
+ hashes = append(hashes, header.Hash())
+ }
+ p.peer.RequestBodies(hashes)
+ }()
+
+ return nil
+}
+
+// FetchReceipts sends a receipt retrieval request to the remote peer.
+func (p *peerConnection) FetchReceipts(request *fetchRequest) error {
+ // Short circuit if the peer is already fetching
+ if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {
+ return errAlreadyFetching
+ }
+ p.receiptStarted = time.Now()
+
+ go func() {
+ // Convert the header set to a retrievable slice
+ hashes := make([]common.Hash, 0, len(request.Headers))
+ for _, header := range request.Headers {
+ hashes = append(hashes, header.Hash())
+ }
+ p.peer.RequestReceipts(hashes)
+ }()
+
+ return nil
+}
+
+// FetchNodeData sends a node state data retrieval request to the remote peer.
+func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
+ // Short circuit if the peer is already fetching
+ if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
+ return errAlreadyFetching
+ }
+ p.stateStarted = time.Now()
+
+ go p.peer.RequestNodeData(hashes)
+
+ return nil
+}
+
+// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
+// requests. Its estimated header retrieval throughput is updated with that measured
+// just now.
+func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) {
+ p.rates.Update(eth.BlockHeadersMsg, deliveryTime.Sub(p.headerStarted), delivered)
+ atomic.StoreInt32(&p.headerIdle, 0)
+}
+
+// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
+// requests. Its estimated body retrieval throughput is updated with that measured
+// just now.
+func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) {
+ p.rates.Update(eth.BlockBodiesMsg, deliveryTime.Sub(p.blockStarted), delivered)
+ atomic.StoreInt32(&p.blockIdle, 0)
+}
+
+// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
+// retrieval requests. Its estimated receipt retrieval throughput is updated
+// with that measured just now.
+func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) {
+ p.rates.Update(eth.ReceiptsMsg, deliveryTime.Sub(p.receiptStarted), delivered)
+ atomic.StoreInt32(&p.receiptIdle, 0)
+}
+
+// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
+// data retrieval requests. Its estimated state retrieval throughput is updated
+// with that measured just now.
+func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) {
+ p.rates.Update(eth.NodeDataMsg, deliveryTime.Sub(p.stateStarted), delivered)
+ atomic.StoreInt32(&p.stateIdle, 0)
+}
+
+// HeaderCapacity retrieves the peer's header download allowance based on its
+// previously discovered throughput.
+func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
+ cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT)
+ if cap > MaxHeaderFetch {
+ cap = MaxHeaderFetch
+ }
+ return cap
+}
+
+// BlockCapacity retrieves the peer's block download allowance based on its
+// previously discovered throughput.
+func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int {
+ cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT)
+ if cap > MaxBlockFetch {
+ cap = MaxBlockFetch
+ }
+ return cap
+}
+
+// ReceiptCapacity retrieves the peer's receipt download allowance based on its
+// previously discovered throughput.
+func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {
+ cap := p.rates.Capacity(eth.ReceiptsMsg, targetRTT)
+ if cap > MaxReceiptFetch {
+ cap = MaxReceiptFetch
+ }
+ return cap
+}
+
+// NodeDataCapacity retrieves the peer's state download allowance based on its
+// previously discovered throughput.
+func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int {
+ cap := p.rates.Capacity(eth.NodeDataMsg, targetRTT)
+ if cap > MaxStateFetch {
+ cap = MaxStateFetch
+ }
+ return cap
+}
+
+// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
+// that a peer is known not to have (i.e. have been requested before). If the
+// set reaches its maximum allowed capacity, items are randomly dropped off.
+func (p *peerConnection) MarkLacking(hash common.Hash) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ for len(p.lacking) >= maxLackingHashes {
+ for drop := range p.lacking {
+ delete(p.lacking, drop)
+ break
+ }
+ }
+ p.lacking[hash] = struct{}{}
+}
+
+// Lacks retrieves whether the hash of a blockchain item is on the peer's lacking
+// list (i.e. whether we know that the peer does not have it).
+func (p *peerConnection) Lacks(hash common.Hash) bool {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ _, ok := p.lacking[hash]
+ return ok
+}
+
+// peerSet represents the collection of active peers participating in the chain
+// download procedure.
+type peerSet struct {
+ peers map[string]*peerConnection
+ rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat
+
+ newPeerFeed event.Feed
+ peerDropFeed event.Feed
+
+ lock sync.RWMutex
+}
+
+// newPeerSet creates a new peer set to track the active download sources.
+func newPeerSet() *peerSet {
+ return &peerSet{
+ peers: make(map[string]*peerConnection),
+ rates: msgrate.NewTrackers(log.New("proto", "eth")),
+ }
+}
+
+// SubscribeNewPeers subscribes to peer arrival events.
+func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription {
+ return ps.newPeerFeed.Subscribe(ch)
+}
+
+// SubscribePeerDrops subscribes to peer departure events.
+func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription {
+ return ps.peerDropFeed.Subscribe(ch)
+}
+
+// Reset iterates over the current peer set, and resets each of the known peers
+// to prepare for the next batch of block retrieval.
+func (ps *peerSet) Reset() {
+ ps.lock.RLock()
+ defer ps.lock.RUnlock()
+
+ for _, peer := range ps.peers {
+ peer.Reset()
+ }
+}
+
+// Register injects a new peer into the working set, or returns an error if the
+// peer is already known.
+//
+// The method also sets the starting throughput values of the new peer to the
+// average of all existing peers, to give it a realistic chance of being used
+// for data retrievals.
+func (ps *peerSet) Register(p *peerConnection) error {
+ // Register the new peer with some meaningful defaults
+ ps.lock.Lock()
+ if _, ok := ps.peers[p.id]; ok {
+ ps.lock.Unlock()
+ return errAlreadyRegistered
+ }
+ p.rates = msgrate.NewTracker(ps.rates.MeanCapacities(), ps.rates.MedianRoundTrip())
+ if err := ps.rates.Track(p.id, p.rates); err != nil {
+ return err
+ }
+ ps.peers[p.id] = p
+ ps.lock.Unlock()
+
+ ps.newPeerFeed.Send(p)
+ return nil
+}
+
+// Unregister removes a remote peer from the active set, disabling any further
+// actions to/from that particular entity.
+func (ps *peerSet) Unregister(id string) error {
+ ps.lock.Lock()
+ p, ok := ps.peers[id]
+ if !ok {
+ ps.lock.Unlock()
+ return errNotRegistered
+ }
+ delete(ps.peers, id)
+ ps.rates.Untrack(id)
+ ps.lock.Unlock()
+
+ ps.peerDropFeed.Send(p)
+ return nil
+}
+
+// Peer retrieves the registered peer with the given id.
+func (ps *peerSet) Peer(id string) *peerConnection {
+ ps.lock.RLock()
+ defer ps.lock.RUnlock()
+
+ return ps.peers[id]
+}
+
+// Len returns the current number of peers in the set.
+func (ps *peerSet) Len() int {
+ ps.lock.RLock()
+ defer ps.lock.RUnlock()
+
+ return len(ps.peers)
+}
+
+// AllPeers retrieves a flat list of all the peers within the set.
+func (ps *peerSet) AllPeers() []*peerConnection {
+ ps.lock.RLock()
+ defer ps.lock.RUnlock()
+
+ list := make([]*peerConnection, 0, len(ps.peers))
+ for _, p := range ps.peers {
+ list = append(list, p)
+ }
+ return list
+}
+
+// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
+// within the active peer set, ordered by their reputation.
+func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
+ idle := func(p *peerConnection) bool {
+ return atomic.LoadInt32(&p.headerIdle) == 0
+ }
+ throughput := func(p *peerConnection) int {
+ return p.rates.Capacity(eth.BlockHeadersMsg, time.Second)
+ }
+ return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
+}
+
+// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
+// the active peer set, ordered by their reputation.
+func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
+ idle := func(p *peerConnection) bool {
+ return atomic.LoadInt32(&p.blockIdle) == 0
+ }
+ throughput := func(p *peerConnection) int {
+ return p.rates.Capacity(eth.BlockBodiesMsg, time.Second)
+ }
+ return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
+}
+
+// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
+// within the active peer set, ordered by their reputation.
+func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
+ idle := func(p *peerConnection) bool {
+ return atomic.LoadInt32(&p.receiptIdle) == 0
+ }
+ throughput := func(p *peerConnection) int {
+ return p.rates.Capacity(eth.ReceiptsMsg, time.Second)
+ }
+ return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
+}
+
+// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
+// peers within the active peer set, ordered by their reputation.
+func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
+ idle := func(p *peerConnection) bool {
+ return atomic.LoadInt32(&p.stateIdle) == 0
+ }
+ throughput := func(p *peerConnection) int {
+ return p.rates.Capacity(eth.NodeDataMsg, time.Second)
+ }
+ return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
+}
+
+// idlePeers retrieves a flat list of all currently idle peers satisfying the
+// protocol version constraints, using the provided function to check idleness.
+// The resulting set of peers is sorted by their capacity.
+func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, capacity func(*peerConnection) int) ([]*peerConnection, int) {
+ ps.lock.RLock()
+ defer ps.lock.RUnlock()
+
+ var (
+ total = 0
+ idle = make([]*peerConnection, 0, len(ps.peers))
+ tps = make([]int, 0, len(ps.peers))
+ )
+ for _, p := range ps.peers {
+ if p.version >= minProtocol && p.version <= maxProtocol {
+ if idleCheck(p) {
+ idle = append(idle, p)
+ tps = append(tps, capacity(p))
+ }
+ total++
+ }
+ }
+
+ // And sort them
+ sortPeers := &peerCapacitySort{idle, tps}
+ sort.Sort(sortPeers)
+ return sortPeers.p, total
+}
+
+// peerCapacitySort implements sort.Interface.
+// It sorts peer connections by capacity (descending).
+type peerCapacitySort struct {
+ p []*peerConnection
+ tp []int
+}
+
+func (ps *peerCapacitySort) Len() int {
+ return len(ps.p)
+}
+
+func (ps *peerCapacitySort) Less(i, j int) bool {
+ return ps.tp[i] > ps.tp[j]
+}
+
+func (ps *peerCapacitySort) Swap(i, j int) {
+ ps.p[i], ps.p[j] = ps.p[j], ps.p[i]
+ ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i]
+}
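
The capacity ordering that backs the *IdlePeers helpers can be seen in isolation. A package-internal sketch with hypothetical throughput values:

```go
// Peers paired with their estimated throughput are sorted descending, so
// idlePeers hands work to the fastest idle peer first.
peers := []*peerConnection{{id: "a"}, {id: "b"}, {id: "c"}}
caps := []int{10, 80, 40} // hypothetical items/sec estimates
sort.Sort(&peerCapacitySort{p: peers, tp: caps})
for _, p := range peers {
	fmt.Print(p.id, " ") // prints: b c a
}
```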
diff --git a/les/downloader/queue.go b/les/downloader/queue.go
new file mode 100644
index 000000000..04ec12cfa
--- /dev/null
+++ b/les/downloader/queue.go
@@ -0,0 +1,913 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Contains the block download scheduler to collect download tasks and schedule
+// them in an ordered and throttled way.
+
+package downloader
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+const (
+ bodyType = uint(0)
+ receiptType = uint(1)
+)
+
+var (
+ blockCacheMaxItems = 8192 // Maximum number of blocks to cache before throttling the download
+ blockCacheInitialItems = 2048 // Initial number of blocks to start fetching, before we know the sizes of the blocks
+ blockCacheMemory = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
+ blockCacheSizeWeight = 0.1 // Multiplier to approximate the average block size based on past ones
+)
+
+var (
+ errNoFetchesPending = errors.New("no fetches pending")
+ errStaleDelivery = errors.New("stale delivery")
+)
+
+// fetchRequest is a currently running data retrieval operation.
+type fetchRequest struct {
+ Peer *peerConnection // Peer to which the request was sent
+ From uint64 // [eth/62] Requested chain element index (used for skeleton fills only)
+ Headers []*types.Header // [eth/62] Requested headers, sorted by request order
+ Time time.Time // Time when the request was made
+}
+
+// fetchResult is a struct collecting partial results from data fetchers until
+// all outstanding pieces complete and the result as a whole can be processed.
+type fetchResult struct {
+ pending int32 // Flag telling what deliveries are outstanding
+
+ Header *types.Header
+ Uncles []*types.Header
+ Transactions types.Transactions
+ Receipts types.Receipts
+}
+
+func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
+ item := &fetchResult{
+ Header: header,
+ }
+ if !header.EmptyBody() {
+ item.pending |= (1 << bodyType)
+ }
+ if fastSync && !header.EmptyReceipts() {
+ item.pending |= (1 << receiptType)
+ }
+ return item
+}
+
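
The pending field is a two-bit mask: bit bodyType (0) tracks the block body, bit receiptType (1) the receipts. A package-internal sketch of the lifecycle, assuming a fast-sync header with a non-empty body and non-empty receipts:

```go
r := newFetchResult(header, true) // hypothetical header; fastSync = true
fmt.Printf("%02b\n", r.pending)   // 11: both body and receipts outstanding
r.SetBodyDone()                   // clears bit 0 (subtracts 1)
r.SetReceiptsDone()               // clears bit 1 (subtracts 2)
fmt.Println(r.AllDone())          // true
```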
+// SetBodyDone flags the body as finished.
+func (f *fetchResult) SetBodyDone() {
+ if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
+ atomic.AddInt32(&f.pending, -1)
+ }
+}
+
+// AllDone checks if item is done.
+func (f *fetchResult) AllDone() bool {
+ return atomic.LoadInt32(&f.pending) == 0
+}
+
+// SetReceiptsDone flags the receipts as finished.
+func (f *fetchResult) SetReceiptsDone() {
+ if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
+ atomic.AddInt32(&f.pending, -2)
+ }
+}
+
+// Done checks if the given type is done already
+func (f *fetchResult) Done(kind uint) bool {
+ v := atomic.LoadInt32(&f.pending)
+ return v&(1<<kind) == 0
+}
+
+// InFlightBlocks retrieves whether there are block fetch requests currently in
+// flight.
+func (q *queue) InFlightBlocks() bool {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ return len(q.blockPendPool) > 0
+}
+
+// InFlightReceipts retrieves whether there are receipt fetch requests currently
+// in flight.
+func (q *queue) InFlightReceipts() bool {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ return len(q.receiptPendPool) > 0
+}
+
+// Idle returns whether the queue is fully idle or has some data still inside.
+func (q *queue) Idle() bool {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
+ pending := len(q.blockPendPool) + len(q.receiptPendPool)
+
+ return (queued + pending) == 0
+}
+
+// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
+// up an already retrieved header skeleton.
+func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ // No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
+ if q.headerResults != nil {
+ panic("skeleton assembly already in progress")
+ }
+ // Schedule all the header retrieval tasks for the skeleton assembly
+ q.headerTaskPool = make(map[uint64]*types.Header)
+ q.headerTaskQueue = prque.New(nil)
+ q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
+ q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
+ q.headerProced = 0
+ q.headerOffset = from
+ q.headerContCh = make(chan bool, 1)
+
+ for i, header := range skeleton {
+ index := from + uint64(i*MaxHeaderFetch)
+
+ q.headerTaskPool[index] = header
+ q.headerTaskQueue.Push(index, -int64(index))
+ }
+}
+
+// RetrieveHeaders retrieves the header chain assembled based on the scheduled
+// skeleton.
+func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ headers, proced := q.headerResults, q.headerProced
+ q.headerResults, q.headerProced = nil, 0
+
+ return headers, proced
+}
+
+// Schedule adds a set of headers for the download queue for scheduling, returning
+// the new headers encountered.
+func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ // Insert all the headers prioritised by the contained block number
+ inserts := make([]*types.Header, 0, len(headers))
+ for _, header := range headers {
+ // Make sure chain order is honoured and preserved throughout
+ hash := header.Hash()
+ if header.Number == nil || header.Number.Uint64() != from {
+ log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
+ break
+ }
+ if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
+ log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
+ break
+ }
+ // Make sure no duplicate requests are executed
+ // We cannot skip this, even if the block is empty, since this is
+ // what triggers the fetchResult creation.
+ if _, ok := q.blockTaskPool[hash]; ok {
+ log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
+ } else {
+ q.blockTaskPool[hash] = header
+ q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
+ }
+ // Queue for receipt retrieval
+ if q.mode == FastSync && !header.EmptyReceipts() {
+ if _, ok := q.receiptTaskPool[hash]; ok {
+ log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
+ } else {
+ q.receiptTaskPool[hash] = header
+ q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
+ }
+ }
+ inserts = append(inserts, header)
+ q.headerHead = hash
+ from++
+ }
+ return inserts
+}
+
+// Results retrieves and permanently removes a batch of fetch results from
+// the cache. The result slice will be empty if the queue has been closed.
+// Results can be called concurrently with Deliver and Schedule, but it
+// assumes that there are no two simultaneous callers to Results.
+func (q *queue) Results(block bool) []*fetchResult {
+ // Abort early if there are no items and non-blocking requested
+ if !block && !q.resultCache.HasCompletedItems() {
+ return nil
+ }
+ closed := false
+ for !closed && !q.resultCache.HasCompletedItems() {
+ // In order to wait on 'active', we need to obtain the lock.
+ // That may take a while, if someone is delivering at the same
+ // time, so after obtaining the lock, we check again if there
+ // are any results to fetch.
+ // Also, between asking for the lock and obtaining it, someone
+ // can have closed the queue. In that case, we should return the
+ // available results and stop blocking.
+ q.lock.Lock()
+ if q.resultCache.HasCompletedItems() || q.closed {
+ q.lock.Unlock()
+ break
+ }
+ // No items available, and not closed
+ q.active.Wait()
+ closed = q.closed
+ q.lock.Unlock()
+ }
+ // Regardless of whether the queue is closed or not, we can still deliver whatever we have
+ results := q.resultCache.GetCompleted(maxResultsProcess)
+ for _, result := range results {
+ // Recalculate the result item weights to prevent memory exhaustion
+ size := result.Header.Size()
+ for _, uncle := range result.Uncles {
+ size += uncle.Size()
+ }
+ for _, receipt := range result.Receipts {
+ size += receipt.Size()
+ }
+ for _, tx := range result.Transactions {
+ size += tx.Size()
+ }
+ q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
+ (1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
+ }
+ // Using the newly calibrated result size, figure out the new throttle limit
+ // on the result cache
+ throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
+ throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)
+
+ // Log some info at certain times
+ if time.Since(q.lastStatLog) > 60*time.Second {
+ q.lastStatLog = time.Now()
+ info := q.Stats()
+ info = append(info, "throttle", throttleThreshold)
+ log.Info("Downloader queue stats", info...)
+ }
+ return results
+}
+
+func (q *queue) Stats() []interface{} {
+ q.lock.RLock()
+ defer q.lock.RUnlock()
+
+ return q.stats()
+}
+
+func (q *queue) stats() []interface{} {
+ return []interface{}{
+ "receiptTasks", q.receiptTaskQueue.Size(),
+ "blockTasks", q.blockTaskQueue.Size(),
+ "itemSize", q.resultSize,
+ }
+}
+
+// ReserveHeaders reserves a set of headers for the given peer, skipping any
+// previously failed batches.
+func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ // Short circuit if the peer's already downloading something (sanity check to
+ // not corrupt state)
+ if _, ok := q.headerPendPool[p.id]; ok {
+ return nil
+ }
+ // Retrieve a batch of hashes, skipping previously failed ones
+ send, skip := uint64(0), []uint64{}
+ for send == 0 && !q.headerTaskQueue.Empty() {
+ from, _ := q.headerTaskQueue.Pop()
+ if q.headerPeerMiss[p.id] != nil {
+ if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
+ skip = append(skip, from.(uint64))
+ continue
+ }
+ }
+ send = from.(uint64)
+ }
+ // Merge all the skipped batches back
+ for _, from := range skip {
+ q.headerTaskQueue.Push(from, -int64(from))
+ }
+ // Assemble and return the block download request
+ if send == 0 {
+ return nil
+ }
+ request := &fetchRequest{
+ Peer: p,
+ From: send,
+ Time: time.Now(),
+ }
+ q.headerPendPool[p.id] = request
+ return request
+}
+
+// ReserveBodies reserves a set of body fetches for the given peer, skipping any
+// previously failed downloads. Beside the next batch of needed fetches, it also
+// returns a flag whether empty blocks were queued requiring processing.
+func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
+}
+
+// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
+// any previously failed downloads. Beside the next batch of needed fetches, it
+// also returns a flag whether empty receipts were queued requiring importing.
+func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
+}
+
+// reserveHeaders reserves a set of data download operations for a given peer,
+// skipping any previously failed ones. This method is a generic version used
+// by the individual special reservation functions.
+//
+// Note, this method expects the queue lock to be already held for writing. The
+// reason the lock is not obtained in here is because the parameters already need
+// to access the queue, so they already need a lock anyway.
+//
+// Returns:
+// item - the fetchRequest
+// progress - whether any progress was made
+// throttle - if the caller should throttle for a while
+func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
+ pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
+ // Short circuit if the pool has been depleted, or if the peer's already
+ // downloading something (sanity check not to corrupt state)
+ if taskQueue.Empty() {
+ return nil, false, true
+ }
+ if _, ok := pendPool[p.id]; ok {
+ return nil, false, false
+ }
+ // Retrieve a batch of tasks, skipping previously failed ones
+ send := make([]*types.Header, 0, count)
+ skip := make([]*types.Header, 0)
+ progress := false
+ throttled := false
+ for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
+ // The task queue will pop items in order, so the highest-priority block
+ // is also the lowest block number.
+ h, _ := taskQueue.Peek()
+ header := h.(*types.Header)
+ // We can ask the result cache if this header is within the
+ // "prioritized" segment of blocks. If it is not, we need to throttle.
+
+ stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync)
+ if stale {
+ // Don't put back in the task queue, this item has already been
+ // delivered upstream
+ taskQueue.PopItem()
+ progress = true
+ delete(taskPool, header.Hash())
+ proc = proc - 1
+ log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
+ continue
+ }
+ if throttle {
+ // There are no resultslots available. Leave it in the task queue
+ // However, if there are any left as 'skipped', we should not tell
+ // the caller to throttle, since we still want some other
+ // peer to fetch those for us
+ throttled = len(skip) == 0
+ break
+ }
+ if err != nil {
+ // this most definitely should _not_ happen
+ log.Warn("Failed to reserve headers", "err", err)
+ // There are no resultslots available. Leave it in the task queue
+ break
+ }
+ if item.Done(kind) {
+ // If it's a noop, we can skip this task
+ delete(taskPool, header.Hash())
+ taskQueue.PopItem()
+ proc = proc - 1
+ progress = true
+ continue
+ }
+ // Remove it from the task queue
+ taskQueue.PopItem()
+ // Otherwise unless the peer is known not to have the data, add to the retrieve list
+ if p.Lacks(header.Hash()) {
+ skip = append(skip, header)
+ } else {
+ send = append(send, header)
+ }
+ }
+ // Merge all the skipped headers back
+ for _, header := range skip {
+ taskQueue.Push(header, -int64(header.Number.Uint64()))
+ }
+ if q.resultCache.HasCompletedItems() {
+ // Wake Results, resultCache was modified
+ q.active.Signal()
+ }
+ // Assemble and return the block download request
+ if len(send) == 0 {
+ return nil, progress, throttled
+ }
+ request := &fetchRequest{
+ Peer: p,
+ Headers: send,
+ Time: time.Now(),
+ }
+ pendPool[p.id] = request
+ return request, progress, throttled
+}
+
+// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
+func (q *queue) CancelHeaders(request *fetchRequest) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+ q.cancel(request, q.headerTaskQueue, q.headerPendPool)
+}
+
+// CancelBodies aborts a body fetch request, returning all pending headers to the
+// task queue.
+func (q *queue) CancelBodies(request *fetchRequest) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+ q.cancel(request, q.blockTaskQueue, q.blockPendPool)
+}
+
+// CancelReceipts aborts a body fetch request, returning all pending headers to
+// the task queue.
+func (q *queue) CancelReceipts(request *fetchRequest) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+ q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
+}
+
+// Cancel aborts a fetch request, returning all pending hashes to the task queue.
+func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
+ if request.From > 0 {
+ taskQueue.Push(request.From, -int64(request.From))
+ }
+ for _, header := range request.Headers {
+ taskQueue.Push(header, -int64(header.Number.Uint64()))
+ }
+ delete(pendPool, request.Peer.id)
+}
+
+// Revoke cancels all pending requests belonging to a given peer. This method is
+// meant to be called during a peer drop to quickly reassign owned data fetches
+// to remaining nodes.
+func (q *queue) Revoke(peerID string) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ if request, ok := q.blockPendPool[peerID]; ok {
+ for _, header := range request.Headers {
+ q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
+ }
+ delete(q.blockPendPool, peerID)
+ }
+ if request, ok := q.receiptPendPool[peerID]; ok {
+ for _, header := range request.Headers {
+ q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
+ }
+ delete(q.receiptPendPool, peerID)
+ }
+}
+
+// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
+// canceling them and returning the responsible peers for penalisation.
+func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
+}
+
+// ExpireBodies checks for in flight block body requests that exceeded a timeout
+// allowance, canceling them and returning the responsible peers for penalisation.
+func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
+}
+
+// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
+// allowance, canceling them and returning the responsible peers for penalisation.
+func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
+}
+
+// expire is the generic check that moves expired tasks from a pending pool back
+// into a task pool, returning all entities caught with expired tasks.
+//
+// Note, this method expects the queue lock to be already held. The
+// reason the lock is not obtained in here is because the parameters already need
+// to access the queue, so they already need a lock anyway.
+func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
+ // Iterate over the expired requests and return each to the queue
+ expiries := make(map[string]int)
+ for id, request := range pendPool {
+ if time.Since(request.Time) > timeout {
+ // Update the metrics with the timeout
+ timeoutMeter.Mark(1)
+
+ // Return any unsatisfied requests to the pool
+ if request.From > 0 {
+ taskQueue.Push(request.From, -int64(request.From))
+ }
+ for _, header := range request.Headers {
+ taskQueue.Push(header, -int64(header.Number.Uint64()))
+ }
+ // Add the peer to the expiry report along with the number of failed requests
+ expiries[id] = len(request.Headers)
+
+ // Remove the expired requests from the pending pool directly
+ delete(pendPool, id)
+ }
+ }
+ return expiries
+}
+
+// DeliverHeaders injects a header retrieval response into the header results
+// cache. This method either accepts all headers it received, or none of them
+// if they do not map correctly to the skeleton.
+//
+// If the headers are accepted, the method makes an attempt to deliver the set
+// of ready headers to the processor to keep the pipeline full. However it will
+// not block to prevent stalling other pending deliveries.
+func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ var logger log.Logger
+ if len(id) < 16 {
+ // Tests use short IDs, don't choke on them
+ logger = log.New("peer", id)
+ } else {
+ logger = log.New("peer", id[:16])
+ }
+ // Short circuit if the data was never requested
+ request := q.headerPendPool[id]
+ if request == nil {
+ return 0, errNoFetchesPending
+ }
+ headerReqTimer.UpdateSince(request.Time)
+ delete(q.headerPendPool, id)
+
+ // Ensure headers can be mapped onto the skeleton chain
+ target := q.headerTaskPool[request.From].Hash()
+
+ accepted := len(headers) == MaxHeaderFetch
+ if accepted {
+ if headers[0].Number.Uint64() != request.From {
+ logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
+ accepted = false
+ } else if headers[len(headers)-1].Hash() != target {
+ logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
+ accepted = false
+ }
+ }
+ if accepted {
+ parentHash := headers[0].Hash()
+ for i, header := range headers[1:] {
+ hash := header.Hash()
+ if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
+ logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
+ accepted = false
+ break
+ }
+ if parentHash != header.ParentHash {
+ logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
+ accepted = false
+ break
+ }
+ // Set-up parent hash for next round
+ parentHash = hash
+ }
+ }
+ // If the batch of headers wasn't accepted, mark as unavailable
+ if !accepted {
+ logger.Trace("Skeleton filling not accepted", "from", request.From)
+
+ miss := q.headerPeerMiss[id]
+ if miss == nil {
+ q.headerPeerMiss[id] = make(map[uint64]struct{})
+ miss = q.headerPeerMiss[id]
+ }
+ miss[request.From] = struct{}{}
+
+ q.headerTaskQueue.Push(request.From, -int64(request.From))
+ return 0, errors.New("delivery not accepted")
+ }
+ // Clean up a successful fetch and try to deliver any sub-results
+ copy(q.headerResults[request.From-q.headerOffset:], headers)
+ delete(q.headerTaskPool, request.From)
+
+ ready := 0
+ for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
+ ready += MaxHeaderFetch
+ }
+ if ready > 0 {
+ // Headers are ready for delivery, gather them and push forward (non blocking)
+ process := make([]*types.Header, ready)
+ copy(process, q.headerResults[q.headerProced:q.headerProced+ready])
+
+ select {
+ case headerProcCh <- process:
+ logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number)
+ q.headerProced += len(process)
+ default:
+ }
+ }
+ // Check for termination and return
+ if len(q.headerTaskPool) == 0 {
+ q.headerContCh <- false
+ }
+ return len(headers), nil
+}
+
+// DeliverBodies injects a block body retrieval response into the results queue.
+// The method returns the number of blocks bodies accepted from the delivery and
+// also wakes any threads waiting for data delivery.
+func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+ trieHasher := trie.NewStackTrie(nil)
+ validate := func(index int, header *types.Header) error {
+ if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash {
+ return errInvalidBody
+ }
+ if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
+ return errInvalidBody
+ }
+ return nil
+ }
+
+ reconstruct := func(index int, result *fetchResult) {
+ result.Transactions = txLists[index]
+ result.Uncles = uncleLists[index]
+ result.SetBodyDone()
+ }
+ return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
+ bodyReqTimer, len(txLists), validate, reconstruct)
+}
+
+// DeliverReceipts injects a receipt retrieval response into the results queue.
+// The method returns the number of transaction receipts accepted from the delivery
+// and also wakes any threads waiting for data delivery.
+func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+ trieHasher := trie.NewStackTrie(nil)
+ validate := func(index int, header *types.Header) error {
+ if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash {
+ return errInvalidReceipt
+ }
+ return nil
+ }
+ reconstruct := func(index int, result *fetchResult) {
+ result.Receipts = receiptList[index]
+ result.SetReceiptsDone()
+ }
+ return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
+ receiptReqTimer, len(receiptList), validate, reconstruct)
+}
+
+// deliver injects a data retrieval response into the results queue.
+//
+// Note, this method expects the queue lock to be already held for writing. The
+// reason this lock is not obtained in here is because the parameters already need
+// to access the queue, so they already need a lock anyway.
+func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
+ taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
+ results int, validate func(index int, header *types.Header) error,
+ reconstruct func(index int, result *fetchResult)) (int, error) {
+
+ // Short circuit if the data was never requested
+ request := pendPool[id]
+ if request == nil {
+ return 0, errNoFetchesPending
+ }
+ reqTimer.UpdateSince(request.Time)
+ delete(pendPool, id)
+
+ // If no data items were retrieved, mark them as unavailable for the origin peer
+ if results == 0 {
+ for _, header := range request.Headers {
+ request.Peer.MarkLacking(header.Hash())
+ }
+ }
+ // Assemble each of the results with their headers and retrieved data parts
+ var (
+ accepted int
+ failure error
+ i int
+ hashes []common.Hash
+ )
+ for _, header := range request.Headers {
+ // Short circuit assembly if no more fetch results are found
+ if i >= results {
+ break
+ }
+ // Validate the fields
+ if err := validate(i, header); err != nil {
+ failure = err
+ break
+ }
+ hashes = append(hashes, header.Hash())
+ i++
+ }
+
+ for _, header := range request.Headers[:i] {
+ if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {
+ reconstruct(accepted, res)
+ } else {
+ // else: between here and above, some other peer filled this result,
+ // or it was indeed a no-op. This should not happen, but if it does it's
+ // not something to panic about
+ log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
+ failure = errStaleDelivery
+ }
+ // Clean up a successful fetch
+ delete(taskPool, hashes[accepted])
+ accepted++
+ }
+ // Return all failed or missing fetches to the queue
+ for _, header := range request.Headers[accepted:] {
+ taskQueue.Push(header, -int64(header.Number.Uint64()))
+ }
+ // Wake up Results
+ if accepted > 0 {
+ q.active.Signal()
+ }
+ if failure == nil {
+ return accepted, nil
+ }
+ // If none of the data was good, it's a stale delivery
+ if accepted > 0 {
+ return accepted, fmt.Errorf("partial failure: %v", failure)
+ }
+ return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
+}
+
+// Prepare configures the result cache to allow accepting and caching inbound
+// fetch results.
+func (q *queue) Prepare(offset uint64, mode SyncMode) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ // Prepare the queue for sync results
+ q.resultCache.Prepare(offset)
+ q.mode = mode
+}
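
The throttle threshold computed in Results can be checked with concrete numbers: with the default 256 MiB blockCacheMemory and an assumed calibrated average result size of 64 KiB, the result cache is allowed 4096 slots before reserveHeaders starts throttling:

```go
resultSize := common.StorageSize(64 * 1024) // assumed EMA of recent block sizes
threshold := uint64((common.StorageSize(blockCacheMemory) + resultSize - 1) / resultSize)
fmt.Println(threshold) // 4096 = 256 MiB / 64 KiB
```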
diff --git a/les/downloader/queue_test.go b/les/downloader/queue_test.go
new file mode 100644
index 000000000..cde5f306a
--- /dev/null
+++ b/les/downloader/queue_test.go
@@ -0,0 +1,452 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "fmt"
+ "math/big"
+ "math/rand"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+var (
+ testdb = rawdb.NewMemoryDatabase()
+ genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000000000))
+)
+
+// makeChain creates a chain of n blocks starting at and including parent.
+// The returned hash chain is ordered head->parent. In addition, every second
+// block contains a transaction to allow testing correct block reassembly.
+func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) {
+ blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {
+ block.SetCoinbase(common.Address{seed})
+ // Add one tx to every second block
+ if !empty && i%2 == 0 {
+ signer := types.MakeSigner(params.TestChainConfig, block.Number())
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey)
+ if err != nil {
+ panic(err)
+ }
+ block.AddTx(tx)
+ }
+ })
+ return blocks, receipts
+}
+
+type chainData struct {
+ blocks []*types.Block
+ offset int
+}
+
+var chain *chainData
+var emptyChain *chainData
+
+func init() {
+ // Create a chain of blocks to import
+ targetBlocks := 128
+ blocks, _ := makeChain(targetBlocks, 0, genesis, false)
+ chain = &chainData{blocks, 0}
+
+ blocks, _ = makeChain(targetBlocks, 0, genesis, true)
+ emptyChain = &chainData{blocks, 0}
+}
+
+func (chain *chainData) headers() []*types.Header {
+ hdrs := make([]*types.Header, len(chain.blocks))
+ for i, b := range chain.blocks {
+ hdrs[i] = b.Header()
+ }
+ return hdrs
+}
+
+func (chain *chainData) Len() int {
+ return len(chain.blocks)
+}
+
+func dummyPeer(id string) *peerConnection {
+ p := &peerConnection{
+ id: id,
+ lacking: make(map[common.Hash]struct{}),
+ }
+ return p
+}
+
+func TestBasics(t *testing.T) {
+ numOfBlocks := len(chain.blocks)
+ numOfReceipts := len(chain.blocks) / 2
+
+ q := newQueue(10, 10)
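+ // Note: the queue is deliberately tiny (10 result slots) so that the
+ // 50-item reservations below trip the throttling logic.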
+ if !q.Idle() {
+ t.Errorf("new queue should be idle")
+ }
+ q.Prepare(1, FastSync)
+ if res := q.Results(false); len(res) != 0 {
+ t.Fatal("new queue should have 0 results")
+ }
+
+ // Schedule a batch of headers
+ q.Schedule(chain.headers(), 1)
+ if q.Idle() {
+ t.Errorf("queue should not be idle")
+ }
+ if got, exp := q.PendingBlocks(), chain.Len(); got != exp {
+ t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
+ }
+ // Only non-empty receipts get added to task-queue
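+ // (every second block in 'chain' carries a transaction, hence 64 of 128)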
+ if got, exp := q.PendingReceipts(), 64; got != exp {
+ t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
+ }
+ // Items are now queued for downloading; the next step is to tell the
+ // queue that a certain peer will deliver them for us
+ {
+ peer := dummyPeer("peer-1")
+ fetchReq, _, throttle := q.ReserveBodies(peer, 50)
+ if !throttle {
+ // queue size is only 10, so throttling should occur
+ t.Fatal("should throttle")
+ }
+ // But we should still get the first things to fetch
+ if got, exp := len(fetchReq.Headers), 5; got != exp {
+ t.Fatalf("expected %d requests, got %d", exp, got)
+ }
+ if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
+ t.Fatalf("expected header %d, got %d", exp, got)
+ }
+ }
+ if exp, got := numOfBlocks-10, q.blockTaskQueue.Size(); exp != got {
+ t.Errorf("expected block task queue to be %d, got %d", exp, got)
+ }
+ if exp, got := numOfReceipts, q.receiptTaskQueue.Size(); exp != got {
+ t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
+ }
+ {
+ peer := dummyPeer("peer-2")
+ fetchReq, _, throttle := q.ReserveBodies(peer, 50)
+
+ // The second peer should hit throttling
+ if !throttle {
+ t.Fatalf("should not throttle")
+ }
+ // And not get any fetches at all, since it was throttled to begin with
+ if fetchReq != nil {
+ t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers))
+ }
+ }
+ if exp, got := numOfBlocks-10, q.blockTaskQueue.Size(); exp != got {
+ t.Errorf("expected block task queue to be %d, got %d", exp, got)
+ }
+ if exp, got := numOfReceipts, q.receiptTaskQueue.Size(); exp != got {
+ t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
+ }
+ {
+ // The receipt delivering peer should not be affected
+ // by the throttling of body deliveries
+ peer := dummyPeer("peer-3")
+ fetchReq, _, throttle := q.ReserveReceipts(peer, 50)
+ if !throttle {
+ // queue size is only 10, so throttling should occur
+ t.Fatal("should throttle")
+ }
+ // But we should still get the first things to fetch
+ if got, exp := len(fetchReq.Headers), 5; got != exp {
+ t.Fatalf("expected %d requests, got %d", exp, got)
+ }
+ if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
+ t.Fatalf("expected header %d, got %d", exp, got)
+ }
+ }
+ if exp, got := numOfBlocks-10, q.blockTaskQueue.Size(); exp != got {
+ t.Errorf("expected block task queue to be %d, got %d", exp, got)
+ }
+ if exp, got := numOfReceipts-5, q.receiptTaskQueue.Size(); exp != got {
+ t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
+ }
+ if got, exp := q.resultCache.countCompleted(), 0; got != exp {
+ t.Errorf("wrong processable count, got %d, exp %d", got, exp)
+ }
+}
+
+func TestEmptyBlocks(t *testing.T) {
+ numOfBlocks := len(emptyChain.blocks)
+
+ q := newQueue(10, 10)
+
+ q.Prepare(1, FastSync)
+ // Schedule a batch of headers
+ q.Schedule(emptyChain.headers(), 1)
+ if q.Idle() {
+ t.Errorf("queue should not be idle")
+ }
+ if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp {
+ t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
+ }
+ if got, exp := q.PendingReceipts(), 0; got != exp {
+ t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
+ }
+ // They won't be processable, because the fetchResults haven't been
+ // created yet
+ if got, exp := q.resultCache.countCompleted(), 0; got != exp {
+ t.Errorf("wrong processable count, got %d, exp %d", got, exp)
+ }
+
+ // Items are now queued for downloading; the next step is to tell the
+ // queue that a certain peer will deliver them for us.
+ // That should trigger all of them to suddenly become 'done'
+ {
+ // Reserve blocks
+ peer := dummyPeer("peer-1")
+ fetchReq, _, _ := q.ReserveBodies(peer, 50)
+
+ // there should be nothing to fetch, blocks are empty
+ if fetchReq != nil {
+ t.Fatal("there should be no body fetch tasks remaining")
+ }
+ }
+ if q.blockTaskQueue.Size() != numOfBlocks-10 {
+ t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size())
+ }
+ if q.receiptTaskQueue.Size() != 0 {
+ t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size())
+ }
+ {
+ peer := dummyPeer("peer-3")
+ fetchReq, _, _ := q.ReserveReceipts(peer, 50)
+
+ // there should be nothing to fetch, blocks are empty
+ if fetchReq != nil {
+ t.Fatal("there should be no body fetch tasks remaining")
+ }
+ }
+ if q.blockTaskQueue.Size() != numOfBlocks-10 {
+ t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size())
+ }
+ if q.receiptTaskQueue.Size() != 0 {
+ t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size())
+ }
+ if got, exp := q.resultCache.countCompleted(), 10; got != exp {
+ t.Errorf("wrong processable count, got %d, exp %d", got, exp)
+ }
+}
+
+// XTestDelivery does some more extensive testing of events that happen,
+// blocks that become known and peers that make reservations and deliveries.
+// Disabled since it's not really a unit test, but it can be executed manually
+// to exercise some more advanced scenarios.
+func XTestDelivery(t *testing.T) {
+ // the outside network, holding blocks
+ blo, rec := makeChain(128, 0, genesis, false)
+ world := newNetwork()
+ world.receipts = rec
+ world.chain = blo
+ world.progress(10)
+ if false {
+ log.Root().SetHandler(log.StdoutHandler)
+ }
+ q := newQueue(10, 10)
+ var wg sync.WaitGroup
+ q.Prepare(1, FastSync)
+ wg.Add(1)
+ go func() {
+ // deliver headers
+ defer wg.Done()
+ c := 1
+ for {
+ //fmt.Printf("getting headers from %d\n", c)
+ hdrs := world.headers(c)
+ l := len(hdrs)
+ //fmt.Printf("scheduling %d headers, first %d last %d\n",
+ // l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64())
+ q.Schedule(hdrs, uint64(c))
+ c += l
+ }
+ }()
+ wg.Add(1)
+ go func() {
+ // collect results
+ defer wg.Done()
+ tot := 0
+ for {
+ res := q.Results(true)
+ tot += len(res)
+ fmt.Printf("got %d results, %d tot\n", len(res), tot)
+ // Now we can forget about these
+ world.forget(res[len(res)-1].Header.Number.Uint64())
+ }
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // reserve body fetch
+ i := 4
+ for {
+ peer := dummyPeer(fmt.Sprintf("peer-%d", i))
+ f, _, _ := q.ReserveBodies(peer, rand.Intn(30))
+ if f != nil {
+ var emptyList []*types.Header
+ var txs [][]*types.Transaction
+ var uncles [][]*types.Header
+ numToSkip := rand.Intn(len(f.Headers))
+ for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] {
+ txs = append(txs, world.getTransactions(hdr.Number.Uint64()))
+ uncles = append(uncles, emptyList)
+ }
+ time.Sleep(100 * time.Millisecond)
+ _, err := q.DeliverBodies(peer.id, txs, uncles)
+ if err != nil {
+ fmt.Printf("delivered %d bodies %v\n", len(txs), err)
+ }
+ } else {
+ i++
+ time.Sleep(200 * time.Millisecond)
+ }
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ // reserve receipt fetch
+ peer := dummyPeer("peer-3")
+ for {
+ f, _, _ := q.ReserveReceipts(peer, rand.Intn(50))
+ if f != nil {
+ var rcs [][]*types.Receipt
+ for _, hdr := range f.Headers {
+ rcs = append(rcs, world.getReceipts(hdr.Number.Uint64()))
+ }
+ _, err := q.DeliverReceipts(peer.id, rcs)
+ if err != nil {
+ fmt.Printf("delivered %d receipts %v\n", len(rcs), err)
+ }
+ time.Sleep(100 * time.Millisecond)
+ } else {
+ time.Sleep(200 * time.Millisecond)
+ }
+ }
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 50; i++ {
+ time.Sleep(300 * time.Millisecond)
+ //world.tick()
+ //fmt.Printf("trying to progress\n")
+ world.progress(rand.Intn(100))
+ }
+ for i := 0; i < 50; i++ {
+ time.Sleep(2990 * time.Millisecond)
+ }
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ time.Sleep(990 * time.Millisecond)
+ fmt.Printf("world block tip is %d\n",
+ world.chain[len(world.chain)-1].Header().Number.Uint64())
+ fmt.Println(q.Stats())
+ }
+ }()
+ wg.Wait()
+}
+
+func newNetwork() *network {
+ var l sync.RWMutex
+ return &network{
+ cond: sync.NewCond(&l),
+ offset: 1, // block 1 is at blocks[0]
+ }
+}
+
+// network represents the outside network being synced from, holding the
+// blocks and receipts that peers serve to the queue under test.
+type network struct {
+ offset int
+ chain []*types.Block
+ receipts []types.Receipts
+ lock sync.RWMutex
+ cond *sync.Cond
+}
+
+func (n *network) getTransactions(blocknum uint64) types.Transactions {
+ index := blocknum - uint64(n.offset)
+ return n.chain[index].Transactions()
+}
+func (n *network) getReceipts(blocknum uint64) types.Receipts {
+ index := blocknum - uint64(n.offset)
+ if got := n.chain[index].Header().Number.Uint64(); got != blocknum {
+ fmt.Printf("Err, got %d exp %d\n", got, blocknum)
+ panic("sd")
+ }
+ return n.receipts[index]
+}
+
+func (n *network) forget(blocknum uint64) {
+ index := blocknum - uint64(n.offset)
+ n.chain = n.chain[index:]
+ n.receipts = n.receipts[index:]
+ n.offset = int(blocknum)
+}
+func (n *network) progress(numBlocks int) {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+ //fmt.Printf("progressing...\n")
+ newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false)
+ n.chain = append(n.chain, newBlocks...)
+ n.receipts = append(n.receipts, newR...)
+ n.cond.Broadcast()
+}
+
+func (n *network) headers(from int) []*types.Header {
+ numHeaders := 128
+ var hdrs []*types.Header
+ index := from - n.offset
+
+ for index >= len(n.chain) {
+ // wait for progress
+ n.cond.L.Lock()
+ //fmt.Printf("header going into wait\n")
+ n.cond.Wait()
+ index = from - n.offset
+ n.cond.L.Unlock()
+ }
+ n.lock.RLock()
+ defer n.lock.RUnlock()
+ for i, b := range n.chain[index:] {
+ hdrs = append(hdrs, b.Header())
+ if i >= numHeaders {
+ break
+ }
+ }
+ return hdrs
+}
diff --git a/les/downloader/resultstore.go b/les/downloader/resultstore.go
new file mode 100644
index 000000000..21928c2a0
--- /dev/null
+++ b/les/downloader/resultstore.go
@@ -0,0 +1,194 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// resultStore implements a structure for maintaining fetchResults, tracking their
+// download-progress and delivering (finished) results.
+type resultStore struct {
+ items []*fetchResult // Downloaded but not yet delivered fetch results
+ resultOffset uint64 // Offset of the first cached fetch result in the block chain
+
+ // Internal index of first non-completed entry, updated atomically when needed.
+ // If all items are complete, this will equal length(items), so
+ // *important*: it is not safe to use for indexing without checking against length
+ indexIncomplete int32 // atomic access
+
+ // throttleThreshold is the limit up to which we _want_ to fill the
+ // results. If blocks are large, we want to limit the results to less
+ // than the number of available slots, and maybe only fill 1024 out of
+ // 8192 possible places. The queue will, at certain times, recalibrate
+ // this index.
+ throttleThreshold uint64
+
+ lock sync.RWMutex
+}
+
+func newResultStore(size int) *resultStore {
+ return &resultStore{
+ resultOffset: 0,
+ items: make([]*fetchResult, size),
+ throttleThreshold: uint64(size),
+ }
+}
+
+// SetThrottleThreshold updates the throttling threshold based on the requested
+// limit and the total queue capacity. It returns the (possibly capped) threshold.
+func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ limit := uint64(len(r.items))
+ if threshold >= limit {
+ threshold = limit
+ }
+ r.throttleThreshold = threshold
+ return r.throttleThreshold
+}
+
+// AddFetch adds a header for body/receipt fetching. This is used when the queue
+// wants to reserve headers for fetching.
+//
+// It returns the following:
+// stale - if true, this item is already passed, and should not be requested again
+// throttled - if true, the store is at capacity, this particular header is not prio now
+// item - the result to store data into
+// err - any error that occurred
+func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ var index int
+ item, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64())
+ if err != nil || stale || throttled {
+ return stale, throttled, item, err
+ }
+ if item == nil {
+ item = newFetchResult(header, fastSync)
+ r.items[index] = item
+ }
+ return stale, throttled, item, err
+}
+
+// GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag
+// is true, that means the header has already been delivered 'upstream'. This method
+// does not bubble up the 'throttle' flag, since it's moot at the point in time when
+// the item is downloaded and ready for delivery.
+func (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ res, _, stale, _, err := r.getFetchResult(headerNumber)
+ return res, stale, err
+}
+
+// getFetchResult returns the fetchResult corresponding to the given item, and
+// the index where the result is stored.
+func (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) {
+ index = int(int64(headerNumber) - int64(r.resultOffset))
+ throttle = index >= int(r.throttleThreshold)
+ stale = index < 0
+
+ if index >= len(r.items) {
+ err = fmt.Errorf("%w: index allocation went beyond available resultStore space "+
+ "(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d)", errInvalidChain,
+ index, headerNumber, r.resultOffset, len(r.items))
+ return nil, index, stale, throttle, err
+ }
+ if stale {
+ return nil, index, stale, throttle, nil
+ }
+ item = r.items[index]
+ return item, index, stale, throttle, nil
+}
+
+// HasCompletedItems returns true if there are processable items available.
+// This method is cheaper than countCompleted.
+func (r *resultStore) HasCompletedItems() bool {
+ r.lock.RLock()
+ defer r.lock.RUnlock()
+
+ if len(r.items) == 0 {
+ return false
+ }
+ if item := r.items[0]; item != nil && item.AllDone() {
+ return true
+ }
+ return false
+}
+
+// countCompleted returns the number of items ready for delivery, stopping at
+// the first non-complete item.
+//
+// The method assumes (at least) rlock is held.
+func (r *resultStore) countCompleted() int {
+ // We iterate from the already known complete point, and see
+ // if any more has completed since last count
+ index := atomic.LoadInt32(&r.indexIncomplete)
+ for ; ; index++ {
+ if index >= int32(len(r.items)) {
+ break
+ }
+ result := r.items[index]
+ if result == nil || !result.AllDone() {
+ break
+ }
+ }
+ atomic.StoreInt32(&r.indexIncomplete, index)
+ return int(index)
+}
+
+// GetCompleted returns the next batch of completed fetchResults
+func (r *resultStore) GetCompleted(limit int) []*fetchResult {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ completed := r.countCompleted()
+ if limit > completed {
+ limit = completed
+ }
+ results := make([]*fetchResult, limit)
+ copy(results, r.items[:limit])
+
+ // Delete the results from the cache and clear the tail.
+ copy(r.items, r.items[limit:])
+ for i := len(r.items) - limit; i < len(r.items); i++ {
+ r.items[i] = nil
+ }
+ // Advance the expected block number of the first cache entry
+ r.resultOffset += uint64(limit)
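+ // The completed-items watermark moves back by the same amount, since every
+ // remaining entry just shifted down 'limit' slots.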
+ atomic.AddInt32(&r.indexIncomplete, int32(-limit))
+
+ return results
+}
+
+// Prepare initialises the offset with the given block number
+func (r *resultStore) Prepare(offset uint64) {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ if r.resultOffset < offset {
+ r.resultOffset = offset
+ }
+}
diff --git a/les/downloader/statesync.go b/les/downloader/statesync.go
new file mode 100644
index 000000000..6c53e5577
--- /dev/null
+++ b/les/downloader/statesync.go
@@ -0,0 +1,615 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie"
+ "golang.org/x/crypto/sha3"
+)
+
+// stateReq represents a batch of state fetch requests grouped together into
+// a single data retrieval network packet.
+type stateReq struct {
+ nItems uint16 // Number of items requested for download (max is 384, so uint16 is sufficient)
+ trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts
+ codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
+ timeout time.Duration // Maximum round trip time for this to complete
+ timer *time.Timer // Timer to fire when the RTT timeout expires
+ peer *peerConnection // Peer that we're requesting from
+ delivered time.Time // Time when the packet was delivered (independent of when we process it)
+ response [][]byte // Response data of the peer (nil for timeouts)
+ dropped bool // Flag whether the peer dropped off early
+}
+
+// timedOut returns whether this request timed out.
+func (req *stateReq) timedOut() bool {
+ return req.response == nil
+}
+
+// stateSyncStats is a collection of progress stats to report during a state trie
+// sync to RPC requests as well as to display in user logs.
+type stateSyncStats struct {
+ processed uint64 // Number of state entries processed
+ duplicate uint64 // Number of state entries downloaded twice
+ unexpected uint64 // Number of non-requested state entries received
+ pending uint64 // Number of still pending state entries
+}
+
+// syncState starts downloading state with the given root hash.
+func (d *Downloader) syncState(root common.Hash) *stateSync {
+ // Create the state sync
+ s := newStateSync(d, root)
+ select {
+ case d.stateSyncStart <- s:
+ // If we tell the statesync to restart with a new root, we also need
+ // to wait for it to actually start -- when old requests have timed
+ // out or been delivered
+ <-s.started
+ case <-d.quitCh:
+ s.err = errCancelStateFetch
+ close(s.done)
+ }
+ return s
+}
+
+// stateFetcher manages the active state sync and accepts requests
+// on its behalf.
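+//
+// Only one state sync is ever active: runStateSync either returns nil once
+// the sync finishes, or the next sync that should take over the event loop.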
+func (d *Downloader) stateFetcher() {
+ for {
+ select {
+ case s := <-d.stateSyncStart:
+ for next := s; next != nil; {
+ next = d.runStateSync(next)
+ }
+ case <-d.stateCh:
+ // Ignore state responses while no sync is running.
+ case <-d.quitCh:
+ return
+ }
+ }
+}
+
+// runStateSync runs a state synchronisation until it completes or another root
+// hash is requested to be switched over to.
+func (d *Downloader) runStateSync(s *stateSync) *stateSync {
+ var (
+ active = make(map[string]*stateReq) // Currently in-flight requests
+ finished []*stateReq // Completed or failed requests
+ timeout = make(chan *stateReq) // Timed out active requests
+ )
+ log.Trace("State sync starting", "root", s.root)
+
+ defer func() {
+ // Cancel active request timers on exit. Also set peers to idle so they're
+ // available for the next sync.
+ for _, req := range active {
+ req.timer.Stop()
+ req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
+ }
+ }()
+ go s.run()
+ defer s.Cancel()
+
+ // Listen for peer departure events to cancel assigned tasks
+ peerDrop := make(chan *peerConnection, 1024)
+ peerSub := s.d.peers.SubscribePeerDrops(peerDrop)
+ defer peerSub.Unsubscribe()
+
+ for {
+ // Enable sending of the first buffered element if there is one.
+ var (
+ deliverReq *stateReq
+ deliverReqCh chan *stateReq
+ )
+ if len(finished) > 0 {
+ deliverReq = finished[0]
+ deliverReqCh = s.deliver
+ }
+
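+ // Note: sending on a nil channel blocks forever, so the delivery case in
+ // the select below stays disabled until there is a finished request.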
+ select {
+ // The stateSync lifecycle:
+ case next := <-d.stateSyncStart:
+ d.spindownStateSync(active, finished, timeout, peerDrop)
+ return next
+
+ case <-s.done:
+ d.spindownStateSync(active, finished, timeout, peerDrop)
+ return nil
+
+ // Send the next finished request to the current sync:
+ case deliverReqCh <- deliverReq:
+ // Shift out the first request, but also set the emptied slot to nil for GC
+ copy(finished, finished[1:])
+ finished[len(finished)-1] = nil
+ finished = finished[:len(finished)-1]
+
+ // Handle incoming state packs:
+ case pack := <-d.stateCh:
+ // Discard any data not requested (or previously timed out)
+ req := active[pack.PeerId()]
+ if req == nil {
+ log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
+ continue
+ }
+ // Finalize the request and queue up for processing
+ req.timer.Stop()
+ req.response = pack.(*statePack).states
+ req.delivered = time.Now()
+
+ finished = append(finished, req)
+ delete(active, pack.PeerId())
+
+ // Handle dropped peer connections:
+ case p := <-peerDrop:
+ // Skip if no request is currently pending
+ req := active[p.id]
+ if req == nil {
+ continue
+ }
+ // Finalize the request and queue up for processing
+ req.timer.Stop()
+ req.dropped = true
+ req.delivered = time.Now()
+
+ finished = append(finished, req)
+ delete(active, p.id)
+
+ // Handle timed-out requests:
+ case req := <-timeout:
+ // If the peer is already requesting something else, ignore the stale timeout.
+ // This can happen when the timeout and the delivery happen simultaneously,
+ // causing both pathways to trigger.
+ if active[req.peer.id] != req {
+ continue
+ }
+ req.delivered = time.Now()
+ // Move the timed out data back into the download queue
+ finished = append(finished, req)
+ delete(active, req.peer.id)
+
+ // Track outgoing state requests:
+ case req := <-d.trackStateReq:
+ // If an active request already exists for this peer, we have a problem. In
+ // theory the trie node schedule must never assign two requests to the same
+ // peer. In practice however, a peer might receive a request, disconnect and
+ // immediately reconnect before the previous times out. In this case the first
+ // request is never honored, alas we must not silently overwrite it, as that
+ // causes valid requests to go missing and sync to get stuck.
+ if old := active[req.peer.id]; old != nil {
+ log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id)
+ // Move the previous request to the finished set
+ old.timer.Stop()
+ old.dropped = true
+ old.delivered = time.Now()
+ finished = append(finished, old)
+ }
+ // Start a timer to notify the sync loop if the peer stalled.
+ req.timer = time.AfterFunc(req.timeout, func() {
+ timeout <- req
+ })
+ active[req.peer.id] = req
+ }
+ }
+}
+
+// spindownStateSync 'drains' the outstanding requests; some will be delivered and others
+// will time out. This is to ensure that when the next stateSync starts working, all peers
+// are marked as idle and de facto _are_ idle.
+func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
+ log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))
+ for len(active) > 0 {
+ var (
+ req *stateReq
+ reason string
+ )
+ select {
+ // Handle (drop) incoming state packs:
+ case pack := <-d.stateCh:
+ req = active[pack.PeerId()]
+ reason = "delivered"
+ // Handle dropped peer connections:
+ case p := <-peerDrop:
+ req = active[p.id]
+ reason = "peerdrop"
+ // Handle timed-out requests:
+ case req = <-timeout:
+ reason = "timeout"
+ }
+ if req == nil {
+ continue
+ }
+ req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
+ req.timer.Stop()
+ delete(active, req.peer.id)
+ req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
+ }
+ // The 'finished' set contains deliveries that we were going to pass to processing.
+ // Those are now moot, but we still need to set those peers as idle, which would
+ // otherwise have been done after processing
+ for _, req := range finished {
+ req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
+ }
+}
+
+// stateSync schedules requests for downloading a particular state trie defined
+// by a given state root.
+type stateSync struct {
+ d *Downloader // Downloader instance to access and manage current peerset
+
+ root common.Hash // State root currently being synced
+ sched *trie.Sync // State trie sync scheduler defining the tasks
+ keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with
+
+ trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval
+ codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval
+
+ numUncommitted int
+ bytesUncommitted int
+
+ started chan struct{} // Started is signalled once the sync loop starts
+
+ deliver chan *stateReq // Delivery channel multiplexing peer responses
+ cancel chan struct{} // Channel to signal a termination request
+ cancelOnce sync.Once // Ensures cancel only ever gets called once
+ done chan struct{} // Channel to signal termination completion
+ err error // Any error hit during sync (set before completion)
+}
+
+// trieTask represents a single trie node download task, containing a set of
+// peers already attempted retrieval from to detect stalled syncs and abort.
+type trieTask struct {
+ path [][]byte
+ attempts map[string]struct{}
+}
+
+// codeTask represents a single byte code download task, containing a set of
+// peers already attempted retrieval from to detect stalled syncs and abort.
+type codeTask struct {
+ attempts map[string]struct{}
+}
+
+// newStateSync creates a new state trie download scheduler. This method does not
+// yet start the sync. The user needs to call run to initiate.
+func newStateSync(d *Downloader, root common.Hash) *stateSync {
+ return &stateSync{
+ d: d,
+ root: root,
+ sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil),
+ keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState),
+ trieTasks: make(map[common.Hash]*trieTask),
+ codeTasks: make(map[common.Hash]*codeTask),
+ deliver: make(chan *stateReq),
+ cancel: make(chan struct{}),
+ done: make(chan struct{}),
+ started: make(chan struct{}),
+ }
+}
+
+// run starts the task assignment and response processing loop, blocking until
+// it finishes, and finally notifying any goroutines waiting for the loop to
+// finish.
+func (s *stateSync) run() {
+ close(s.started)
+ if s.d.snapSync {
+ s.err = s.d.SnapSyncer.Sync(s.root, s.cancel)
+ } else {
+ s.err = s.loop()
+ }
+ close(s.done)
+}
+
+// Wait blocks until the sync is done or canceled.
+func (s *stateSync) Wait() error {
+ <-s.done
+ return s.err
+}
+
+// Cancel cancels the sync and waits until it has shut down.
+func (s *stateSync) Cancel() error {
+ s.cancelOnce.Do(func() {
+ close(s.cancel)
+ })
+ return s.Wait()
+}
+
+// loop is the main event loop of a state trie sync. It is responsible for the
+// assignment of new tasks to peers (including sending them out) as well as
+// for the processing of inbound data. Note that the loop does not directly
+// receive data from peers; rather, those are buffered up in the downloader and
+// pushed here async. The reason is to decouple processing from data receipt
+// and timeouts.
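+//
+// The loop runs until the scheduler reports no more pending trie or code
+// tasks, then flushes any remaining uncommitted data via the deferred commit.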
+func (s *stateSync) loop() (err error) {
+ // Listen for new peer events to assign tasks to them
+ newPeer := make(chan *peerConnection, 1024)
+ peerSub := s.d.peers.SubscribeNewPeers(newPeer)
+ defer peerSub.Unsubscribe()
+ defer func() {
+ cerr := s.commit(true)
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ // Keep assigning new tasks until the sync completes or aborts
+ for s.sched.Pending() > 0 {
+ if err = s.commit(false); err != nil {
+ return err
+ }
+ s.assignTasks()
+ // Tasks assigned, wait for something to happen
+ select {
+ case <-newPeer:
+ // New peer arrived, try to assign it download tasks
+
+ case <-s.cancel:
+ return errCancelStateFetch
+
+ case <-s.d.cancelCh:
+ return errCanceled
+
+ case req := <-s.deliver:
+ // Response, disconnect or timeout triggered, drop the peer if stalling
+ log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
+ if req.nItems <= 2 && !req.dropped && req.timedOut() {
+ // 2 items are the minimum requested; if even that times out, we have no
+ // use for this peer at the moment.
+ log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
+ if s.d.dropPeer == nil {
+ // The dropPeer method is nil when `--copydb` is used for a local copy.
+ // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
+ req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id)
+ } else {
+ s.d.dropPeer(req.peer.id)
+
+ // If this peer was the master peer, abort sync immediately
+ s.d.cancelLock.RLock()
+ master := req.peer.id == s.d.cancelPeer
+ s.d.cancelLock.RUnlock()
+
+ if master {
+ s.d.cancel()
+ return errTimeout
+ }
+ }
+ }
+ // Process all the received blobs and check for stale delivery
+ delivered, err := s.process(req)
+ req.peer.SetNodeDataIdle(delivered, req.delivered)
+ if err != nil {
+ log.Warn("Node data write error", "err", err)
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (s *stateSync) commit(force bool) error {
+ if !force && s.bytesUncommitted < ethdb.IdealBatchSize {
+ return nil
+ }
+ start := time.Now()
+ b := s.d.stateDB.NewBatch()
+ if err := s.sched.Commit(b); err != nil {
+ return err
+ }
+ if err := b.Write(); err != nil {
+ return fmt.Errorf("DB write error: %v", err)
+ }
+ s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
+ s.numUncommitted = 0
+ s.bytesUncommitted = 0
+ return nil
+}
+
+// assignTasks attempts to assign new tasks to all idle peers, either from the
+// batch currently being retried, or fetching new data from the trie sync itself.
+func (s *stateSync) assignTasks() {
+ // Iterate over all idle peers and try to assign them state fetches
+ peers, _ := s.d.peers.NodeDataIdlePeers()
+ for _, p := range peers {
+ // Assign a batch of fetches proportional to the estimated latency/bandwidth
+ cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip())
+ req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()}
+
+ nodes, _, codes := s.fillTasks(cap, req)
+
+ // If the peer was assigned tasks to fetch, send the network request
+ if len(nodes)+len(codes) > 0 {
+ req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root)
+ select {
+ case s.d.trackStateReq <- req:
+ req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x
+ case <-s.cancel:
+ case <-s.d.cancelCh:
+ }
+ }
+ }
+}
+
+// fillTasks fills the given request object with a maximum of n state download
+// tasks to send to the remote peer.
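+//
+// Tasks already sitting in the retry queues count against n, so the scheduler
+// is only asked for the shortfall. Code tasks are handed out before trie-node
+// tasks, since delivered bytecode can be written to disk and forgotten about.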
+func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) {
+ // Refill available tasks from the scheduler.
+ if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {
+ nodes, paths, codes := s.sched.Missing(fill)
+ for i, hash := range nodes {
+ s.trieTasks[hash] = &trieTask{
+ path: paths[i],
+ attempts: make(map[string]struct{}),
+ }
+ }
+ for _, hash := range codes {
+ s.codeTasks[hash] = &codeTask{
+ attempts: make(map[string]struct{}),
+ }
+ }
+ }
+ // Find tasks that haven't been tried with the request's peer. Prefer code
+ // over trie nodes as those can be written to disk and forgotten about.
+ nodes = make([]common.Hash, 0, n)
+ paths = make([]trie.SyncPath, 0, n)
+ codes = make([]common.Hash, 0, n)
+
+ req.trieTasks = make(map[common.Hash]*trieTask, n)
+ req.codeTasks = make(map[common.Hash]*codeTask, n)
+
+ for hash, t := range s.codeTasks {
+ // Stop when we've gathered enough requests
+ if len(nodes)+len(codes) == n {
+ break
+ }
+ // Skip any requests we've already tried from this peer
+ if _, ok := t.attempts[req.peer.id]; ok {
+ continue
+ }
+ // Assign the request to this peer
+ t.attempts[req.peer.id] = struct{}{}
+ codes = append(codes, hash)
+ req.codeTasks[hash] = t
+ delete(s.codeTasks, hash)
+ }
+ for hash, t := range s.trieTasks {
+ // Stop when we've gathered enough requests
+ if len(nodes)+len(codes) == n {
+ break
+ }
+ // Skip any requests we've already tried from this peer
+ if _, ok := t.attempts[req.peer.id]; ok {
+ continue
+ }
+ // Assign the request to this peer
+ t.attempts[req.peer.id] = struct{}{}
+
+ nodes = append(nodes, hash)
+ paths = append(paths, t.path)
+
+ req.trieTasks[hash] = t
+ delete(s.trieTasks, hash)
+ }
+ req.nItems = uint16(len(nodes) + len(codes))
+ return nodes, paths, codes
+}
+
+// process iterates over a batch of delivered state data, injecting each item
+// into a running state sync, re-queuing any items that were requested but not
+// delivered. Returns the number of useful items the peer actually managed to
+// deliver, and any error that occurred.
+func (s *stateSync) process(req *stateReq) (int, error) {
+ // Collect processing stats and update progress if valid data was received
+ duplicate, unexpected, successful := 0, 0, 0
+
+ defer func(start time.Time) {
+ if duplicate > 0 || unexpected > 0 {
+ s.updateStats(0, duplicate, unexpected, time.Since(start))
+ }
+ }(time.Now())
+
+ // Iterate over all the delivered data and inject one-by-one into the trie
+ for _, blob := range req.response {
+ hash, err := s.processNodeData(blob)
+ switch err {
+ case nil:
+ s.numUncommitted++
+ s.bytesUncommitted += len(blob)
+ successful++
+ case trie.ErrNotRequested:
+ unexpected++
+ case trie.ErrAlreadyProcessed:
+ duplicate++
+ default:
+ return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
+ }
+ // Delete from both queues (one delivery is enough for the syncer)
+ delete(req.trieTasks, hash)
+ delete(req.codeTasks, hash)
+ }
+ // Put unfulfilled tasks back into the retry queue
+ npeers := s.d.peers.Len()
+ for hash, task := range req.trieTasks {
+ // If the node did deliver something, missing items may be due to a protocol
+ // limit or a previous timeout + delayed delivery. Both cases should permit
+ // the node to retry the missing items (to avoid single-peer stalls).
+ if len(req.response) > 0 || req.timedOut() {
+ delete(task.attempts, req.peer.id)
+ }
+ // If we've requested the node too many times already, it may be a malicious
+ // sync where nobody has the right data. Abort.
+ if len(task.attempts) >= npeers {
+ return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
+ }
+ // Missing item, place into the retry queue.
+ s.trieTasks[hash] = task
+ }
+ for hash, task := range req.codeTasks {
+ // If the node did deliver something, missing items may be due to a protocol
+ // limit or a previous timeout + delayed delivery. Both cases should permit
+ // the node to retry the missing items (to avoid single-peer stalls).
+ if len(req.response) > 0 || req.timedOut() {
+ delete(task.attempts, req.peer.id)
+ }
+ // If we've requested the node too many times already, it may be a malicious
+ // sync where nobody has the right data. Abort.
+ if len(task.attempts) >= npeers {
+ return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
+ }
+ // Missing item, place into the retry queue.
+ s.codeTasks[hash] = task
+ }
+ return successful, nil
+}
+
+// processNodeData tries to inject a trie node data blob delivered from a remote
+// peer into the state trie, returning whether anything useful was written or any
+// error occurred.
+func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {
+ res := trie.SyncResult{Data: blob}
+ s.keccak.Reset()
+ s.keccak.Write(blob)
+ s.keccak.Read(res.Hash[:])
+ err := s.sched.Process(res)
+ return res.Hash, err
+}
+
+// updateStats bumps the various state sync progress counters and displays a log
+// message for the user to see.
+func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) {
+ s.d.syncStatsLock.Lock()
+ defer s.d.syncStatsLock.Unlock()
+
+ s.d.syncStatsState.pending = uint64(s.sched.Pending())
+ s.d.syncStatsState.processed += uint64(written)
+ s.d.syncStatsState.duplicate += uint64(duplicate)
+ s.d.syncStatsState.unexpected += uint64(unexpected)
+
+ if written > 0 || duplicate > 0 || unexpected > 0 {
+ log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
+ }
+ if written > 0 {
+ rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
+ }
+}
diff --git a/les/downloader/testchain_test.go b/les/downloader/testchain_test.go
new file mode 100644
index 000000000..b9865f7e0
--- /dev/null
+++ b/les/downloader/testchain_test.go
@@ -0,0 +1,230 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "fmt"
+ "math/big"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// Test chain parameters.
+var (
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
+ testDB = rawdb.NewMemoryDatabase()
+ testGenesis = core.GenesisBlockForTesting(testDB, testAddress, big.NewInt(1000000000000000))
+)
+
+// The common prefix of all test chains:
+var testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis)
+
+// Different forks on top of the base chain:
+var testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain
+
+func init() {
+ var forkLen = int(fullMaxForkAncestry + 50)
+ var wg sync.WaitGroup
+ wg.Add(3)
+ go func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }()
+ go func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }()
+ go func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }()
+ wg.Wait()
+}
+
+type testChain struct {
+ genesis *types.Block
+ chain []common.Hash
+ headerm map[common.Hash]*types.Header
+ blockm map[common.Hash]*types.Block
+ receiptm map[common.Hash][]*types.Receipt
+ tdm map[common.Hash]*big.Int
+}
+
+// newTestChain creates a blockchain of the given length.
+func newTestChain(length int, genesis *types.Block) *testChain {
+ tc := new(testChain).copy(length)
+ tc.genesis = genesis
+ tc.chain = append(tc.chain, genesis.Hash())
+ tc.headerm[tc.genesis.Hash()] = tc.genesis.Header()
+ tc.tdm[tc.genesis.Hash()] = tc.genesis.Difficulty()
+ tc.blockm[tc.genesis.Hash()] = tc.genesis
+ tc.generate(length-1, 0, genesis, false)
+ return tc
+}
+
+// makeFork creates a fork on top of the test chain.
+func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain {
+ fork := tc.copy(tc.len() + length)
+ fork.generate(length, seed, tc.headBlock(), heavy)
+ return fork
+}
+
+// shorten creates a copy of the chain with the given length. It panics if the
+// length is longer than the number of available blocks.
+func (tc *testChain) shorten(length int) *testChain {
+ if length > tc.len() {
+ panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, tc.len()))
+ }
+ return tc.copy(length)
+}
+
+func (tc *testChain) copy(newlen int) *testChain {
+ cpy := &testChain{
+ genesis: tc.genesis,
+ headerm: make(map[common.Hash]*types.Header, newlen),
+ blockm: make(map[common.Hash]*types.Block, newlen),
+ receiptm: make(map[common.Hash][]*types.Receipt, newlen),
+ tdm: make(map[common.Hash]*big.Int, newlen),
+ }
+ for i := 0; i < len(tc.chain) && i < newlen; i++ {
+ hash := tc.chain[i]
+ cpy.chain = append(cpy.chain, tc.chain[i])
+ cpy.tdm[hash] = tc.tdm[hash]
+ cpy.blockm[hash] = tc.blockm[hash]
+ cpy.headerm[hash] = tc.headerm[hash]
+ cpy.receiptm[hash] = tc.receiptm[hash]
+ }
+ return cpy
+}
+
+// generate creates a chain of n blocks starting at and including parent.
+// The returned hash chain is ordered head->parent. In addition, every 22nd block
+// contains a transaction and every 5th an uncle to allow testing correct block
+// reassembly.
+func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) {
+ // start := time.Now()
+ // defer func() { fmt.Printf("test chain generated in %v\n", time.Since(start)) }()
+
+ blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) {
+ block.SetCoinbase(common.Address{seed})
+ // If a heavy chain is requested, delay blocks to raise difficulty
+ if heavy {
+ block.OffsetTime(-1)
+ }
+ // Include transactions addressed to the miner to make blocks more interesting.
+ if parent == tc.genesis && i%22 == 0 {
+ signer := types.MakeSigner(params.TestChainConfig, block.Number())
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey)
+ if err != nil {
+ panic(err)
+ }
+ block.AddTx(tx)
+ }
+ // If the block number is a multiple of 5, add a bonus uncle to the block
+ if i > 0 && i%5 == 0 {
+ block.AddUncle(&types.Header{
+ ParentHash: block.PrevBlock(i - 1).Hash(),
+ Number: big.NewInt(block.Number().Int64() - 1),
+ })
+ }
+ })
+
+ // Convert the block-chain into a hash-chain and header/block maps
+ td := new(big.Int).Set(tc.td(parent.Hash()))
+ for i, b := range blocks {
+ td.Add(td, b.Difficulty())
+ hash := b.Hash()
+ tc.chain = append(tc.chain, hash)
+ tc.blockm[hash] = b
+ tc.headerm[hash] = b.Header()
+ tc.receiptm[hash] = receipts[i]
+ tc.tdm[hash] = new(big.Int).Set(td)
+ }
+}
+
+// len returns the total number of blocks in the chain.
+func (tc *testChain) len() int {
+ return len(tc.chain)
+}
+
+// headBlock returns the head of the chain.
+func (tc *testChain) headBlock() *types.Block {
+ return tc.blockm[tc.chain[len(tc.chain)-1]]
+}
+
+// td returns the total difficulty of the given block.
+func (tc *testChain) td(hash common.Hash) *big.Int {
+ return tc.tdm[hash]
+}
+
+// headersByHash returns headers in order from the given hash.
+func (tc *testChain) headersByHash(origin common.Hash, amount int, skip int, reverse bool) []*types.Header {
+ num, _ := tc.hashToNumber(origin)
+ return tc.headersByNumber(num, amount, skip, reverse)
+}
+
+// headersByNumber returns headers from the given number.
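+// A skip of s yields every (s+1)-th header: origin, origin+s+1, origin+2(s+1),
+// and so on (or the descending equivalent when reverse is set).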
+func (tc *testChain) headersByNumber(origin uint64, amount int, skip int, reverse bool) []*types.Header {
+ result := make([]*types.Header, 0, amount)
+
+ if !reverse {
+ for num := origin; num < uint64(len(tc.chain)) && len(result) < amount; num += uint64(skip) + 1 {
+ if header, ok := tc.headerm[tc.chain[int(num)]]; ok {
+ result = append(result, header)
+ }
+ }
+ } else {
+ for num := int64(origin); num >= 0 && len(result) < amount; num -= int64(skip) + 1 {
+ if header, ok := tc.headerm[tc.chain[int(num)]]; ok {
+ result = append(result, header)
+ }
+ }
+ }
+ return result
+}
+
+// receipts returns the receipts of the given block hashes.
+func (tc *testChain) receipts(hashes []common.Hash) [][]*types.Receipt {
+ results := make([][]*types.Receipt, 0, len(hashes))
+ for _, hash := range hashes {
+ if receipt, ok := tc.receiptm[hash]; ok {
+ results = append(results, receipt)
+ }
+ }
+ return results
+}
+
+// bodies returns the block bodies of the given block hashes.
+func (tc *testChain) bodies(hashes []common.Hash) ([][]*types.Transaction, [][]*types.Header) {
+ transactions := make([][]*types.Transaction, 0, len(hashes))
+ uncles := make([][]*types.Header, 0, len(hashes))
+ for _, hash := range hashes {
+ if block, ok := tc.blockm[hash]; ok {
+ transactions = append(transactions, block.Transactions())
+ uncles = append(uncles, block.Uncles())
+ }
+ }
+ return transactions, uncles
+}
+
+func (tc *testChain) hashToNumber(target common.Hash) (uint64, bool) {
+ for num, hash := range tc.chain {
+ if hash == target {
+ return uint64(num), true
+ }
+ }
+ return 0, false
+}
diff --git a/les/downloader/types.go b/les/downloader/types.go
new file mode 100644
index 000000000..ff70bfa0e
--- /dev/null
+++ b/les/downloader/types.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// peerDropFn is a callback type for dropping a peer detected as malicious.
+type peerDropFn func(id string)
+
+// dataPack is a data message returned by a peer for some query.
+type dataPack interface {
+ PeerId() string
+ Items() int
+ Stats() string
+}
+
+// headerPack is a batch of block headers returned by a peer.
+type headerPack struct {
+ peerID string
+ headers []*types.Header
+}
+
+func (p *headerPack) PeerId() string { return p.peerID }
+func (p *headerPack) Items() int { return len(p.headers) }
+func (p *headerPack) Stats() string { return fmt.Sprintf("%d", len(p.headers)) }
+
+// bodyPack is a batch of block bodies returned by a peer.
+type bodyPack struct {
+ peerID string
+ transactions [][]*types.Transaction
+ uncles [][]*types.Header
+}
+
+func (p *bodyPack) PeerId() string { return p.peerID }
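+
+// Items returns the number of complete bodies in the pack; a body pairs a
+// transaction list with an uncle list, so the shorter slice is the bound.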
+func (p *bodyPack) Items() int {
+ if len(p.transactions) <= len(p.uncles) {
+ return len(p.transactions)
+ }
+ return len(p.uncles)
+}
+func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) }
+
+// receiptPack is a batch of receipts returned by a peer.
+type receiptPack struct {
+ peerID string
+ receipts [][]*types.Receipt
+}
+
+func (p *receiptPack) PeerId() string { return p.peerID }
+func (p *receiptPack) Items() int { return len(p.receipts) }
+func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) }
+
+// statePack is a batch of states returned by a peer.
+type statePack struct {
+ peerID string
+ states [][]byte
+}
+
+func (p *statePack) PeerId() string { return p.peerID }
+func (p *statePack) Items() int { return len(p.states) }
+func (p *statePack) Stats() string { return fmt.Sprintf("%d", len(p.states)) }
diff --git a/les/fetcher.go b/les/fetcher.go
index a6d1c93c4..d944d3285 100644
--- a/les/fetcher.go
+++ b/les/fetcher.go
@@ -27,8 +27,8 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/eth/fetcher"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/les/fetcher"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -153,9 +153,7 @@ type lightFetcher struct {
synchronise func(peer *serverPeer)
// Test fields or hooks
- noAnnounce bool
newHeadHook func(*types.Header)
- newAnnounce func(*serverPeer, *announceData)
}
// newLightFetcher creates a light fetcher instance.
@@ -474,12 +472,6 @@ func (f *lightFetcher) mainloop() {
// announce processes a new announcement message received from a peer.
func (f *lightFetcher) announce(p *serverPeer, head *announceData) {
- if f.newAnnounce != nil {
- f.newAnnounce(p, head)
- }
- if f.noAnnounce {
- return
- }
select {
case f.announceCh <- &announce{peerid: p.ID(), trust: p.trusted, data: head}:
case <-f.closeCh:
diff --git a/les/fetcher/block_fetcher.go b/les/fetcher/block_fetcher.go
new file mode 100644
index 000000000..283008db0
--- /dev/null
+++ b/les/fetcher/block_fetcher.go
@@ -0,0 +1,889 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// This is a temporary package whilst working on the eth/66 blocking refactors.
+// After that work is done, les needs to be refactored to use the new package,
+// or alternatively use a stripped down version of it. Either way, we need to
+// keep the changes scoped so duplicating temporarily seems the sanest.
+package fetcher
+
+import (
+ "errors"
+ "math/rand"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/prque"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+const (
+ lightTimeout = time.Millisecond // Time allowance before an announced header is explicitly requested
+ arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
+ gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
+ fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block/transaction
+)
+
+const (
+ maxUncleDist = 7 // Maximum allowed backward distance from the chain head
+ maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
+ hashLimit = 256 // Maximum number of unique blocks or headers a peer may have announced
+ blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
+)
+
+var (
+ blockAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
+ blockAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
+ blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
+ blockAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)
+
+ blockBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
+ blockBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
+ blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
+ blockBroadcastDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil)
+
+ headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
+ bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)
+
+ headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
+ headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
+ bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
+ bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
+)
+
+var errTerminated = errors.New("terminated")
+
+// HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
+type HeaderRetrievalFn func(common.Hash) *types.Header
+
+// blockRetrievalFn is a callback type for retrieving a block from the local chain.
+type blockRetrievalFn func(common.Hash) *types.Block
+
+// headerRequesterFn is a callback type for sending a header retrieval request.
+type headerRequesterFn func(common.Hash) error
+
+// bodyRequesterFn is a callback type for sending a body retrieval request.
+type bodyRequesterFn func([]common.Hash) error
+
+// headerVerifierFn is a callback type to verify a block's header for fast propagation.
+type headerVerifierFn func(header *types.Header) error
+
+// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
+type blockBroadcasterFn func(block *types.Block, propagate bool)
+
+// chainHeightFn is a callback type to retrieve the current chain height.
+type chainHeightFn func() uint64
+
+// headersInsertFn is a callback type to insert a batch of headers into the local chain.
+type headersInsertFn func(headers []*types.Header) (int, error)
+
+// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
+type chainInsertFn func(types.Blocks) (int, error)
+
+// peerDropFn is a callback type for dropping a peer detected as malicious.
+type peerDropFn func(id string)
+
+// blockAnnounce is the hash notification of the availability of a new block in the
+// network.
+type blockAnnounce struct {
+ hash common.Hash // Hash of the block being announced
+ number uint64 // Number of the block being announced (0 = unknown | old protocol)
+ header *types.Header // Header of the block partially reassembled (new protocol)
+ time time.Time // Timestamp of the announcement
+
+ origin string // Identifier of the peer originating the notification
+
+ fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
+ fetchBodies bodyRequesterFn // Fetcher function to retrieve the body of an announced block
+}
+
+// headerFilterTask represents a batch of headers needing fetcher filtering.
+type headerFilterTask struct {
+ peer string // The source peer of block headers
+ headers []*types.Header // Collection of headers to filter
+ time time.Time // Arrival time of the headers
+}
+
+// bodyFilterTask represents a batch of block bodies (transactions and uncles)
+// needing fetcher filtering.
+type bodyFilterTask struct {
+ peer string // The source peer of block bodies
+ transactions [][]*types.Transaction // Collection of transactions per block bodies
+ uncles [][]*types.Header // Collection of uncles per block bodies
+ time time.Time // Arrival time of the blocks' contents
+}
+
+// blockOrHeaderInject represents a scheduled import operation.
+type blockOrHeaderInject struct {
+ origin string
+
+ header *types.Header // Used for light mode fetcher which only cares about header.
+ block *types.Block // Used for normal mode fetcher which imports full block.
+}
+
+// number returns the block number of the injected object.
+func (inject *blockOrHeaderInject) number() uint64 {
+ if inject.header != nil {
+ return inject.header.Number.Uint64()
+ }
+ return inject.block.NumberU64()
+}
+
+// hash returns the block hash of the injected object.
+func (inject *blockOrHeaderInject) hash() common.Hash {
+ if inject.header != nil {
+ return inject.header.Hash()
+ }
+ return inject.block.Hash()
+}
+
+// BlockFetcher is responsible for accumulating block announcements from various peers
+// and scheduling them for retrieval.
+type BlockFetcher struct {
+ light bool // Indicator whether it's a light fetcher or a normal one.
+
+ // Various event channels
+ notify chan *blockAnnounce
+ inject chan *blockOrHeaderInject
+
+ headerFilter chan chan *headerFilterTask
+ bodyFilter chan chan *bodyFilterTask
+
+ done chan common.Hash
+ quit chan struct{}
+
+ // Announce states
+ announces map[string]int // Per peer blockAnnounce counts to prevent memory exhaustion
+ announced map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
+ fetching map[common.Hash]*blockAnnounce // Announced blocks, currently fetching
+ fetched map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
+ completing map[common.Hash]*blockAnnounce // Blocks with headers, currently body-completing
+
+ // Block cache
+ queue *prque.Prque // Queue containing the import operations (block number sorted)
+ queues map[string]int // Per peer block counts to prevent memory exhaustion
+ queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports)
+
+ // Callbacks
+ getHeader HeaderRetrievalFn // Retrieves a header from the local chain
+ getBlock blockRetrievalFn // Retrieves a block from the local chain
+ verifyHeader headerVerifierFn // Checks if a block's headers have a valid proof of work
+ broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
+ chainHeight chainHeightFn // Retrieves the current chain's height
+ insertHeaders headersInsertFn // Injects a batch of headers into the chain
+ insertChain chainInsertFn // Injects a batch of blocks into the chain
+ dropPeer peerDropFn // Drops a peer for misbehaving
+
+ // Testing hooks
+ announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
+ queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
+ fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch
+ completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62)
+ importedHook func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62)
+}
+
+// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
+func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
+ return &BlockFetcher{
+ light: light,
+ notify: make(chan *blockAnnounce),
+ inject: make(chan *blockOrHeaderInject),
+ headerFilter: make(chan chan *headerFilterTask),
+ bodyFilter: make(chan chan *bodyFilterTask),
+ done: make(chan common.Hash),
+ quit: make(chan struct{}),
+ announces: make(map[string]int),
+ announced: make(map[common.Hash][]*blockAnnounce),
+ fetching: make(map[common.Hash]*blockAnnounce),
+ fetched: make(map[common.Hash][]*blockAnnounce),
+ completing: make(map[common.Hash]*blockAnnounce),
+ queue: prque.New(nil),
+ queues: make(map[string]int),
+ queued: make(map[common.Hash]*blockOrHeaderInject),
+ getHeader: getHeader,
+ getBlock: getBlock,
+ verifyHeader: verifyHeader,
+ broadcastBlock: broadcastBlock,
+ chainHeight: chainHeight,
+ insertHeaders: insertHeaders,
+ insertChain: insertChain,
+ dropPeer: dropPeer,
+ }
+}
+
+// Start boots up the announcement based synchroniser, accepting and processing
+// hash notifications and block fetches until termination is requested.
+func (f *BlockFetcher) Start() {
+ go f.loop()
+}
+
+// Stop terminates the announcement based synchroniser, canceling all pending
+// operations.
+func (f *BlockFetcher) Stop() {
+ close(f.quit)
+}
+
+// Notify announces to the fetcher the potential availability of a new block in
+// the network.
+func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
+ headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
+ block := &blockAnnounce{
+ hash: hash,
+ number: number,
+ time: time,
+ origin: peer,
+ fetchHeader: headerFetcher,
+ fetchBodies: bodyFetcher,
+ }
+ select {
+ case f.notify <- block:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// Enqueue tries to fill gaps in the fetcher's future import queue.
+func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
+ op := &blockOrHeaderInject{
+ origin: peer,
+ block: block,
+ }
+ select {
+ case f.inject <- op:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
+// returning those that should be handled differently.
+func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
+ log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
+
+ // Send the filter channel to the fetcher
+ filter := make(chan *headerFilterTask)
+
+ select {
+ case f.headerFilter <- filter:
+ case <-f.quit:
+ return nil
+ }
+ // Request the filtering of the header list
+ select {
+ case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
+ case <-f.quit:
+ return nil
+ }
+ // Retrieve the headers remaining after filtering
+ select {
+ case task := <-filter:
+ return task.headers
+ case <-f.quit:
+ return nil
+ }
+}
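
// An aside on the handshake above, not part of the patch: FilterHeaders talks
// to the main loop in three steps -- hand over a private reply channel, send
// the task on it, then read the filtered remainder back from that same
// channel -- aborting at each step if the fetcher quits. A stripped-down,
// runnable sketch of the same pattern under invented names:

package main

import "fmt"

type task struct{ items []int }

// run plays the part of the fetcher loop: it accepts a reply channel, reads
// the task from it, keeps what it claims, and returns the remainder.
func run(filterCh chan chan *task, quit chan struct{}) {
	for {
		select {
		case reply := <-filterCh: // step 1: caller hands over a reply channel
			t := <-reply // step 2: caller sends the task on it
			var rest []int
			for _, v := range t.items {
				if v%2 != 0 { // pretend the even items were explicitly requested
					rest = append(rest, v)
				}
			}
			reply <- &task{items: rest} // step 3: return the unclaimed remainder
		case <-quit:
			return
		}
	}
}

func main() {
	filterCh, quit := make(chan chan *task), make(chan struct{})
	go run(filterCh, quit)

	reply := make(chan *task)
	filterCh <- reply                        // step 1
	reply <- &task{items: []int{1, 2, 3, 4}} // step 2
	fmt.Println((<-reply).items)             // step 3: prints [1 3]
	close(quit)
}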
+
+// FilterBodies extracts all the block bodies that were explicitly requested by
+// the fetcher, returning those that should be handled differently.
+func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
+ log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))
+
+ // Send the filter channel to the fetcher
+ filter := make(chan *bodyFilterTask)
+
+ select {
+ case f.bodyFilter <- filter:
+ case <-f.quit:
+ return nil, nil
+ }
+ // Request the filtering of the body list
+ select {
+ case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
+ case <-f.quit:
+ return nil, nil
+ }
+ // Retrieve the bodies remaining after filtering
+ select {
+ case task := <-filter:
+ return task.transactions, task.uncles
+ case <-f.quit:
+ return nil, nil
+ }
+}
+
+// loop is the main fetcher loop, checking and processing various notification
+// events.
+func (f *BlockFetcher) loop() {
+ // Iterate the block fetching until a quit is requested
+ var (
+ fetchTimer = time.NewTimer(0)
+ completeTimer = time.NewTimer(0)
+ )
+ <-fetchTimer.C // clear out the channel
+ <-completeTimer.C
+ defer fetchTimer.Stop()
+ defer completeTimer.Stop()
+
+ for {
+ // Clean up any expired block fetches
+ for hash, announce := range f.fetching {
+ if time.Since(announce.time) > fetchTimeout {
+ f.forgetHash(hash)
+ }
+ }
+ // Import any queued blocks that could potentially fit
+ height := f.chainHeight()
+ for !f.queue.Empty() {
+ op := f.queue.PopItem().(*blockOrHeaderInject)
+ hash := op.hash()
+ if f.queueChangeHook != nil {
+ f.queueChangeHook(hash, false)
+ }
+ // If too high up the chain, continue later
+ number := op.number()
+ if number > height+1 {
+ f.queue.Push(op, -int64(number))
+ if f.queueChangeHook != nil {
+ f.queueChangeHook(hash, true)
+ }
+ break
+ }
+ // Otherwise if fresh and still unknown, try and import
+ if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) {
+ f.forgetBlock(hash)
+ continue
+ }
+ if f.light {
+ f.importHeaders(op.origin, op.header)
+ } else {
+ f.importBlocks(op.origin, op.block)
+ }
+ }
+ // Wait for an outside event to occur
+ select {
+ case <-f.quit:
+ // BlockFetcher terminating, abort all operations
+ return
+
+ case notification := <-f.notify:
+ // A block was announced, make sure the peer isn't DOSing us
+ blockAnnounceInMeter.Mark(1)
+
+ count := f.announces[notification.origin] + 1
+ if count > hashLimit {
+ log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
+ blockAnnounceDOSMeter.Mark(1)
+ break
+ }
+ // If we have a valid block number, check that it's potentially useful
+ if notification.number > 0 {
+ if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
+ log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
+ blockAnnounceDropMeter.Mark(1)
+ break
+ }
+ }
+ // All is well, schedule the announce if block's not yet downloading
+ if _, ok := f.fetching[notification.hash]; ok {
+ break
+ }
+ if _, ok := f.completing[notification.hash]; ok {
+ break
+ }
+ f.announces[notification.origin] = count
+ f.announced[notification.hash] = append(f.announced[notification.hash], notification)
+ if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
+ f.announceChangeHook(notification.hash, true)
+ }
+ if len(f.announced) == 1 {
+ f.rescheduleFetch(fetchTimer)
+ }
+
+ case op := <-f.inject:
+ // A direct block insertion was requested, try and fill any pending gaps
+ blockBroadcastInMeter.Mark(1)
+
+ // Now only direct block injection is allowed, so silently drop any
+ // header injection we receive here.
+ if f.light {
+ continue
+ }
+ f.enqueue(op.origin, nil, op.block)
+
+ case hash := <-f.done:
+ // A pending import finished, remove all traces of the notification
+ f.forgetHash(hash)
+ f.forgetBlock(hash)
+
+ case <-fetchTimer.C:
+ // At least one block's timer ran out, check for needing retrieval
+ request := make(map[string][]common.Hash)
+
+ for hash, announces := range f.announced {
+ // In the current LES protocol (les2/les3), only header announces are
+ // available, so there is no need to wait long for the header broadcast.
+ timeout := arriveTimeout - gatherSlack
+ if f.light {
+ timeout = 0
+ }
+ if time.Since(announces[0].time) > timeout {
+ // Pick a random peer to retrieve from, reset all others
+ announce := announces[rand.Intn(len(announces))]
+ f.forgetHash(hash)
+
+ // If the block still didn't arrive, queue for fetching
+ if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) {
+ request[announce.origin] = append(request[announce.origin], hash)
+ f.fetching[hash] = announce
+ }
+ }
+ }
+ // Send out all block header requests
+ for peer, hashes := range request {
+ log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)
+
+ // Create a closure of the fetch and schedule it on a new thread
+ fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
+ go func() {
+ if f.fetchingHook != nil {
+ f.fetchingHook(hashes)
+ }
+ for _, hash := range hashes {
+ headerFetchMeter.Mark(1)
+ fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
+ }
+ }()
+ }
+ // Schedule the next fetch if blocks are still pending
+ f.rescheduleFetch(fetchTimer)
+
+ case <-completeTimer.C:
+ // At least one header's timer ran out, retrieve everything
+ request := make(map[string][]common.Hash)
+
+ for hash, announces := range f.fetched {
+ // Pick a random peer to retrieve from, reset all others
+ announce := announces[rand.Intn(len(announces))]
+ f.forgetHash(hash)
+
+ // If the block still didn't arrive, queue for completion
+ if f.getBlock(hash) == nil {
+ request[announce.origin] = append(request[announce.origin], hash)
+ f.completing[hash] = announce
+ }
+ }
+ // Send out all block body requests
+ for peer, hashes := range request {
+ log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)
+
+ // Create a closure of the fetch and schedule it on a new thread
+ if f.completingHook != nil {
+ f.completingHook(hashes)
+ }
+ bodyFetchMeter.Mark(int64(len(hashes)))
+ go f.completing[hashes[0]].fetchBodies(hashes)
+ }
+ // Schedule the next fetch if blocks are still pending
+ f.rescheduleComplete(completeTimer)
+
+ case filter := <-f.headerFilter:
+ // Headers arrived from a remote peer. Extract those that were explicitly
+ // requested by the fetcher, and return everything else so it's delivered
+ // to other parts of the system.
+ var task *headerFilterTask
+ select {
+ case task = <-filter:
+ case <-f.quit:
+ return
+ }
+ headerFilterInMeter.Mark(int64(len(task.headers)))
+
+ // Split the batch of headers into unknown ones (to return to the caller),
+ // known incomplete ones (requiring body retrievals) and completed blocks.
+ unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{}
+ for _, header := range task.headers {
+ hash := header.Hash()
+
+ // Filter fetcher-requested headers from other synchronisation algorithms
+ if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
+ // If the delivered header does not match the promised number, drop the announcer
+ if header.Number.Uint64() != announce.number {
+ log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
+ f.dropPeer(announce.origin)
+ f.forgetHash(hash)
+ continue
+ }
+ // Collect all headers only if we are running in light
+ // mode and the headers are not imported by other means.
+ if f.light {
+ if f.getHeader(hash) == nil {
+ announce.header = header
+ lightHeaders = append(lightHeaders, announce)
+ }
+ f.forgetHash(hash)
+ continue
+ }
+ // Only keep if not imported by other means
+ if f.getBlock(hash) == nil {
+ announce.header = header
+ announce.time = task.time
+
+ // If the block is empty (header only), short circuit into the final import queue
+ if header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash {
+ log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
+
+ block := types.NewBlockWithHeader(header)
+ block.ReceivedAt = task.time
+
+ complete = append(complete, block)
+ f.completing[hash] = announce
+ continue
+ }
+ // Otherwise add to the list of blocks needing completion
+ incomplete = append(incomplete, announce)
+ } else {
+ log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
+ f.forgetHash(hash)
+ }
+ } else {
+ // BlockFetcher doesn't know about it, add to the return list
+ unknown = append(unknown, header)
+ }
+ }
+ headerFilterOutMeter.Mark(int64(len(unknown)))
+ select {
+ case filter <- &headerFilterTask{headers: unknown, time: task.time}:
+ case <-f.quit:
+ return
+ }
+ // Schedule the retrieved headers for body completion
+ for _, announce := range incomplete {
+ hash := announce.header.Hash()
+ if _, ok := f.completing[hash]; ok {
+ continue
+ }
+ f.fetched[hash] = append(f.fetched[hash], announce)
+ if len(f.fetched) == 1 {
+ f.rescheduleComplete(completeTimer)
+ }
+ }
+ // Schedule the header for light fetcher import
+ for _, announce := range lightHeaders {
+ f.enqueue(announce.origin, announce.header, nil)
+ }
+ // Schedule the header-only blocks for import
+ for _, block := range complete {
+ if announce := f.completing[block.Hash()]; announce != nil {
+ f.enqueue(announce.origin, nil, block)
+ }
+ }
+
+ case filter := <-f.bodyFilter:
+ // Block bodies arrived, extract any explicitly requested blocks, return the rest
+ var task *bodyFilterTask
+ select {
+ case task = <-filter:
+ case <-f.quit:
+ return
+ }
+ bodyFilterInMeter.Mark(int64(len(task.transactions)))
+ blocks := []*types.Block{}
+ // Abort early if there's nothing explicitly requested
+ if len(f.completing) > 0 {
+ for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
+ // Match up a body to any possible completion request
+ var (
+ matched = false
+ uncleHash common.Hash // calculated lazily and reused
+ txnHash common.Hash // calculated lazily and reused
+ )
+ for hash, announce := range f.completing {
+ if f.queued[hash] != nil || announce.origin != task.peer {
+ continue
+ }
+ if uncleHash == (common.Hash{}) {
+ uncleHash = types.CalcUncleHash(task.uncles[i])
+ }
+ if uncleHash != announce.header.UncleHash {
+ continue
+ }
+ if txnHash == (common.Hash{}) {
+ txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil))
+ }
+ if txnHash != announce.header.TxHash {
+ continue
+ }
+ // Mark the body matched, reassemble if still unknown
+ matched = true
+ if f.getBlock(hash) == nil {
+ block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
+ block.ReceivedAt = task.time
+ blocks = append(blocks, block)
+ } else {
+ f.forgetHash(hash)
+ }
+
+ }
+ if matched {
+ task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
+ task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
+ i--
+ continue
+ }
+ }
+ }
+ bodyFilterOutMeter.Mark(int64(len(task.transactions)))
+ select {
+ case filter <- task:
+ case <-f.quit:
+ return
+ }
+ // Schedule the retrieved blocks for ordered import
+ for _, block := range blocks {
+ if announce := f.completing[block.Hash()]; announce != nil {
+ f.enqueue(announce.origin, nil, block)
+ }
+ }
+ }
+ }
+}
+
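// An aside on the timer handling in loop() above, not part of the patch: the
// two `<-fetchTimer.C` reads drain the tick that time.NewTimer(0) fires
// immediately, so the Reset calls issued by the reschedule helpers below
// always operate on an empty channel. The idiom in isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(0)
	<-t.C // drain the immediate tick so no stale value lingers in the channel
	defer t.Stop()

	t.Reset(10 * time.Millisecond) // safe: the channel is known to be empty
	<-t.C
	fmt.Println("timer fired exactly once after Reset")
}
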
+// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
+func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
+ // Short circuit if no blocks are announced
+ if len(f.announced) == 0 {
+ return
+ }
+ // Schedule announcement retrieval quickly for light mode
+ // since the server won't send any headers to the client.
+ if f.light {
+ fetch.Reset(lightTimeout)
+ return
+ }
+ // Otherwise find the earliest expiring announcement
+ earliest := time.Now()
+ for _, announces := range f.announced {
+ if earliest.After(announces[0].time) {
+ earliest = announces[0].time
+ }
+ }
+ fetch.Reset(arriveTimeout - time.Since(earliest))
+}
+
+// rescheduleComplete resets the specified completion timer to the next fetch timeout.
+func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
+ // Short circuit if no headers are fetched
+ if len(f.fetched) == 0 {
+ return
+ }
+ // Otherwise find the earliest expiring announcement
+ earliest := time.Now()
+ for _, announces := range f.fetched {
+ if earliest.After(announces[0].time) {
+ earliest = announces[0].time
+ }
+ }
+ complete.Reset(gatherSlack - time.Since(earliest))
+}
+
+// enqueue schedules a new header or block import operation, if the component
+// to be imported has not yet been seen.
+func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) {
+ var (
+ hash common.Hash
+ number uint64
+ )
+ if header != nil {
+ hash, number = header.Hash(), header.Number.Uint64()
+ } else {
+ hash, number = block.Hash(), block.NumberU64()
+ }
+ // Ensure the peer isn't DOSing us
+ count := f.queues[peer] + 1
+ if count > blockLimit {
+ log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
+ blockBroadcastDOSMeter.Mark(1)
+ f.forgetHash(hash)
+ return
+ }
+ // Discard any past or too distant blocks
+ if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
+ log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
+ blockBroadcastDropMeter.Mark(1)
+ f.forgetHash(hash)
+ return
+ }
+ // Schedule the block for future importing
+ if _, ok := f.queued[hash]; !ok {
+ op := &blockOrHeaderInject{origin: peer}
+ if header != nil {
+ op.header = header
+ } else {
+ op.block = block
+ }
+ f.queues[peer] = count
+ f.queued[hash] = op
+ f.queue.Push(op, -int64(number))
+ if f.queueChangeHook != nil {
+ f.queueChangeHook(hash, true)
+ }
+ log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
+ }
+}
+
+// importHeaders spawns a new goroutine to run a header insertion into the chain.
+// If the header's number is at the same height as the current import phase, it
+// updates the phase states accordingly.
+func (f *BlockFetcher) importHeaders(peer string, header *types.Header) {
+ hash := header.Hash()
+ log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash)
+
+ go func() {
+ defer func() { f.done <- hash }()
+ // If the parent's unknown, abort insertion
+ parent := f.getHeader(header.ParentHash)
+ if parent == nil {
+ log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash)
+ return
+ }
+ // Validate the header and if something went wrong, drop the peer
+ if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock {
+ log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
+ f.dropPeer(peer)
+ return
+ }
+ // Run the actual import and log any issues
+ if _, err := f.insertHeaders([]*types.Header{header}); err != nil {
+ log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
+ return
+ }
+ // Invoke the testing hook if needed
+ if f.importedHook != nil {
+ f.importedHook(header, nil)
+ }
+ }()
+}
+
+// importBlocks spawns a new goroutine to run a block insertion into the chain. If the
+// block's number is at the same height as the current import phase, it updates
+// the phase states accordingly.
+func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
+ hash := block.Hash()
+
+ // Run the import on a new thread
+ log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
+ go func() {
+ defer func() { f.done <- hash }()
+
+ // If the parent's unknown, abort insertion
+ parent := f.getBlock(block.ParentHash())
+ if parent == nil {
+ log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
+ return
+ }
+ // Quickly validate the header and propagate the block if it passes
+ switch err := f.verifyHeader(block.Header()); err {
+ case nil:
+ // All ok, quickly propagate to our peers
+ blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
+ go f.broadcastBlock(block, true)
+
+ case consensus.ErrFutureBlock:
+ // Weird future block, don't fail, but neither propagate
+
+ default:
+ // Something went very wrong, drop the peer
+ log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
+ f.dropPeer(peer)
+ return
+ }
+ // Run the actual import and log any issues
+ if _, err := f.insertChain(types.Blocks{block}); err != nil {
+ log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
+ return
+ }
+ // If import succeeded, broadcast the block
+ blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
+ go f.broadcastBlock(block, false)
+
+ // Invoke the testing hook if needed
+ if f.importedHook != nil {
+ f.importedHook(nil, block)
+ }
+ }()
+}
+
+// forgetHash removes all traces of a block announcement from the fetcher's
+// internal state.
+func (f *BlockFetcher) forgetHash(hash common.Hash) {
+ // Remove all pending announces and decrement DOS counters
+ if announceMap, ok := f.announced[hash]; ok {
+ for _, announce := range announceMap {
+ f.announces[announce.origin]--
+ if f.announces[announce.origin] <= 0 {
+ delete(f.announces, announce.origin)
+ }
+ }
+ delete(f.announced, hash)
+ if f.announceChangeHook != nil {
+ f.announceChangeHook(hash, false)
+ }
+ }
+ // Remove any pending fetches and decrement the DOS counters
+ if announce := f.fetching[hash]; announce != nil {
+ f.announces[announce.origin]--
+ if f.announces[announce.origin] <= 0 {
+ delete(f.announces, announce.origin)
+ }
+ delete(f.fetching, hash)
+ }
+
+ // Remove any pending completion requests and decrement the DOS counters
+ for _, announce := range f.fetched[hash] {
+ f.announces[announce.origin]--
+ if f.announces[announce.origin] <= 0 {
+ delete(f.announces, announce.origin)
+ }
+ }
+ delete(f.fetched, hash)
+
+ // Remove any pending completions and decrement the DOS counters
+ if announce := f.completing[hash]; announce != nil {
+ f.announces[announce.origin]--
+ if f.announces[announce.origin] <= 0 {
+ delete(f.announces, announce.origin)
+ }
+ delete(f.completing, hash)
+ }
+}
+
+// forgetBlock removes all traces of a queued block from the fetcher's internal
+// state.
+func (f *BlockFetcher) forgetBlock(hash common.Hash) {
+ if insert := f.queued[hash]; insert != nil {
+ f.queues[insert.origin]--
+ if f.queues[insert.origin] == 0 {
+ delete(f.queues, insert.origin)
+ }
+ delete(f.queued, hash)
+ }
+}
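The exported surface of the new package is deliberately small: construct a fetcher with NewBlockFetcher, Start it, feed it network events through Notify and Enqueue (plus FilterHeaders/FilterBodies from the message handlers), and Stop it on shutdown. A hedged wiring sketch with inert stub callbacks; apart from the fetcher API itself, everything below is invented for illustration:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/les/fetcher"
)

func main() {
	// All callbacks are inert stubs; a real consumer wires them to its chain
	// and peer set.
	f := fetcher.NewBlockFetcher(
		false, // full (non-light) mode
		func(common.Hash) *types.Header { return nil },       // getHeader
		func(common.Hash) *types.Block { return nil },        // getBlock
		func(*types.Header) error { return nil },             // verifyHeader
		func(*types.Block, bool) {},                          // broadcastBlock
		func() uint64 { return 0 },                           // chainHeight
		func([]*types.Header) (int, error) { return 0, nil }, // insertHeaders
		func(types.Blocks) (int, error) { return 0, nil },    // insertChain
		func(string) {},                                      // dropPeer
	)
	f.Start()
	defer f.Stop()

	// A hash announcement from a hypothetical peer "p1"; the two trailing
	// callbacks are the peer's header and body request functions.
	f.Notify("p1", common.Hash{}, 1, time.Now(),
		func(common.Hash) error { return nil },
		func([]common.Hash) error { return nil })
}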
diff --git a/les/fetcher/block_fetcher_test.go b/les/fetcher/block_fetcher_test.go
new file mode 100644
index 000000000..b6d1125b5
--- /dev/null
+++ b/les/fetcher/block_fetcher_test.go
@@ -0,0 +1,896 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package fetcher
+
+import (
+ "errors"
+ "math/big"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+var (
+ testdb = rawdb.NewMemoryDatabase()
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
+ genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000000000))
+ unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil))
+)
+
+// makeChain creates a chain of n blocks starting at and including parent.
+// The returned hash chain is ordered head->parent. In addition, every 3rd block
+// contains a transaction and every 5th an uncle to allow testing correct block
+// reassembly.
+func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
+ blocks, _ := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {
+ block.SetCoinbase(common.Address{seed})
+
+ // If the block number is multiple of 3, send a bonus transaction to the miner
+ if parent == genesis && i%3 == 0 {
+ signer := types.MakeSigner(params.TestChainConfig, block.Number())
+ tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey)
+ if err != nil {
+ panic(err)
+ }
+ block.AddTx(tx)
+ }
+ // If the block number is a multiple of 5, add a bonus uncle to the block
+ if i%5 == 0 {
+ block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
+ }
+ })
+ hashes := make([]common.Hash, n+1)
+ hashes[len(hashes)-1] = parent.Hash()
+ blockm := make(map[common.Hash]*types.Block, n+1)
+ blockm[parent.Hash()] = parent
+ for i, b := range blocks {
+ hashes[len(hashes)-i-2] = b.Hash()
+ blockm[b.Hash()] = b
+ }
+ return hashes, blockm
+}
+
+// fetcherTester is a test simulator for mocking out the local block chain.
+type fetcherTester struct {
+ fetcher *BlockFetcher
+
+ hashes []common.Hash // Hash chain belonging to the tester
+ headers map[common.Hash]*types.Header // Headers belonging to the tester
+ blocks map[common.Hash]*types.Block // Blocks belonging to the tester
+ drops map[string]bool // Map of peers dropped by the fetcher
+
+ lock sync.RWMutex
+}
+
+// newTester creates a new fetcher test mocker.
+func newTester(light bool) *fetcherTester {
+ tester := &fetcherTester{
+ hashes: []common.Hash{genesis.Hash()},
+ headers: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
+ blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
+ drops: make(map[string]bool),
+ }
+ tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer)
+ tester.fetcher.Start()
+
+ return tester
+}
+
+// getHeader retrieves a header from the tester's block chain.
+func (f *fetcherTester) getHeader(hash common.Hash) *types.Header {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.headers[hash]
+}
+
+// getBlock retrieves a block from the tester's block chain.
+func (f *fetcherTester) getBlock(hash common.Hash) *types.Block {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.blocks[hash]
+}
+
+// verifyHeader is a nop placeholder for the block header verification.
+func (f *fetcherTester) verifyHeader(header *types.Header) error {
+ return nil
+}
+
+// broadcastBlock is a nop placeholder for the block broadcasting.
+func (f *fetcherTester) broadcastBlock(block *types.Block, propagate bool) {
+}
+
+// chainHeight retrieves the current height (block number) of the chain.
+func (f *fetcherTester) chainHeight() uint64 {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ if f.fetcher.light {
+ return f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64()
+ }
+ return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64()
+}
+
+// insertHeaders injects new headers into the simulated chain.
+func (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ for i, header := range headers {
+ // Make sure the parent is known
+ if _, ok := f.headers[header.ParentHash]; !ok {
+ return i, errors.New("unknown parent")
+ }
+ // Discard any new blocks if the same height already exists
+ if header.Number.Uint64() <= f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() {
+ return i, nil
+ }
+ // Otherwise build our current chain
+ f.hashes = append(f.hashes, header.Hash())
+ f.headers[header.Hash()] = header
+ }
+ return 0, nil
+}
+
+// insertChain injects new blocks into the simulated chain.
+func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ for i, block := range blocks {
+ // Make sure the parent is known
+ if _, ok := f.blocks[block.ParentHash()]; !ok {
+ return i, errors.New("unknown parent")
+ }
+ // Discard any new blocks if the same height already exists
+ if block.NumberU64() <= f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() {
+ return i, nil
+ }
+ // Otherwise build our current chain
+ f.hashes = append(f.hashes, block.Hash())
+ f.blocks[block.Hash()] = block
+ }
+ return 0, nil
+}
+
+// dropPeer is an emulator for the peer removal, simply accumulating the various
+// peers dropped by the fetcher.
+func (f *fetcherTester) dropPeer(peer string) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ f.drops[peer] = true
+}
+
+// makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer.
+func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn {
+ closure := make(map[common.Hash]*types.Block)
+ for hash, block := range blocks {
+ closure[hash] = block
+ }
+ // Create a function that returns a header from the closure
+ return func(hash common.Hash) error {
+ // Gather the blocks to return
+ headers := make([]*types.Header, 0, 1)
+ if block, ok := closure[hash]; ok {
+ headers = append(headers, block.Header())
+ }
+ // Return on a new thread
+ go f.fetcher.FilterHeaders(peer, headers, time.Now().Add(drift))
+
+ return nil
+ }
+}
+
+// makeBodyFetcher retrieves a block body fetcher associated with a simulated peer.
+func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn {
+ closure := make(map[common.Hash]*types.Block)
+ for hash, block := range blocks {
+ closure[hash] = block
+ }
+ // Create a function that returns blocks from the closure
+ return func(hashes []common.Hash) error {
+ // Gather the block bodies to return
+ transactions := make([][]*types.Transaction, 0, len(hashes))
+ uncles := make([][]*types.Header, 0, len(hashes))
+
+ for _, hash := range hashes {
+ if block, ok := closure[hash]; ok {
+ transactions = append(transactions, block.Transactions())
+ uncles = append(uncles, block.Uncles())
+ }
+ }
+ // Return on a new thread
+ go f.fetcher.FilterBodies(peer, transactions, uncles, time.Now().Add(drift))
+
+ return nil
+ }
+}
+
+// verifyFetchingEvent verifies that a single event arrives on the fetching channel.
+func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) {
+ if arrive {
+ select {
+ case <-fetching:
+ case <-time.After(time.Second):
+ t.Fatalf("fetching timeout")
+ }
+ } else {
+ select {
+ case <-fetching:
+ t.Fatalf("fetching invoked")
+ case <-time.After(10 * time.Millisecond):
+ }
+ }
+}
+
+// verifyCompletingEvent verifies that a single event arrives on the completing channel.
+func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) {
+ if arrive {
+ select {
+ case <-completing:
+ case <-time.After(time.Second):
+ t.Fatalf("completing timeout")
+ }
+ } else {
+ select {
+ case <-completing:
+ t.Fatalf("completing invoked")
+ case <-time.After(10 * time.Millisecond):
+ }
+ }
+}
+
+// verifyImportEvent verifies that a single event arrives on the import channel.
+func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {
+ if arrive {
+ select {
+ case <-imported:
+ case <-time.After(time.Second):
+ t.Fatalf("import timeout")
+ }
+ } else {
+ select {
+ case <-imported:
+ t.Fatalf("import invoked")
+ case <-time.After(20 * time.Millisecond):
+ }
+ }
+}
+
+// verifyImportCount verifies that exactly count events arrive on an
+// import hook channel.
+func verifyImportCount(t *testing.T, imported chan interface{}, count int) {
+ for i := 0; i < count; i++ {
+ select {
+ case <-imported:
+ case <-time.After(time.Second):
+ t.Fatalf("block %d: import timeout", i+1)
+ }
+ }
+ verifyImportDone(t, imported)
+}
+
+// verifyImportDone verifies that no more events are arriving on an import channel.
+func verifyImportDone(t *testing.T, imported chan interface{}) {
+ select {
+ case <-imported:
+ t.Fatalf("extra block imported")
+ case <-time.After(50 * time.Millisecond):
+ }
+}
+
+// verifyChainHeight verifies the chain height is as expected.
+func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) {
+ if fetcher.chainHeight() != height {
+ t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height)
+ }
+}
+
+// Tests that a fetcher accepts block/header announcements and initiates retrievals
+// for them, successfully importing into the local chain.
+func TestFullSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, false) }
+func TestLightSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, true) }
+
+func testSequentialAnnouncements(t *testing.T, light bool) {
+ // Create a chain of blocks to import
+ targetBlocks := 4 * hashLimit
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
+
+ tester := newTester(light)
+ headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
+ bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
+
+ // Iteratively announce blocks until all are imported
+ imported := make(chan interface{})
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
+ for i := len(hashes) - 2; i >= 0; i-- {
+ tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
+ verifyImportEvent(t, imported, true)
+ }
+ verifyImportDone(t, imported)
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
+}
+
+// Tests that if blocks are announced by multiple peers (or even the same buggy
+// peer), they will only get downloaded at most once.
+func TestFullConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, false) }
+func TestLightConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, true) }
+
+func testConcurrentAnnouncements(t *testing.T, light bool) {
+ // Create a chain of blocks to import
+ targetBlocks := 4 * hashLimit
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
+
+ // Assemble a tester with a built in counter for the requests
+ tester := newTester(light)
+ firstHeaderFetcher := tester.makeHeaderFetcher("first", blocks, -gatherSlack)
+ firstBodyFetcher := tester.makeBodyFetcher("first", blocks, 0)
+ secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack)
+ secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0)
+
+ counter := uint32(0)
+ firstHeaderWrapper := func(hash common.Hash) error {
+ atomic.AddUint32(&counter, 1)
+ return firstHeaderFetcher(hash)
+ }
+ secondHeaderWrapper := func(hash common.Hash) error {
+ atomic.AddUint32(&counter, 1)
+ return secondHeaderFetcher(hash)
+ }
+ // Iteratively announce blocks until all are imported
+ imported := make(chan interface{})
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
+ for i := len(hashes) - 2; i >= 0; i-- {
+ tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), firstHeaderWrapper, firstBodyFetcher)
+ tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), secondHeaderWrapper, secondBodyFetcher)
+ tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), secondHeaderWrapper, secondBodyFetcher)
+ verifyImportEvent(t, imported, true)
+ }
+ verifyImportDone(t, imported)
+
+ // Make sure no blocks were retrieved twice
+ if int(counter) != targetBlocks {
+ t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks)
+ }
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
+}
+
+// Tests that announcements arriving while a previous one is being fetched still
+// result in a valid import.
+func TestFullOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, false) }
+func TestLightOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, true) }
+
+func testOverlappingAnnouncements(t *testing.T, light bool) {
+ // Create a chain of blocks to import
+ targetBlocks := 4 * hashLimit
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
+
+ tester := newTester(light)
+ headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
+ bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
+
+ // Iteratively announce blocks, but overlap them continuously
+ overlap := 16
+ imported := make(chan interface{}, len(hashes)-1)
+ for i := 0; i < overlap; i++ {
+ imported <- nil
+ }
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
+
+ for i := len(hashes) - 2; i >= 0; i-- {
+ tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
+ select {
+ case <-imported:
+ case <-time.After(time.Second):
+ t.Fatalf("block %d: import timeout", len(hashes)-i)
+ }
+ }
+ // Wait for all the imports to complete and check count
+ verifyImportCount(t, imported, overlap)
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
+}
+
+// Tests that announces already being retrieved will not be duplicated.
+func TestFullPendingDeduplication(t *testing.T) { testPendingDeduplication(t, false) }
+func TestLightPendingDeduplication(t *testing.T) { testPendingDeduplication(t, true) }
+
+func testPendingDeduplication(t *testing.T, light bool) {
+ // Create a hash and corresponding block
+ hashes, blocks := makeChain(1, 0, genesis)
+
+ // Assemble a tester with a built in counter and delayed fetcher
+ tester := newTester(light)
+ headerFetcher := tester.makeHeaderFetcher("repeater", blocks, -gatherSlack)
+ bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0)
+
+ delay := 50 * time.Millisecond
+ counter := uint32(0)
+ headerWrapper := func(hash common.Hash) error {
+ atomic.AddUint32(&counter, 1)
+
+ // Simulate a long running fetch
+ go func() {
+ time.Sleep(delay)
+ headerFetcher(hash)
+ }()
+ return nil
+ }
+ checkNonExist := func() bool {
+ return tester.getBlock(hashes[0]) == nil
+ }
+ if light {
+ checkNonExist = func() bool {
+ return tester.getHeader(hashes[0]) == nil
+ }
+ }
+ // Announce the same block many times until it's fetched (wait for any pending ops)
+ for checkNonExist() {
+ tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)
+ time.Sleep(time.Millisecond)
+ }
+ time.Sleep(delay)
+
+ // Check that all blocks were imported and none fetched twice
+ if int(counter) != 1 {
+ t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1)
+ }
+ verifyChainHeight(t, tester, 1)
+}
+
+// Tests that announcements retrieved in a random order are cached and eventually
+// imported when all the gaps are filled in.
+func TestFullRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, false) }
+func TestLightRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, true) }
+
+func testRandomArrivalImport(t *testing.T, light bool) {
+ // Create a chain of blocks to import, and choose one to delay
+ targetBlocks := maxQueueDist
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
+ skip := targetBlocks / 2
+
+ tester := newTester(light)
+ headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
+ bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
+
+ // Iteratively announce blocks, skipping one entry
+ imported := make(chan interface{}, len(hashes)-1)
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
+ for i := len(hashes) - 1; i >= 0; i-- {
+ if i != skip {
+ tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
+ time.Sleep(time.Millisecond)
+ }
+ }
+ // Finally announce the skipped entry and check full import
+ tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
+ verifyImportCount(t, imported, len(hashes)-1)
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
+}
+
+// Tests that direct block enqueues (due to block propagation vs. hash announce)
+// are correctly scheduled, filling any import queue gaps.
+func TestQueueGapFill(t *testing.T) {
+ // Create a chain of blocks to import, and choose one to not announce at all
+ targetBlocks := maxQueueDist
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
+ skip := targetBlocks / 2
+
+ tester := newTester(false)
+ headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
+ bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
+
+ // Iteratively announce blocks, skipping one entry
+ imported := make(chan interface{}, len(hashes)-1)
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
+
+ for i := len(hashes) - 1; i >= 0; i-- {
+ if i != skip {
+ tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
+ time.Sleep(time.Millisecond)
+ }
+ }
+ // Fill the missing block directly as if propagated
+ tester.fetcher.Enqueue("valid", blocks[hashes[skip]])
+ verifyImportCount(t, imported, len(hashes)-1)
+ verifyChainHeight(t, tester, uint64(len(hashes)-1))
+}
+
+// Tests that blocks arriving from various sources (multiple propagations, hash
+// announces, etc) do not get scheduled for import multiple times.
+func TestImportDeduplication(t *testing.T) {
+ // Create two blocks to import (one for duplication, the other for stalling)
+ hashes, blocks := makeChain(2, 0, genesis)
+
+ // Create the tester and wrap the importer with a counter
+ tester := newTester(false)
+ headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
+ bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
+
+ counter := uint32(0)
+ tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) {
+ atomic.AddUint32(&counter, uint32(len(blocks)))
+ return tester.insertChain(blocks)
+ }
+ // Instrument the fetching and imported events
+ fetching := make(chan []common.Hash)
+ imported := make(chan interface{}, len(hashes)-1)
+ tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
+
+ // Announce the duplicating block, wait for retrieval, and also propagate directly
+ tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
+ <-fetching
+
+ tester.fetcher.Enqueue("valid", blocks[hashes[0]])
+ tester.fetcher.Enqueue("valid", blocks[hashes[0]])
+ tester.fetcher.Enqueue("valid", blocks[hashes[0]])
+
+ // Fill the missing block directly as if propagated, and check import uniqueness
+ tester.fetcher.Enqueue("valid", blocks[hashes[1]])
+ verifyImportCount(t, imported, 2)
+
+ if counter != 2 {
+ t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2)
+ }
+}
+
+// Tests that blocks with numbers much lower or higher than our current head get
+// discarded to prevent wasting resources on useless blocks from faulty peers.
+func TestDistantPropagationDiscarding(t *testing.T) {
+ // Create a long chain to import and define the discard boundaries
+ hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
+ head := hashes[len(hashes)/2]
+
+ low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1
+
+ // Create a tester and simulate a head block being the middle of the above chain
+ tester := newTester(false)
+
+ tester.lock.Lock()
+ tester.hashes = []common.Hash{head}
+ tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
+ tester.lock.Unlock()
+
+ // Ensure that a block with a lower number than the threshold is discarded
+ tester.fetcher.Enqueue("lower", blocks[hashes[low]])
+ time.Sleep(10 * time.Millisecond)
+ if !tester.fetcher.queue.Empty() {
+ t.Fatalf("fetcher queued stale block")
+ }
+ // Ensure that a block with a higher number than the threshold is discarded
+ tester.fetcher.Enqueue("higher", blocks[hashes[high]])
+ time.Sleep(10 * time.Millisecond)
+ if !tester.fetcher.queue.Empty() {
+ t.Fatalf("fetcher queued future block")
+ }
+}
+
+// Tests that announcements with numbers much lower or higher than our current
+// head get discarded to prevent wasting resources on useless blocks from faulty
+// peers.
+func TestFullDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, false) }
+func TestLightDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, true) }
+
+func testDistantAnnouncementDiscarding(t *testing.T, light bool) {
+ // Create a long chain to import and define the discard boundaries
+ hashes, blocks := makeChain(3*maxQueueDist, 0, genesis)
+ head := hashes[len(hashes)/2]
+
+ low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1
+
+ // Create a tester and simulate a head block being the middle of the above chain
+ tester := newTester(light)
+
+ tester.lock.Lock()
+ tester.hashes = []common.Hash{head}
+ tester.headers = map[common.Hash]*types.Header{head: blocks[head].Header()}
+ tester.blocks = map[common.Hash]*types.Block{head: blocks[head]}
+ tester.lock.Unlock()
+
+ headerFetcher := tester.makeHeaderFetcher("lower", blocks, -gatherSlack)
+ bodyFetcher := tester.makeBodyFetcher("lower", blocks, 0)
+
+ fetching := make(chan struct{}, 2)
+ tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} }
+
+ // Ensure that a block with a lower number than the threshold is discarded
+ tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-fetching:
+ t.Fatalf("fetcher requested stale header")
+ }
+ // Ensure that a block with a higher number than the threshold is discarded
+ tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
+ select {
+ case <-time.After(50 * time.Millisecond):
+ case <-fetching:
+ t.Fatalf("fetcher requested future header")
+ }
+}
+
+// Tests that peers announcing blocks with invalid numbers (i.e. not matching
+// the headers provided afterwards) get dropped as malicious.
+func TestFullInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, false) }
+func TestLightInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, true) }
+
+func testInvalidNumberAnnouncement(t *testing.T, light bool) {
+ // Create a single block to import and check numbers against
+ hashes, blocks := makeChain(1, 0, genesis)
+
+ tester := newTester(light)
+ badHeaderFetcher := tester.makeHeaderFetcher("bad", blocks, -gatherSlack)
+ badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0)
+
+ imported := make(chan interface{})
+ announced := make(chan interface{})
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if light {
+ if header == nil {
+ t.Fatalf("Fetcher try to import empty header")
+ }
+ imported <- header
+ } else {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ }
+ // Announce a block with a bad number, check for immediate drop
+ tester.fetcher.announceChangeHook = func(hash common.Hash, b bool) {
+ announced <- nil
+ }
+ tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher)
+ verifyAnnounce := func() {
+ for i := 0; i < 2; i++ {
+ select {
+ case <-announced:
+ continue
+ case <-time.After(1 * time.Second):
+ t.Fatal("announce timeout")
+ return
+ }
+ }
+ }
+ verifyAnnounce()
+ verifyImportEvent(t, imported, false)
+ tester.lock.RLock()
+ dropped := tester.drops["bad"]
+ tester.lock.RUnlock()
+
+ if !dropped {
+ t.Fatalf("peer with invalid numbered announcement not dropped")
+ }
+ goodHeaderFetcher := tester.makeHeaderFetcher("good", blocks, -gatherSlack)
+ goodBodyFetcher := tester.makeBodyFetcher("good", blocks, 0)
+ // Make sure a good announcement passes without a drop
+ tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), goodHeaderFetcher, goodBodyFetcher)
+ verifyAnnounce()
+ verifyImportEvent(t, imported, true)
+
+ tester.lock.RLock()
+ dropped = tester.drops["good"]
+ tester.lock.RUnlock()
+
+ if dropped {
+ t.Fatalf("peer with valid numbered announcement dropped")
+ }
+ verifyImportDone(t, imported)
+}
+
+// Tests that if a block is empty (i.e. header only), no body request should be
+// made; instead, the header should be assembled into a complete block on its own.
+func TestEmptyBlockShortCircuit(t *testing.T) {
+ // Create a chain of blocks to import
+ hashes, blocks := makeChain(32, 0, genesis)
+
+ tester := newTester(false)
+ headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
+ bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
+
+ // Add a monitoring hook for all internal events
+ fetching := make(chan []common.Hash)
+ tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }
+
+ completing := make(chan []common.Hash)
+ tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes }
+
+ imported := make(chan interface{})
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
+ if block == nil {
+ t.Fatalf("Fetcher try to import empty block")
+ }
+ imported <- block
+ }
+ // Iteratively announce blocks until all are imported
+ for i := len(hashes) - 2; i >= 0; i-- {
+ tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)
+
+ // All announces should fetch the header
+ verifyFetchingEvent(t, fetching, true)
+
+ // Only blocks with data contents should request bodies
+ verifyCompletingEvent(t, completing, len(blocks[hashes[i]].Transactions()) > 0 || len(blocks[hashes[i]].Uncles()) > 0)
+
+ // Regardless of the block's construction, import should succeed
+ verifyImportEvent(t, imported, true)
+ }
+ verifyImportDone(t, imported)
+}
+
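For context, the short circuit this test exercises amounts to an emptiness check on the announced header. A minimal sketch of the idea (not the exact fetcher code), using the `types.EmptyRootHash`/`types.EmptyUncleHash` sentinels from core/types:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// headerOnly reports whether a header describes an empty block, i.e. one that
// can be assembled without fetching a body at all.
func headerOnly(h *types.Header) bool {
	return h.TxHash == types.EmptyRootHash && h.UncleHash == types.EmptyUncleHash
}

func main() {
	h := &types.Header{TxHash: types.EmptyRootHash, UncleHash: types.EmptyUncleHash}
	fmt.Println(headerOnly(h)) // true: no body request needed
}
```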
+// Tests that a peer is unable to use unbounded memory by sending infinite
+// block announcements to a node, but that even in the face of such an attack,
+// the fetcher remains operational.
+func TestHashMemoryExhaustionAttack(t *testing.T) {
+ // Create a tester with instrumented import hooks
+ tester := newTester(false)
+
+ imported, announces := make(chan interface{}), int32(0)
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
+ tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) {
+ if added {
+ atomic.AddInt32(&announces, 1)
+ } else {
+ atomic.AddInt32(&announces, -1)
+ }
+ }
+ // Create a valid chain and an infinite junk chain
+ targetBlocks := hashLimit + 2*maxQueueDist
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
+ validHeaderFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
+ validBodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
+
+ attack, _ := makeChain(targetBlocks, 0, unknownBlock)
+ attackerHeaderFetcher := tester.makeHeaderFetcher("attacker", nil, -gatherSlack)
+ attackerBodyFetcher := tester.makeBodyFetcher("attacker", nil, 0)
+
+ // Feed the tester a huge hashset from the attacker, and a limited one from the valid peer
+ for i := 0; i < len(attack); i++ {
+ if i < maxQueueDist {
+ tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), validHeaderFetcher, validBodyFetcher)
+ }
+ tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher)
+ }
+ if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist {
+ t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist)
+ }
+ // Wait for fetches to complete
+ verifyImportCount(t, imported, maxQueueDist)
+
+ // Feed the remaining valid hashes to ensure DOS protection state remains clean
+ for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- {
+ tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), validHeaderFetcher, validBodyFetcher)
+ verifyImportEvent(t, imported, true)
+ }
+ verifyImportDone(t, imported)
+}
+
+// Tests that blocks sent to the fetcher (either through propagation or via hash
+// announces and retrievals) don't pile up indefinitely, exhausting available
+// system memory.
+func TestBlockMemoryExhaustionAttack(t *testing.T) {
+ // Create a tester with instrumented import hooks
+ tester := newTester(false)
+
+ imported, enqueued := make(chan interface{}), int32(0)
+ tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }
+ tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) {
+ if added {
+ atomic.AddInt32(&enqueued, 1)
+ } else {
+ atomic.AddInt32(&enqueued, -1)
+ }
+ }
+ // Create a valid chain and a batch of dangling (but in range) blocks
+ targetBlocks := hashLimit + 2*maxQueueDist
+ hashes, blocks := makeChain(targetBlocks, 0, genesis)
+ attack := make(map[common.Hash]*types.Block)
+ for i := byte(0); len(attack) < blockLimit+2*maxQueueDist; i++ {
+ hashes, blocks := makeChain(maxQueueDist-1, i, unknownBlock)
+ for _, hash := range hashes[:maxQueueDist-2] {
+ attack[hash] = blocks[hash]
+ }
+ }
+ // Try to feed all the attacker blocks and make sure only a limited batch is accepted
+ for _, block := range attack {
+ tester.fetcher.Enqueue("attacker", block)
+ }
+ time.Sleep(200 * time.Millisecond)
+ if queued := atomic.LoadInt32(&enqueued); queued != blockLimit {
+ t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit)
+ }
+ // Queue up a batch of valid blocks, and check that a new peer is allowed to do so
+ for i := 0; i < maxQueueDist-1; i++ {
+ tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]])
+ }
+ time.Sleep(100 * time.Millisecond)
+ if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 {
+ t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1)
+ }
+ // Insert the missing piece (and sanity check the import)
+ tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2]])
+ verifyImportCount(t, imported, maxQueueDist)
+
+ // Insert the remaining blocks in chunks to ensure clean DOS protection
+ for i := maxQueueDist; i < len(hashes)-1; i++ {
+ tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]])
+ verifyImportEvent(t, imported, true)
+ }
+ verifyImportDone(t, imported)
+}
diff --git a/les/fetcher_test.go b/les/fetcher_test.go
index d3a74d25c..ef700651e 100644
--- a/les/fetcher_test.go
+++ b/les/fetcher_test.go
@@ -74,14 +74,12 @@ func testSequentialAnnouncements(t *testing.T, protocol int) {
s, c, teardown := newClientServerEnv(t, netconfig)
defer teardown()
- // Create connected peer pair.
- c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
- p1, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler)
+ // Create connected peer pair; the initial signal from the LES server
+ // is discarded to prevent syncing.
+ p1, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler, true)
if err != nil {
t.Fatalf("Failed to create peer pair %v", err)
}
- c.handler.fetcher.noAnnounce = false
-
importCh := make(chan interface{})
c.handler.fetcher.newHeadHook = func(header *types.Header) {
importCh <- header
@@ -114,14 +112,12 @@ func testGappedAnnouncements(t *testing.T, protocol int) {
s, c, teardown := newClientServerEnv(t, netconfig)
defer teardown()
- // Create connected peer pair.
- c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
- peer, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler)
+ // Create connected peer pair; the initial signal from the LES server
+ // is discarded to prevent syncing.
+ peer, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler, true)
if err != nil {
t.Fatalf("Failed to create peer pair %v", err)
}
- c.handler.fetcher.noAnnounce = false
-
done := make(chan *types.Header, 1)
c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header }
@@ -141,29 +137,11 @@ func testGappedAnnouncements(t *testing.T, protocol int) {
verifyChainHeight(t, c.handler.fetcher, 4)
// Send a reorged announcement
- var newAnno = make(chan struct{}, 1)
- c.handler.fetcher.noAnnounce = true
- c.handler.fetcher.newAnnounce = func(*serverPeer, *announceData) {
- newAnno <- struct{}{}
- }
blocks, _ := core.GenerateChain(rawdb.ReadChainConfig(s.db, s.backend.Blockchain().Genesis().Hash()), s.backend.Blockchain().GetBlockByNumber(3),
ethash.NewFaker(), s.db, 2, func(i int, gen *core.BlockGen) {
gen.OffsetTime(-9) // higher block difficulty
})
s.backend.Blockchain().InsertChain(blocks)
- <-newAnno
- c.handler.fetcher.noAnnounce = false
- c.handler.fetcher.newAnnounce = nil
-
- latest = blocks[len(blocks)-1].Header()
- hash, number = latest.Hash(), latest.Number.Uint64()
- td = rawdb.ReadTd(s.db, hash, number)
-
- announce = announceData{hash, number, td, 1, nil}
- if peer.cpeer.announceType == announceTypeSigned {
- announce.sign(s.handler.server.privateKey)
- }
- peer.cpeer.sendAnnounce(announce)
<-done // Wait syncing
verifyChainHeight(t, c.handler.fetcher, 5)
@@ -206,20 +184,15 @@ func testTrustedAnnouncement(t *testing.T, protocol int) {
teardowns[i]()
}
}()
-
- c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
-
// Connect all server instances.
for i := 0; i < len(servers); i++ {
- sp, cp, err := connect(servers[i].handler, nodes[i].ID(), c.handler, protocol)
+ sp, cp, err := connect(servers[i].handler, nodes[i].ID(), c.handler, protocol, true)
if err != nil {
t.Fatalf("connect server and client failed, err %s", err)
}
cpeers = append(cpeers, cp)
speers = append(speers, sp)
}
- c.handler.fetcher.noAnnounce = false
-
newHead := make(chan *types.Header, 1)
c.handler.fetcher.newHeadHook = func(header *types.Header) { newHead <- header }
@@ -262,14 +235,12 @@ func testInvalidAnnounces(t *testing.T, protocol int) {
s, c, teardown := newClientServerEnv(t, netconfig)
defer teardown()
- // Create connected peer pair.
- c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
- peer, _, err := newTestPeerPair("peer", lpv3, s.handler, c.handler)
+ // Create connected peer pair; the initial signal from the LES server
+ // is discarded to prevent syncing.
+ peer, _, err := newTestPeerPair("peer", lpv3, s.handler, c.handler, true)
if err != nil {
t.Fatalf("Failed to create peer pair %v", err)
}
- c.handler.fetcher.noAnnounce = false
-
done := make(chan *types.Header, 1)
c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header }
diff --git a/les/handler_test.go b/les/handler_test.go
index bb8ad3382..aba45764b 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/les/downloader"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
diff --git a/les/odr_test.go b/les/odr_test.go
index ea88495d1..ad77abf5b 100644
--- a/les/odr_test.go
+++ b/les/odr_test.go
@@ -401,9 +401,9 @@ func testGetTxStatusFromUnindexedPeers(t *testing.T, protocol int) {
closeFns = append(closeFns, closePeer)
// Create a one-time routine for serving message
- go func(i int, peer *testPeer) {
- serveMsg(peer, testspec.txLookups[i])
- }(i, peer)
+ go func(i int, peer *testPeer, lookup uint64) {
+ serveMsg(peer, lookup)
+ }(i, peer, testspec.txLookups[i])
}
// Send out the GetTxStatus requests, compare the result with
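The change above is the standard Go remedy for closures in loops: evaluate shared state in the spawning goroutine and hand the child a private copy. A self-contained sketch of the pattern:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	lookups := []uint64{1, 2, 3}
	var wg sync.WaitGroup
	for i := range lookups {
		wg.Add(1)
		// Evaluating lookups[i] here, in the spawning goroutine, hands the
		// child a copy; reading it inside the closure instead would race with
		// any later mutation of the slice (or, before Go 1.22, with the loop
		// variable itself).
		go func(lookup uint64) {
			defer wg.Done()
			fmt.Println(lookup)
		}(lookups[i])
	}
	wg.Wait()
}
```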
diff --git a/les/pruner.go b/les/pruner.go
index 622e64868..a1bd51d86 100644
--- a/les/pruner.go
+++ b/les/pruner.go
@@ -62,6 +62,7 @@ func (p *pruner) loop() {
// cleanTicker is the ticker used to trigger a history clean 2 times a day.
var cleanTicker = time.NewTicker(12 * time.Hour)
+ defer cleanTicker.Stop()
// pruning finds the sections that have been processed by all indexers
// and deletes all historical chain data.
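The added `defer cleanTicker.Stop()` plugs a timer leak: a ticker that is never stopped keeps its runtime timer alive after the loop returns. A minimal illustration of the pattern:

```go
package main

import (
	"fmt"
	"time"
)

func prune(stop <-chan struct{}) {
	ticker := time.NewTicker(12 * time.Hour)
	// Stop releases the underlying runtime timer when this goroutine exits;
	// without it the timer keeps firing into an unread channel forever.
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("pruning")
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	close(stop)
	prune(stop)
}
```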
diff --git a/les/server_handler.go b/les/server_handler.go
index 80fcf1c44..f36a87a51 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/flowcontrol"
@@ -359,20 +358,20 @@ func (h *serverHandler) AddTxsSync() bool {
}
// getAccount retrieves an account from the state based on root.
-func getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) {
+func getAccount(triedb *trie.Database, root, hash common.Hash) (types.StateAccount, error) {
trie, err := trie.New(root, triedb)
if err != nil {
- return state.Account{}, err
+ return types.StateAccount{}, err
}
blob, err := trie.TryGet(hash[:])
if err != nil {
- return state.Account{}, err
+ return types.StateAccount{}, err
}
- var account state.Account
- if err = rlp.DecodeBytes(blob, &account); err != nil {
- return state.Account{}, err
+ var acc types.StateAccount
+ if err = rlp.DecodeBytes(blob, &acc); err != nil {
+ return types.StateAccount{}, err
}
- return account, nil
+ return acc, nil
}
// getHelperTrie returns the post-processed trie root for the given trie ID and section index
@@ -408,7 +407,7 @@ func (h *serverHandler) broadcastLoop() {
defer headSub.Unsubscribe()
var (
- lastHead *types.Header
+ lastHead = h.blockchain.CurrentHeader()
lastTd = common.Big0
)
for {
diff --git a/les/sync.go b/les/sync.go
index fa5ef4ff8..31cd06ca7 100644
--- a/les/sync.go
+++ b/les/sync.go
@@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/les/downloader"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
diff --git a/les/sync_test.go b/les/sync_test.go
index d3bb90df0..3fc2a9c15 100644
--- a/les/sync_test.go
+++ b/les/sync_test.go
@@ -116,7 +116,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) {
}
// Create connected peer pair.
- peer1, peer2, err := newTestPeerPair("peer", protocol, server.handler, client.handler)
+ peer1, peer2, err := newTestPeerPair("peer", protocol, server.handler, client.handler, false)
if err != nil {
t.Fatalf("Failed to connect testing peers %v", err)
}
@@ -218,7 +218,7 @@ func testMissOracleBackend(t *testing.T, hasCheckpoint bool, protocol int) {
}
}
// Create connected peer pair.
- if _, _, err := newTestPeerPair("peer", 2, server.handler, client.handler); err != nil {
+ if _, _, err := newTestPeerPair("peer", 2, server.handler, client.handler, false); err != nil {
t.Fatalf("Failed to connect testing peers %v", err)
}
select {
@@ -291,7 +291,7 @@ func testSyncFromConfiguredCheckpoint(t *testing.T, protocol int) {
}
}
// Create connected peer pair.
- if _, _, err := newTestPeerPair("peer", 2, server.handler, client.handler); err != nil {
+ if _, _, err := newTestPeerPair("peer", 2, server.handler, client.handler, false); err != nil {
t.Fatalf("Failed to connect testing peers %v", err)
}
@@ -364,7 +364,7 @@ func testSyncAll(t *testing.T, protocol int) {
}
}
// Create connected peer pair.
- if _, _, err := newTestPeerPair("peer", 2, server.handler, client.handler); err != nil {
+ if _, _, err := newTestPeerPair("peer", 2, server.handler, client.handler, false); err != nil {
t.Fatalf("Failed to connect testing peers %v", err)
}
diff --git a/les/test_helper.go b/les/test_helper.go
index 9ff2583b9..21d0f191c 100644
--- a/les/test_helper.go
+++ b/les/test_helper.go
@@ -398,7 +398,7 @@ func (p *testPeer) close() {
p.app.Close()
}
-func newTestPeerPair(name string, version int, server *serverHandler, client *clientHandler) (*testPeer, *testPeer, error) {
+func newTestPeerPair(name string, version int, server *serverHandler, client *clientHandler, noInitAnnounce bool) (*testPeer, *testPeer, error) {
// Create a message pipe to communicate through
app, net := p2p.MsgPipe()
@@ -423,16 +423,16 @@ func newTestPeerPair(name string, version int, server *serverHandler, client *cl
select {
case <-client.closeCh:
errc2 <- p2p.DiscQuitting
- case errc2 <- client.handle(peer2):
+ case errc2 <- client.handle(peer2, noInitAnnounce):
}
}()
// Ensure the connection is established or exits when any error occurs
for {
select {
case err := <-errc1:
- return nil, nil, fmt.Errorf("Failed to establish protocol connection %v", err)
+ return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err)
case err := <-errc2:
- return nil, nil, fmt.Errorf("Failed to establish protocol connection %v", err)
+ return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err)
default:
}
if atomic.LoadUint32(&peer1.serving) == 1 && atomic.LoadUint32(&peer2.serving) == 1 {
@@ -473,7 +473,7 @@ func (client *testClient) newRawPeer(t *testing.T, name string, version int, rec
select {
case <-client.handler.closeCh:
errCh <- p2p.DiscQuitting
- case errCh <- client.handler.handle(peer):
+ case errCh <- client.handler.handle(peer, false):
}
}()
tp := &testPeer{
@@ -623,7 +623,7 @@ func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testC
if config.connect {
done := make(chan struct{})
client.syncEnd = func(_ *types.Header) { close(done) }
- cpeer, speer, err = newTestPeerPair("peer", config.protocol, server, client)
+ cpeer, speer, err = newTestPeerPair("peer", config.protocol, server, client, false)
if err != nil {
t.Fatalf("Failed to connect testing peers %v", err)
}
diff --git a/les/ulc_test.go b/les/ulc_test.go
index d7308fa59..ecef58d97 100644
--- a/les/ulc_test.go
+++ b/les/ulc_test.go
@@ -20,6 +20,7 @@ import (
"crypto/rand"
"fmt"
"net"
+ "sync/atomic"
"testing"
"time"
@@ -65,7 +66,7 @@ func testULCAnnounceThreshold(t *testing.T, protocol int) {
// Connect all servers.
for i := 0; i < len(servers); i++ {
- connect(servers[i].handler, nodes[i].ID(), c.handler, protocol)
+ connect(servers[i].handler, nodes[i].ID(), c.handler, protocol, false)
}
for i := 0; i < len(servers); i++ {
for j := 0; j < testcase.height[i]; j++ {
@@ -86,7 +87,7 @@ func testULCAnnounceThreshold(t *testing.T, protocol int) {
}
}
-func connect(server *serverHandler, serverId enode.ID, client *clientHandler, protocol int) (*serverPeer, *clientPeer, error) {
+func connect(server *serverHandler, serverId enode.ID, client *clientHandler, protocol int, noInitAnnounce bool) (*serverPeer, *clientPeer, error) {
// Create a message pipe to communicate through
app, net := p2p.MsgPipe()
@@ -110,16 +111,22 @@ func connect(server *serverHandler, serverId enode.ID, client *clientHandler, pr
select {
case <-client.closeCh:
errc1 <- p2p.DiscQuitting
- case errc1 <- client.handle(peer1):
+ case errc1 <- client.handle(peer1, noInitAnnounce):
}
}()
-
- select {
- case <-time.After(time.Millisecond * 100):
- case err := <-errc1:
- return nil, nil, fmt.Errorf("peerLight handshake error: %v", err)
- case err := <-errc2:
- return nil, nil, fmt.Errorf("peerFull handshake error: %v", err)
+ // Ensure the connection is established or exits when any error occurs
+ for {
+ select {
+ case err := <-errc1:
+ return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err)
+ case err := <-errc2:
+ return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err)
+ default:
+ }
+ if atomic.LoadUint32(&peer1.serving) == 1 && atomic.LoadUint32(&peer2.serving) == 1 {
+ break
+ }
+ time.Sleep(50 * time.Millisecond)
}
return peer1, peer2, nil
}
diff --git a/light/postprocess.go b/light/postprocess.go
index 891c8a586..ce38d091e 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -217,7 +217,7 @@ func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) e
// Commit implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Commit() error {
- root, err := c.trie.Commit(nil)
+ root, _, err := c.trie.Commit(nil)
if err != nil {
return err
}
@@ -454,7 +454,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
b.trie.Delete(encKey[:])
}
}
- root, err := b.trie.Commit(nil)
+ root, _, err := b.trie.Commit(nil)
if err != nil {
return err
}
diff --git a/light/trie.go b/light/trie.go
index 0516b9448..4ab6f4ace 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
@@ -111,6 +112,17 @@ func (t *odrTrie) TryGet(key []byte) ([]byte, error) {
return res, err
}
+func (t *odrTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error {
+ key = crypto.Keccak256(key)
+ value, err := rlp.EncodeToBytes(acc)
+ if err != nil {
+ return fmt.Errorf("decoding error in account update: %w", err)
+ }
+ return t.do(key, func() error {
+ return t.trie.TryUpdate(key, value)
+ })
+}
+
func (t *odrTrie) TryUpdate(key, value []byte) error {
key = crypto.Keccak256(key)
return t.do(key, func() error {
@@ -125,9 +137,9 @@ func (t *odrTrie) TryDelete(key []byte) error {
})
}
-func (t *odrTrie) Commit(onleaf trie.LeafCallback) (common.Hash, error) {
+func (t *odrTrie) Commit(onleaf trie.LeafCallback) (common.Hash, int, error) {
if t.trie == nil {
- return t.id.Root, nil
+ return t.id.Root, 0, nil
}
return t.trie.Commit(onleaf)
}
diff --git a/log/handler_go13.go b/log/handler_go13.go
index 0843ed0e5..4df694deb 100644
--- a/log/handler_go13.go
+++ b/log/handler_go13.go
@@ -1,3 +1,4 @@
+//go:build !go1.4
// +build !go1.4
package log
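This and the following files carry the mechanical Go 1.17 build-tag migration: the new `//go:build` line takes a real boolean expression, while the legacy `// +build` line (spaces mean OR, commas mean AND) stays for older toolchains. `gofmt` in Go 1.17+ keeps the pair in sync, e.g.:

```go
//go:build !windows && !plan9
// +build !windows,!plan9

package log
```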
diff --git a/log/handler_go14.go b/log/handler_go14.go
index 05dedbf2a..d0cb14aa0 100644
--- a/log/handler_go14.go
+++ b/log/handler_go14.go
@@ -1,3 +1,4 @@
+//go:build go1.4
// +build go1.4
package log
diff --git a/log/syslog.go b/log/syslog.go
index 71a17b30b..451d831b6 100644
--- a/log/syslog.go
+++ b/log/syslog.go
@@ -1,3 +1,4 @@
+//go:build !windows && !plan9
// +build !windows,!plan9
package log
diff --git a/metrics/cpu_disabled.go b/metrics/cpu_disabled.go
index 6c3428993..025d97aeb 100644
--- a/metrics/cpu_disabled.go
+++ b/metrics/cpu_disabled.go
@@ -14,7 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-// +build ios
+//go:build ios || js
+// +build ios js
package metrics
diff --git a/metrics/cpu_enabled.go b/metrics/cpu_enabled.go
index 02192928b..533d40b85 100644
--- a/metrics/cpu_enabled.go
+++ b/metrics/cpu_enabled.go
@@ -14,7 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-// +build !ios
+//go:build !ios && !js
+// +build !ios,!js
package metrics
diff --git a/metrics/cpu_windows.go b/metrics/cputime_nop.go
similarity index 95%
rename from metrics/cpu_windows.go
rename to metrics/cputime_nop.go
index fb29a52a8..0188735a7 100644
--- a/metrics/cpu_windows.go
+++ b/metrics/cputime_nop.go
@@ -14,6 +14,9 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build windows || js
+// +build windows js
+
package metrics
// getProcessCPUTime returns 0 on Windows as there is no system call to resolve
diff --git a/metrics/cpu_syscall.go b/metrics/cputime_unix.go
similarity index 96%
rename from metrics/cpu_syscall.go
rename to metrics/cputime_unix.go
index 50e04ef1d..3c56a75d0 100644
--- a/metrics/cpu_syscall.go
+++ b/metrics/cputime_unix.go
@@ -14,7 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-// +build !windows
+//go:build !windows && !js
+// +build !windows,!js
package metrics
diff --git a/metrics/disk_nop.go b/metrics/disk_nop.go
index 4319f8b27..58fa4e02f 100644
--- a/metrics/disk_nop.go
+++ b/metrics/disk_nop.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build !linux
// +build !linux
package metrics
diff --git a/metrics/runtime_cgo.go b/metrics/runtime_cgo.go
index e3391f4e8..4307ebdba 100644
--- a/metrics/runtime_cgo.go
+++ b/metrics/runtime_cgo.go
@@ -1,5 +1,5 @@
-// +build cgo
-// +build !appengine
+//go:build cgo && !appengine && !js
+// +build cgo,!appengine,!js
package metrics
diff --git a/metrics/runtime_gccpufraction.go b/metrics/runtime_gccpufraction.go
index ca12c05ba..28cd44752 100644
--- a/metrics/runtime_gccpufraction.go
+++ b/metrics/runtime_gccpufraction.go
@@ -1,3 +1,4 @@
+//go:build go1.5
// +build go1.5
package metrics
diff --git a/metrics/runtime_no_cgo.go b/metrics/runtime_no_cgo.go
index 616a3b475..1799bef63 100644
--- a/metrics/runtime_no_cgo.go
+++ b/metrics/runtime_no_cgo.go
@@ -1,4 +1,5 @@
-// +build !cgo appengine
+//go:build !cgo || appengine || js
+// +build !cgo appengine js
package metrics
diff --git a/metrics/runtime_no_gccpufraction.go b/metrics/runtime_no_gccpufraction.go
index be96aa6f1..af1a4b63c 100644
--- a/metrics/runtime_no_gccpufraction.go
+++ b/metrics/runtime_no_gccpufraction.go
@@ -1,3 +1,4 @@
+//go:build !go1.5
// +build !go1.5
package metrics
diff --git a/metrics/syslog.go b/metrics/syslog.go
index a0ed4b1b2..551a2bd0f 100644
--- a/metrics/syslog.go
+++ b/metrics/syslog.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows
package metrics
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 9c4dc0f37..5b35c66dc 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -74,8 +74,10 @@ var (
func init() {
testTxPoolConfig = core.DefaultTxPoolConfig
testTxPoolConfig.Journal = ""
- ethashChainConfig = params.TestChainConfig
- cliqueChainConfig = params.TestChainConfig
+ ethashChainConfig = new(params.ChainConfig)
+ *ethashChainConfig = *params.TestChainConfig
+ cliqueChainConfig = new(params.ChainConfig)
+ *cliqueChainConfig = *params.TestChainConfig
cliqueChainConfig.Clique = ¶ms.CliqueConfig{
Period: 10,
Epoch: 30000,
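The rewritten init fixes a classic aliasing bug: assigning a `*params.ChainConfig` copies only the pointer, so the clique setup was mutating the shared `params.TestChainConfig`. A minimal sketch of the difference:

```go
package main

import "fmt"

type ChainConfig struct{ Period uint64 }

var testConfig = &ChainConfig{Period: 0}

func main() {
	// Aliasing: both names point at the same struct, so mutating one mutates
	// the shared global and leaks state across tests.
	alias := testConfig
	alias.Period = 10
	fmt.Println(testConfig.Period) // 10

	// Copying: allocate a fresh struct and copy the value, as the patch does.
	cpy := new(ChainConfig)
	*cpy = *testConfig
	cpy.Period = 99
	fmt.Println(testConfig.Period) // still 10
}
```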
diff --git a/mobile/geth_android.go b/mobile/geth_android.go
index 8e4ebe638..cfdf1c28c 100644
--- a/mobile/geth_android.go
+++ b/mobile/geth_android.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build android
// +build android
package geth
diff --git a/mobile/geth_ios.go b/mobile/geth_ios.go
index 307cd0858..aab839727 100644
--- a/mobile/geth_ios.go
+++ b/mobile/geth_ios.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build ios
// +build ios
package geth
diff --git a/mobile/geth_other.go b/mobile/geth_other.go
index 6f0c5dda6..c5cad4a7b 100644
--- a/mobile/geth_other.go
+++ b/mobile/geth_other.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build !android && !ios
// +build !android,!ios
package geth
diff --git a/node/config.go b/node/config.go
index ef1da15d7..26f00cd67 100644
--- a/node/config.go
+++ b/node/config.go
@@ -26,11 +26,6 @@ import (
"strings"
"sync"
- "github.com/ethereum/go-ethereum/accounts"
- "github.com/ethereum/go-ethereum/accounts/external"
- "github.com/ethereum/go-ethereum/accounts/keystore"
- "github.com/ethereum/go-ethereum/accounts/scwallet"
- "github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
@@ -93,6 +88,7 @@ type Config struct {
InsecureUnlockAllowed bool `toml:",omitempty"`
// NoUSB disables hardware wallet monitoring and connectivity.
+ // Deprecated: USB monitoring is disabled by default and must be enabled explicitly.
NoUSB bool `toml:",omitempty"`
// USB enables hardware wallet monitoring and connectivity.
@@ -429,15 +425,8 @@ func (c *Config) parsePersistentNodes(w *bool, path string) []*enode.Node {
return nodes
}
-// AccountConfig determines the settings for scrypt and keydirectory
-func (c *Config) AccountConfig() (int, int, string, error) {
- scryptN := keystore.StandardScryptN
- scryptP := keystore.StandardScryptP
- if c.UseLightweightKDF {
- scryptN = keystore.LightScryptN
- scryptP = keystore.LightScryptP
- }
-
+// KeyDirConfig determines the settings for keydirectory
+func (c *Config) KeyDirConfig() (string, error) {
var (
keydir string
err error
@@ -454,71 +443,31 @@ func (c *Config) AccountConfig() (int, int, string, error) {
case c.KeyStoreDir != "":
keydir, err = filepath.Abs(c.KeyStoreDir)
}
- return scryptN, scryptP, keydir, err
+ return keydir, err
}
-func makeAccountManager(conf *Config) (*accounts.Manager, string, error) {
- scryptN, scryptP, keydir, err := conf.AccountConfig()
- var ephemeral string
+// getKeyStoreDir retrieves the key directory, creating
+// an ephemeral one if necessary.
+func getKeyStoreDir(conf *Config) (string, bool, error) {
+ keydir, err := conf.KeyDirConfig()
+ if err != nil {
+ return "", false, err
+ }
+ isEphemeral := false
if keydir == "" {
// There is no datadir.
keydir, err = ioutil.TempDir("", "go-ethereum-keystore")
- ephemeral = keydir
+ isEphemeral = true
}
if err != nil {
- return nil, "", err
+ return "", false, err
}
if err := os.MkdirAll(keydir, 0700); err != nil {
- return nil, "", err
- }
- // Assemble the account manager and supported backends
- var backends []accounts.Backend
- if len(conf.ExternalSigner) > 0 {
- log.Info("Using external signer", "url", conf.ExternalSigner)
- if extapi, err := external.NewExternalBackend(conf.ExternalSigner); err == nil {
- backends = append(backends, extapi)
- } else {
- return nil, "", fmt.Errorf("error connecting to external signer: %v", err)
- }
- }
- if len(backends) == 0 {
- // For now, we're using EITHER external signer OR local signers.
- // If/when we implement some form of lockfile for USB and keystore wallets,
- // we can have both, but it's very confusing for the user to see the same
- // accounts in both externally and locally, plus very racey.
- backends = append(backends, keystore.NewKeyStore(keydir, scryptN, scryptP))
- if conf.USB {
- // Start a USB hub for Ledger hardware wallets
- if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil {
- log.Warn(fmt.Sprintf("Failed to start Ledger hub, disabling: %v", err))
- } else {
- backends = append(backends, ledgerhub)
- }
- // Start a USB hub for Trezor hardware wallets (HID version)
- if trezorhub, err := usbwallet.NewTrezorHubWithHID(); err != nil {
- log.Warn(fmt.Sprintf("Failed to start HID Trezor hub, disabling: %v", err))
- } else {
- backends = append(backends, trezorhub)
- }
- // Start a USB hub for Trezor hardware wallets (WebUSB version)
- if trezorhub, err := usbwallet.NewTrezorHubWithWebUSB(); err != nil {
- log.Warn(fmt.Sprintf("Failed to start WebUSB Trezor hub, disabling: %v", err))
- } else {
- backends = append(backends, trezorhub)
- }
- }
- if len(conf.SmartCardDaemonPath) > 0 {
- // Start a smart card hub
- if schub, err := scwallet.NewHub(conf.SmartCardDaemonPath, scwallet.Scheme, keydir); err != nil {
- log.Warn(fmt.Sprintf("Failed to start smart card hub, disabling: %v", err))
- } else {
- backends = append(backends, schub)
- }
- }
+ return "", false, err
}
- return accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: conf.InsecureUnlockAllowed}, backends...), ephemeral, nil
+ return keydir, isEphemeral, nil
}
var warnLock sync.Mutex
diff --git a/node/node.go b/node/node.go
index 1e65fff1c..ceab1c909 100644
--- a/node/node.go
+++ b/node/node.go
@@ -42,7 +42,8 @@ type Node struct {
config *Config
accman *accounts.Manager
log log.Logger
- ephemKeystore string // if non-empty, the key directory that will be removed by Stop
+ keyDir string // key store directory
+ keyDirTemp bool // If true, key directory will be removed by Stop
dirLock fileutil.Releaser // prevents concurrent use of instance directory
stop chan struct{} // Channel to wait for termination notifications
server *p2p.Server // Currently running P2P networking layer
@@ -112,14 +113,15 @@ func New(conf *Config) (*Node, error) {
if err := node.openDataDir(); err != nil {
return nil, err
}
- // Ensure that the AccountManager method works before the node has started. We rely on
- // this in cmd/geth.
- am, ephemeralKeystore, err := makeAccountManager(conf)
+ keyDir, isEphem, err := getKeyStoreDir(conf)
if err != nil {
return nil, err
}
- node.accman = am
- node.ephemKeystore = ephemeralKeystore
+ node.keyDir = keyDir
+ node.keyDirTemp = isEphem
+ // Creates an empty AccountManager with no backends. Callers (e.g. cmd/geth)
+ // are required to add the backends later on.
+ node.accman = accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: conf.InsecureUnlockAllowed})
// Initialize the p2p server. This creates the node key and discovery databases.
node.server.Config.PrivateKey = node.config.NodeKey()
@@ -233,8 +235,8 @@ func (n *Node) doClose(errs []error) error {
if err := n.accman.Close(); err != nil {
errs = append(errs, err)
}
- if n.ephemKeystore != "" {
- if err := os.RemoveAll(n.ephemKeystore); err != nil {
+ if n.keyDirTemp {
+ if err := os.RemoveAll(n.keyDir); err != nil {
errs = append(errs, err)
}
}
@@ -514,6 +516,11 @@ func (n *Node) InstanceDir() string {
return n.config.instanceDir()
}
+// KeyStoreDir retrieves the key directory.
+func (n *Node) KeyStoreDir() string {
+ return n.keyDir
+}
+
// AccountManager retrieves the account manager used by the protocol stack.
func (n *Node) AccountManager() *accounts.Manager {
return n.accman
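With backend construction moved out of `node.New`, callers wire up the account backends themselves. A sketch of the minimal keystore case, assuming the `accounts.Manager.AddBackend` helper available in this release; the real wiring in cmd/geth is more involved:

```go
package main

import (
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/node"
)

func main() {
	stack, err := node.New(&node.Config{})
	if err != nil {
		panic(err)
	}
	defer stack.Close()
	// KeyStoreDir is the accessor added in this patch; the node no longer
	// builds backends itself, so the caller registers them explicitly.
	ks := keystore.NewKeyStore(stack.KeyStoreDir(),
		keystore.StandardScryptN, keystore.StandardScryptP)
	stack.AccountManager().AddBackend(ks)
}
```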
diff --git a/p2p/dial.go b/p2p/dial.go
index 83ced3cb3..0d70e6f4a 100644
--- a/p2p/dial.go
+++ b/p2p/dial.go
@@ -107,7 +107,7 @@ type dialScheduler struct {
// Everything below here belongs to loop and
// should only be accessed by code on the loop goroutine.
dialing map[enode.ID]*dialTask // active tasks
- peers map[enode.ID]connFlag // all connected peers
+ peers map[enode.ID]struct{} // all connected peers
dialPeers int // current number of dialed peers
// The static map tracks all static dial tasks. The subset of usable static dial tasks
@@ -166,7 +166,7 @@ func newDialScheduler(config dialConfig, it enode.Iterator, setupFunc dialSetupF
setupFunc: setupFunc,
dialing: make(map[enode.ID]*dialTask),
static: make(map[enode.ID]*dialTask),
- peers: make(map[enode.ID]connFlag),
+ peers: make(map[enode.ID]struct{}),
doneCh: make(chan *dialTask),
nodesIn: make(chan *enode.Node),
addStaticCh: make(chan *enode.Node),
@@ -259,7 +259,7 @@ loop:
d.dialPeers++
}
id := c.node.ID()
- d.peers[id] = c.flags
+ d.peers[id] = struct{}{}
// Remove from static pool because the node is now connected.
task := d.static[id]
if task != nil && task.staticPoolIndex >= 0 {
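Switching `peers` to `map[enode.ID]struct{}` is the idiomatic Go set: the stored flags were never read back, and `struct{}` values take no space. Sketch:

```go
package main

import "fmt"

func main() {
	// A set of IDs: membership is the only question ever asked, so storing
	// connection flags as values (as the old map did here) was wasted memory.
	peers := make(map[string]struct{})
	peers["node-a"] = struct{}{}
	_, connected := peers["node-a"]
	fmt.Println(connected) // true
}
```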
diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go
index d9c807e0a..b13041d1b 100644
--- a/p2p/discover/v5wire/encoding_test.go
+++ b/p2p/discover/v5wire/encoding_test.go
@@ -512,9 +512,6 @@ func (n *handshakeTestNode) init(key *ecdsa.PrivateKey, ip net.IP, clock mclock.
db, _ := enode.OpenDB("")
n.ln = enode.NewLocalNode(db, key)
n.ln.SetStaticIP(ip)
- if n.ln.Node().Seq() != 1 {
- panic(fmt.Errorf("unexpected seq %d", n.ln.Node().Seq()))
- }
n.c = NewCodec(n.ln, key, clock)
}
diff --git a/p2p/enode/iter_test.go b/p2p/enode/iter_test.go
index 6009661f3..5014346af 100644
--- a/p2p/enode/iter_test.go
+++ b/p2p/enode/iter_test.go
@@ -268,7 +268,7 @@ func (s *genIter) Node() *Node {
}
func (s *genIter) Close() {
- s.index = ^uint32(0)
+ atomic.StoreUint32(&s.index, ^uint32(0))
}
func testNode(id, seq uint64) *Node {
diff --git a/p2p/enode/localnode.go b/p2p/enode/localnode.go
index d8aa02a77..4827b6c0a 100644
--- a/p2p/enode/localnode.go
+++ b/p2p/enode/localnode.go
@@ -36,20 +36,25 @@ const (
iptrackMinStatements = 10
iptrackWindow = 5 * time.Minute
iptrackContactWindow = 10 * time.Minute
+
+ // time needed to wait between two updates to the local ENR
+ recordUpdateThrottle = time.Millisecond
)
// LocalNode produces the signed node record of a local node, i.e. a node run in the
// current process. Setting ENR entries via the Set method updates the record. A new version
// of the record is signed on demand when the Node method is called.
type LocalNode struct {
- cur atomic.Value // holds a non-nil node pointer while the record is up-to-date.
+ cur atomic.Value // holds a non-nil node pointer while the record is up-to-date
+
id ID
key *ecdsa.PrivateKey
db *DB
// everything below is protected by a lock
- mu sync.Mutex
+ mu sync.RWMutex
seq uint64
+ update time.Time // timestamp when the record was last updated
entries map[string]enr.Entry
endpoint4 lnEndpoint
endpoint6 lnEndpoint
@@ -76,7 +81,8 @@ func NewLocalNode(db *DB, key *ecdsa.PrivateKey) *LocalNode {
},
}
ln.seq = db.localSeq(ln.id)
- ln.invalidate()
+ ln.update = time.Now()
+ ln.cur.Store((*Node)(nil))
return ln
}
@@ -87,14 +93,34 @@ func (ln *LocalNode) Database() *DB {
// Node returns the current version of the local node record.
func (ln *LocalNode) Node() *Node {
+ // If we have a valid record, return that
n := ln.cur.Load().(*Node)
if n != nil {
return n
}
+
// Record was invalidated, sign a new copy.
ln.mu.Lock()
defer ln.mu.Unlock()
+
+ // Double check the current record, since multiple goroutines might be waiting
+ // on the write mutex.
+ if n = ln.cur.Load().(*Node); n != nil {
+ return n
+ }
+
+ // The initial sequence number is the current timestamp in milliseconds. To ensure
+ // that the initial sequence number will always be higher than any previous sequence
+ // number (assuming the clock is correct), we want to avoid updating the record faster
+ // than once per ms. So we need to sleep here until the next possible update time has
+ // arrived.
+ lastChange := time.Since(ln.update)
+ if lastChange < recordUpdateThrottle {
+ time.Sleep(recordUpdateThrottle - lastChange)
+ }
+
ln.sign()
+ ln.update = time.Now()
return ln.cur.Load().(*Node)
}
@@ -114,6 +140,10 @@ func (ln *LocalNode) ID() ID {
// Set puts the given entry into the local record, overwriting any existing value.
// Use Set*IP and SetFallbackUDP to set IP addresses and UDP port, otherwise they'll
// be overwritten by the endpoint predictor.
+//
+// Since node record updates are throttled to one per millisecond, Set is asynchronous.
+// Any update will be queued up and published when at least one millisecond passes from
+// the last change.
func (ln *LocalNode) Set(e enr.Entry) {
ln.mu.Lock()
defer ln.mu.Unlock()
@@ -288,3 +318,12 @@ func (ln *LocalNode) bumpSeq() {
ln.seq++
ln.db.storeLocalSeq(ln.id, ln.seq)
}
+
+// nowMilliseconds gives the current timestamp at millisecond precision.
+func nowMilliseconds() uint64 {
+ ns := time.Now().UnixNano()
+ if ns < 0 {
+ return 0
+ }
+ return uint64(ns / 1000 / 1000)
+}
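Taken together, `localSeq` and the millisecond throttle implement clock-seeded, monotonic record sequence numbers. A sketch that mimics the scheme (hypothetical helper, not the geth API):

```go
package main

import (
	"fmt"
	"time"
)

// nextSeq mimics the scheme in this patch: fresh databases seed the sequence
// from the wall clock in milliseconds, and in-process bumps are throttled to
// one per millisecond, so a wiped database (same key, no stored seq) restarts
// at a value no lower than anything published before (assuming the clock is
// roughly correct).
func nextSeq(stored uint64) uint64 {
	if stored > 0 {
		return stored + 1
	}
	return uint64(time.Now().UnixNano() / int64(time.Millisecond))
}

func main() {
	seq := nextSeq(0) // fresh DB: seeded from the clock
	fmt.Println(seq, nextSeq(seq))
}
```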
diff --git a/p2p/enode/localnode_test.go b/p2p/enode/localnode_test.go
index 00746a8d2..312df813b 100644
--- a/p2p/enode/localnode_test.go
+++ b/p2p/enode/localnode_test.go
@@ -49,32 +49,39 @@ func TestLocalNode(t *testing.T) {
}
}
+// This test checks that the sequence number is persisted between restarts.
func TestLocalNodeSeqPersist(t *testing.T) {
+ timestamp := nowMilliseconds()
+
ln, db := newLocalNodeForTesting()
defer db.Close()
- if s := ln.Node().Seq(); s != 1 {
- t.Fatalf("wrong initial seq %d, want 1", s)
+ initialSeq := ln.Node().Seq()
+ if initialSeq < timestamp {
+ t.Fatalf("wrong initial seq %d, want at least %d", initialSeq, timestamp)
}
+
ln.Set(enr.WithEntry("x", uint(1)))
- if s := ln.Node().Seq(); s != 2 {
- t.Fatalf("wrong seq %d after set, want 2", s)
+ if s := ln.Node().Seq(); s != initialSeq+1 {
+ t.Fatalf("wrong seq %d after set, want %d", s, initialSeq+1)
}
// Create a new instance, it should reload the sequence number.
// The number increases just after that because a new record is
// created without the "x" entry.
ln2 := NewLocalNode(db, ln.key)
- if s := ln2.Node().Seq(); s != 3 {
- t.Fatalf("wrong seq %d on new instance, want 3", s)
+ if s := ln2.Node().Seq(); s != initialSeq+2 {
+ t.Fatalf("wrong seq %d on new instance, want %d", s, initialSeq+2)
}
+ finalSeq := ln2.Node().Seq()
+
// Create a new instance with a different node key on the same database.
// This should reset the sequence number.
key, _ := crypto.GenerateKey()
ln3 := NewLocalNode(db, key)
- if s := ln3.Node().Seq(); s != 1 {
- t.Fatalf("wrong seq %d on instance with changed key, want 1", s)
+ if s := ln3.Node().Seq(); s < finalSeq {
+ t.Fatalf("wrong seq %d on instance with changed key, want >= %d", s, finalSeq)
}
}
@@ -91,20 +98,20 @@ func TestLocalNodeEndpoint(t *testing.T) {
// Nothing is set initially.
assert.Equal(t, net.IP(nil), ln.Node().IP())
assert.Equal(t, 0, ln.Node().UDP())
- assert.Equal(t, uint64(1), ln.Node().Seq())
+ initialSeq := ln.Node().Seq()
// Set up fallback address.
ln.SetFallbackIP(fallback.IP)
ln.SetFallbackUDP(fallback.Port)
assert.Equal(t, fallback.IP, ln.Node().IP())
assert.Equal(t, fallback.Port, ln.Node().UDP())
- assert.Equal(t, uint64(2), ln.Node().Seq())
+ assert.Equal(t, initialSeq+1, ln.Node().Seq())
// Add endpoint statements from random hosts.
for i := 0; i < iptrackMinStatements; i++ {
assert.Equal(t, fallback.IP, ln.Node().IP())
assert.Equal(t, fallback.Port, ln.Node().UDP())
- assert.Equal(t, uint64(2), ln.Node().Seq())
+ assert.Equal(t, initialSeq+1, ln.Node().Seq())
from := &net.UDPAddr{IP: make(net.IP, 4), Port: 90}
rand.Read(from.IP)
@@ -112,11 +119,11 @@ func TestLocalNodeEndpoint(t *testing.T) {
}
assert.Equal(t, predicted.IP, ln.Node().IP())
assert.Equal(t, predicted.Port, ln.Node().UDP())
- assert.Equal(t, uint64(3), ln.Node().Seq())
+ assert.Equal(t, initialSeq+2, ln.Node().Seq())
// Static IP overrides prediction.
ln.SetStaticIP(staticIP)
assert.Equal(t, staticIP, ln.Node().IP())
assert.Equal(t, fallback.Port, ln.Node().UDP())
- assert.Equal(t, uint64(4), ln.Node().Seq())
+ assert.Equal(t, initialSeq+3, ln.Node().Seq())
}
diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go
index d62f383f0..d1712f759 100644
--- a/p2p/enode/nodedb.go
+++ b/p2p/enode/nodedb.go
@@ -427,9 +427,14 @@ func (db *DB) UpdateFindFailsV5(id ID, ip net.IP, fails int) error {
return db.storeInt64(v5Key(id, ip, dbNodeFindFails), int64(fails))
}
-// LocalSeq retrieves the local record sequence counter.
+// localSeq retrieves the local record sequence counter, defaulting to the current
+// timestamp if no previous one exists. This ensures that wiping all data associated
+// with a node (apart from its key) will not generate already-used sequence numbers.
func (db *DB) localSeq(id ID) uint64 {
- return db.fetchUint64(localItemKey(id, dbLocalSeq))
+ if seq := db.fetchUint64(localItemKey(id, dbLocalSeq)); seq > 0 {
+ return seq
+ }
+ return nowMilliseconds()
}
// storeLocalSeq stores the local record sequence counter.
diff --git a/p2p/netutil/toobig_notwindows.go b/p2p/netutil/toobig_notwindows.go
index 47b643857..f9f936ae5 100644
--- a/p2p/netutil/toobig_notwindows.go
+++ b/p2p/netutil/toobig_notwindows.go
@@ -14,7 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-//+build !windows
+//go:build !windows
+// +build !windows
package netutil
diff --git a/p2p/netutil/toobig_windows.go b/p2p/netutil/toobig_windows.go
index dfbb6d44f..652903e83 100644
--- a/p2p/netutil/toobig_windows.go
+++ b/p2p/netutil/toobig_windows.go
@@ -14,7 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-//+build windows
+//go:build windows
+// +build windows
package netutil
diff --git a/params/version.go b/params/version.go
index 8d222ac04..6b269537a 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 10 // Minor version component of the current release
- VersionPatch = 8 // Patch version component of the current release
+ VersionPatch = 9 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/plugins/wrappers/dbwrapper.go b/plugins/wrappers/dbwrapper.go
index 2a935961c..029c042f5 100644
--- a/plugins/wrappers/dbwrapper.go
+++ b/plugins/wrappers/dbwrapper.go
@@ -1,6 +1,7 @@
package wrappers
import (
+ "fmt"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/openrelayxyz/plugeth-utils/restricted"
)
@@ -19,7 +20,8 @@ func (d *dbWrapper) HasAncient(kind string, number uint64) (bool, error) { retur
func (d *dbWrapper) Ancient(kind string, number uint64) ([]byte, error) { return d.db.Ancient(kind, number) }
func (d *dbWrapper) Ancients() (uint64, error) { return d.db.Ancients() }
func (d *dbWrapper) AncientSize(kind string) (uint64, error) { return d.db.AncientSize(kind) }
-func (d *dbWrapper) AppendAncient(number uint64, hash, header, body, receipt, td []byte) error { return d.db.AppendAncient(number, hash, header, body, receipt, td) }
+func (d *dbWrapper) AppendAncient(number uint64, hash, header, body, receipt, td []byte) error { return fmt.Errorf("AppendAncient is no longer supported in geth 1.10.9 and above. Use ModifyAncients instead.") }
+func (d *dbWrapper) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, error) { return d.db.ModifyAncients(fn) }
func (d *dbWrapper) TruncateAncients(n uint64) error { return d.db.TruncateAncients(n) }
func (d *dbWrapper) Sync() error { return d.db.Sync() }
func (d *dbWrapper) Close() error { return d.db.Close() }
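For plugin authors migrating off `AppendAncient`: writes now go through `ModifyAncients`, which batches appends atomically. A sketch under the assumption of geth 1.10.9's `ethdb.AncientWriteOp` interface; the table names are illustrative:

```go
package main

import "github.com/ethereum/go-ethereum/ethdb"

// appendBlock sketches the migration of an old AppendAncient call to
// ModifyAncients. The payloads are pre-encoded byte slices, exactly as
// AppendAncient took them.
func appendBlock(db ethdb.AncientWriter, number uint64, header, body []byte) error {
	_, err := db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		if err := op.AppendRaw("headers", number, header); err != nil {
			return err
		}
		return op.AppendRaw("bodies", number, body)
	})
	return err
}

func main() {}
```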
diff --git a/plugins/wrappers/wrappers.go b/plugins/wrappers/wrappers.go
index 5b9c12661..f757a04c5 100644
--- a/plugins/wrappers/wrappers.go
+++ b/plugins/wrappers/wrappers.go
@@ -15,11 +15,11 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/plugins/interfaces"
+ // "github.com/ethereum/go-ethereum/plugins/interfaces"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/openrelayxyz/plugeth-utils/core"
@@ -101,6 +101,10 @@ func (w WrappedTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, c
func (w WrappedTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
w.r.CaptureEnd(output, gasUsed, t, err)
}
+
+// TODO: Align these with PluGeth-utils
+func (w WrappedTracer) CaptureEnter(vm.OpCode, common.Address, common.Address, []byte, uint64, *big.Int) {}
+func (w WrappedTracer) CaptureExit([]byte, uint64, error) {}
func (w WrappedTracer) GetResult() (interface{}, error) {
return w.r.Result()
}
@@ -218,7 +222,7 @@ func (n *Node) Attach() (core.Client, error) {
}
type Backend struct {
- b interfaces.Backend
+ b ethapi.Backend
newTxsFeed event.Feed
newTxsOnce sync.Once
chainFeed event.Feed
@@ -236,7 +240,7 @@ type Backend struct {
chainConfig *params.ChainConfig
}
-func NewBackend(b interfaces.Backend) *Backend {
+func NewBackend(b ethapi.Backend) *Backend {
return &Backend{b: b}
}
@@ -379,8 +383,12 @@ func (b *Backend) GetLogs(ctx context.Context, blockHash core.Hash) ([][]byte, e
return encLogs, nil
} // []RLP encoded logs
+type dli interface {
+ SyncProgress() ethereum.SyncProgress
+}
+
type dl struct {
- dl *downloader.Downloader
+ dl dli
}
type progress struct {
@@ -404,11 +412,11 @@ func (p *progress) KnownStates() uint64 {
}
func (d *dl) Progress() core.Progress {
- return &progress{d.dl.Progress()}
+ return &progress{d.dl.SyncProgress()}
}
func (b *Backend) Downloader() core.Downloader {
- return &dl{b.b.Downloader()}
+ return &dl{b.b}
}
func (b *Backend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) core.Subscription {
diff --git a/rlp/decode.go b/rlp/decode.go
index ac04d5d56..5f2e5ad5f 100644
--- a/rlp/decode.go
+++ b/rlp/decode.go
@@ -379,7 +379,7 @@ func decodeByteArray(s *Stream, val reflect.Value) error {
if err != nil {
return err
}
- slice := byteArrayBytes(val)
+ slice := byteArrayBytes(val, val.Len())
switch kind {
case Byte:
if len(slice) == 0 {
diff --git a/rlp/encode.go b/rlp/encode.go
index 334864434..1623e97a3 100644
--- a/rlp/encode.go
+++ b/rlp/encode.go
@@ -432,7 +432,20 @@ func makeByteArrayWriter(typ reflect.Type) writer {
case 1:
return writeLengthOneByteArray
default:
- return writeByteArray
+ length := typ.Len()
+ return func(val reflect.Value, w *encbuf) error {
+ if !val.CanAddr() {
+ // Getting the byte slice of val requires it to be addressable. Make it
+ // addressable by copying.
+ copy := reflect.New(val.Type()).Elem()
+ copy.Set(val)
+ val = copy
+ }
+ slice := byteArrayBytes(val, length)
+ w.encodeStringHeader(len(slice))
+ w.str = append(w.str, slice...)
+ return nil
+ }
}
}
@@ -451,21 +464,6 @@ func writeLengthOneByteArray(val reflect.Value, w *encbuf) error {
return nil
}
-func writeByteArray(val reflect.Value, w *encbuf) error {
- if !val.CanAddr() {
- // Getting the byte slice of val requires it to be addressable. Make it
- // addressable by copying.
- copy := reflect.New(val.Type()).Elem()
- copy.Set(val)
- val = copy
- }
-
- slice := byteArrayBytes(val)
- w.encodeStringHeader(len(slice))
- w.str = append(w.str, slice...)
- return nil
-}
-
func writeString(val reflect.Value, w *encbuf) error {
s := val.String()
if len(s) == 1 && s[0] <= 0x7f {
@@ -499,19 +497,39 @@ func makeSliceWriter(typ reflect.Type, ts tags) (writer, error) {
if etypeinfo.writerErr != nil {
return nil, etypeinfo.writerErr
}
- writer := func(val reflect.Value, w *encbuf) error {
- if !ts.tail {
- defer w.listEnd(w.list())
- }
- vlen := val.Len()
- for i := 0; i < vlen; i++ {
- if err := etypeinfo.writer(val.Index(i), w); err != nil {
- return err
+
+ var wfn writer
+ if ts.tail {
+ // This is for struct tail slices.
+ // w.list is not called for them.
+ wfn = func(val reflect.Value, w *encbuf) error {
+ vlen := val.Len()
+ for i := 0; i < vlen; i++ {
+ if err := etypeinfo.writer(val.Index(i), w); err != nil {
+ return err
+ }
}
+ return nil
+ }
+ } else {
+ // This is for regular slices and arrays.
+ wfn = func(val reflect.Value, w *encbuf) error {
+ vlen := val.Len()
+ if vlen == 0 {
+ w.str = append(w.str, 0xC0)
+ return nil
+ }
+ listOffset := w.list()
+ for i := 0; i < vlen; i++ {
+ if err := etypeinfo.writer(val.Index(i), w); err != nil {
+ return err
+ }
+ }
+ w.listEnd(listOffset)
+ return nil
}
- return nil
}
- return writer, nil
+ return wfn, nil
}
func makeStructWriter(typ reflect.Type) (writer, error) {
@@ -562,12 +580,8 @@ func makeStructWriter(typ reflect.Type) (writer, error) {
return writer, nil
}
-func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
- etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
- if etypeinfo.writerErr != nil {
- return nil, etypeinfo.writerErr
- }
- // Determine how to encode nil pointers.
+// nilEncoding returns the encoded value of a nil pointer.
+func nilEncoding(typ reflect.Type, ts tags) uint8 {
var nilKind Kind
if ts.nilOK {
nilKind = ts.nilKind // use struct tag if provided
@@ -575,16 +589,29 @@ func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
nilKind = defaultNilKind(typ.Elem())
}
+ switch nilKind {
+ case String:
+ return 0x80
+ case List:
+ return 0xC0
+ default:
+ panic(fmt.Errorf("rlp: invalid nil kind %d", nilKind))
+ }
+}
+
+func makePtrWriter(typ reflect.Type, ts tags) (writer, error) {
+ etypeinfo := theTC.infoWhileGenerating(typ.Elem(), tags{})
+ if etypeinfo.writerErr != nil {
+ return nil, etypeinfo.writerErr
+ }
+ nilEncoding := nilEncoding(typ, ts)
+
writer := func(val reflect.Value, w *encbuf) error {
- if val.IsNil() {
- if nilKind == String {
- w.str = append(w.str, 0x80)
- } else {
- w.listEnd(w.list())
- }
- return nil
+ if ev := val.Elem(); ev.IsValid() {
+ return etypeinfo.writer(ev, w)
}
- return etypeinfo.writer(val.Elem(), w)
+ w.str = append(w.str, nilEncoding)
+ return nil
}
return writer, nil
}
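The net behavior of `nilEncoding` and the rewritten slice writer is observable from the package API; a small sanity check:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A nil struct pointer encodes as the precomputed "nil kind" byte:
	// 0xC0 (empty list) for struct pointers, 0x80 (empty string) for
	// pointers to string-like types.
	type point struct{ X, Y uint64 }
	var p *point
	b, _ := rlp.EncodeToBytes([]*point{p})
	fmt.Printf("%x\n", b) // c1c0: one-element list holding an empty list

	// The rewritten slice writer also emits 0xC0 directly for empty slices
	// instead of opening and closing a list header.
	b, _ = rlp.EncodeToBytes([]uint{})
	fmt.Printf("%x\n", b) // c0
}
```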
diff --git a/rlp/encode_test.go b/rlp/encode_test.go
index 25d4aac26..a63743440 100644
--- a/rlp/encode_test.go
+++ b/rlp/encode_test.go
@@ -540,3 +540,31 @@ func BenchmarkEncodeByteArrayStruct(b *testing.B) {
}
}
}
+
+type structSliceElem struct {
+ X uint64
+ Y uint64
+ Z uint64
+}
+
+type structPtrSlice []*structSliceElem
+
+func BenchmarkEncodeStructPtrSlice(b *testing.B) {
+ var out bytes.Buffer
+ var value = structPtrSlice{
+ &structSliceElem{1, 1, 1},
+ &structSliceElem{2, 2, 2},
+ &structSliceElem{3, 3, 3},
+ &structSliceElem{5, 5, 5},
+ &structSliceElem{6, 6, 6},
+ &structSliceElem{7, 7, 7},
+ }
+
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ out.Reset()
+ if err := Encode(&out, &value); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/rlp/safe.go b/rlp/safe.go
index a80380aef..3c910337b 100644
--- a/rlp/safe.go
+++ b/rlp/safe.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build nacl || js || !cgo
// +build nacl js !cgo
package rlp
@@ -21,6 +22,6 @@ package rlp
import "reflect"
// byteArrayBytes returns a slice of the byte array v.
-func byteArrayBytes(v reflect.Value) []byte {
- return v.Slice(0, v.Len()).Bytes()
+func byteArrayBytes(v reflect.Value, length int) []byte {
+ return v.Slice(0, length).Bytes()
}
diff --git a/rlp/unsafe.go b/rlp/unsafe.go
index 94ed5405a..2152ba35f 100644
--- a/rlp/unsafe.go
+++ b/rlp/unsafe.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+//go:build !nacl && !js && cgo
// +build !nacl,!js,cgo
package rlp
@@ -24,12 +25,11 @@ import (
)
// byteArrayBytes returns a slice of the byte array v.
-func byteArrayBytes(v reflect.Value) []byte {
- len := v.Len()
+func byteArrayBytes(v reflect.Value, length int) []byte {
var s []byte
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s))
hdr.Data = v.UnsafeAddr()
- hdr.Cap = len
- hdr.Len = len
+ hdr.Cap = length
+ hdr.Len = length
return s
}
diff --git a/rpc/client.go b/rpc/client.go
index 198ce6357..e9deb3f6d 100644
--- a/rpc/client.go
+++ b/rpc/client.go
@@ -59,6 +59,12 @@ const (
maxClientSubscriptionBuffer = 20000
)
+const (
+ httpScheme = "http"
+ wsScheme = "ws"
+ ipcScheme = "ipc"
+)
+
// BatchElem is an element in a batch request.
type BatchElem struct {
Method string
@@ -75,7 +81,7 @@ type BatchElem struct {
// Client represents a connection to an RPC server.
type Client struct {
idgen func() ID // for subscriptions
- isHTTP bool
+ scheme string // connection type: http, ws or ipc
services *serviceRegistry
idCounter uint32
@@ -111,6 +117,10 @@ type clientConn struct {
func (c *Client) newClientConn(conn ServerCodec) *clientConn {
ctx := context.WithValue(context.Background(), clientContextKey{}, c)
+ // HTTP connections have already set the scheme
+ if !c.isHTTP() && c.scheme != "" {
+ ctx = context.WithValue(ctx, "scheme", c.scheme)
+ }
handler := newHandler(ctx, conn, c.idgen, c.services)
return &clientConn{conn, handler}
}
@@ -136,7 +146,7 @@ func (op *requestOp) wait(ctx context.Context, c *Client) (*jsonrpcMessage, erro
select {
case <-ctx.Done():
// Send the timeout to dispatch so it can remove the request IDs.
- if !c.isHTTP {
+ if !c.isHTTP() {
select {
case c.reqTimeout <- op:
case <-c.closing:
@@ -203,10 +213,18 @@ func newClient(initctx context.Context, connect reconnectFunc) (*Client, error)
}
func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry) *Client {
- _, isHTTP := conn.(*httpConn)
+ scheme := ""
+ switch conn.(type) {
+ case *httpConn:
+ scheme = httpScheme
+ case *websocketCodec:
+ scheme = wsScheme
+ case *jsonCodec:
+ scheme = ipcScheme
+ }
c := &Client{
idgen: idgen,
- isHTTP: isHTTP,
+ scheme: scheme,
services: services,
writeConn: conn,
close: make(chan struct{}),
@@ -219,7 +237,7 @@ func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry) *C
reqSent: make(chan error, 1),
reqTimeout: make(chan *requestOp),
}
- if !isHTTP {
+ if !c.isHTTP() {
go c.dispatch(conn)
}
return c
@@ -250,7 +268,7 @@ func (c *Client) SupportedModules() (map[string]string, error) {
// Close closes the client, aborting any in-flight requests.
func (c *Client) Close() {
- if c.isHTTP {
+ if c.isHTTP() {
return
}
select {
@@ -264,7 +282,7 @@ func (c *Client) Close() {
// This method only works for clients using HTTP, it doesn't have
// any effect for clients using another transport.
func (c *Client) SetHeader(key, value string) {
- if !c.isHTTP {
+ if !c.isHTTP() {
return
}
conn := c.writeConn.(*httpConn)
@@ -298,7 +316,7 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str
}
op := &requestOp{ids: []json.RawMessage{msg.ID}, resp: make(chan *jsonrpcMessage, 1)}
- if c.isHTTP {
+ if c.isHTTP() {
err = c.sendHTTP(ctx, op, msg)
} else {
err = c.send(ctx, op, msg)
@@ -357,7 +375,7 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error {
}
var err error
- if c.isHTTP {
+ if c.isHTTP() {
err = c.sendBatchHTTP(ctx, op, msgs)
} else {
err = c.send(ctx, op, msgs)
@@ -402,7 +420,7 @@ func (c *Client) Notify(ctx context.Context, method string, args ...interface{}) error {
}
msg.ID = nil
- if c.isHTTP {
+ if c.isHTTP() {
return c.sendHTTP(ctx, op, msg)
}
return c.send(ctx, op, msg)
@@ -440,7 +458,7 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interface{}, args ...interface{}) (*ClientSubscription, error) {
if chanVal.IsNil() {
panic("channel given to Subscribe must not be nil")
}
- if c.isHTTP {
+ if c.isHTTP() {
return nil, ErrNotificationsUnsupported
}
@@ -642,3 +660,7 @@ func (c *Client) read(codec ServerCodec) {
c.readOp <- readOp{msgs, batch}
}
}
+
+func (c *Client) isHTTP() bool {
+ return c.scheme == httpScheme
+}
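
The boolean isHTTP field is generalized into a scheme string derived from the codec's concrete type, so ws and ipc connections can later be told apart as well (the scheme is also attached to the handler context above). A self-contained sketch of the detection pattern, using stand-in types for the package's codecs (the real ones are *httpConn, *websocketCodec and *jsonCodec):

package main

import "fmt"

type httpConn struct{}       // stand-in: HTTP round-trip codec
type websocketCodec struct{} // stand-in: websocket codec
type jsonCodec struct{}      // stand-in: plain JSON codec used for IPC

const (
	httpScheme = "http"
	wsScheme   = "ws"
	ipcScheme  = "ipc"
)

// schemeOf mirrors the type switch added to initClient: codecs that
// match none of the known transports (e.g. in-process connections)
// map to the empty scheme, for which isHTTP() reports false.
func schemeOf(conn interface{}) string {
	switch conn.(type) {
	case *httpConn:
		return httpScheme
	case *websocketCodec:
		return wsScheme
	case *jsonCodec:
		return ipcScheme
	default:
		return ""
	}
}

func main() {
	fmt.Println(schemeOf(&websocketCodec{})) // ws
	fmt.Println(schemeOf(struct{}{}))        // "" (unknown transport)
}
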
diff --git a/rpc/constants_unix.go b/rpc/constants_unix.go
index 2f98d6499..1f04d15d7 100644
--- a/rpc/constants_unix.go
+++ b/rpc/constants_unix.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package rpc
diff --git a/rpc/constants_unix_nocgo.go b/rpc/constants_unix_nocgo.go
index ecb231f92..a62e4ee52 100644
--- a/rpc/constants_unix_nocgo.go
+++ b/rpc/constants_unix_nocgo.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build !cgo && !windows
// +build !cgo,!windows
package rpc
diff --git a/rpc/ipc_js.go b/rpc/ipc_js.go
index 7e7554a76..453a20bc1 100644
--- a/rpc/ipc_js.go
+++ b/rpc/ipc_js.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build js
// +build js
package rpc
diff --git a/rpc/ipc_unix.go b/rpc/ipc_unix.go
index f4690cc0a..249a9cf04 100644
--- a/rpc/ipc_unix.go
+++ b/rpc/ipc_unix.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package rpc
diff --git a/rpc/ipc_windows.go b/rpc/ipc_windows.go
index ca56a3ce4..adb1826f0 100644
--- a/rpc/ipc_windows.go
+++ b/rpc/ipc_windows.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build windows
// +build windows
package rpc
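
The //go:build lines added to rlp/unsafe.go and the rpc files above all follow the constraint syntax introduced in Go 1.17: gofmt now emits it alongside the legacy // +build comment and keeps the two in agreement. Note the operator differences (|| and && versus space and comma). An illustrative pairing, not taken from this diff:

//go:build linux && !cgo
// +build linux,!cgo

// With Go >= 1.17, gofmt derives the legacy +build line from the
// //go:build expression automatically, so the two cannot drift apart.
package example
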
diff --git a/rpc/types.go b/rpc/types.go
index ad068defa..d9c2317a7 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -98,6 +98,22 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error {
return nil
}
+// MarshalText implements encoding.TextMarshaler. It marshals:
+// - "latest", "earliest" or "pending" as strings
+// - other numbers as hex
+func (bn BlockNumber) MarshalText() ([]byte, error) {
+ switch bn {
+ case EarliestBlockNumber:
+ return []byte("earliest"), nil
+ case LatestBlockNumber:
+ return []byte("latest"), nil
+ case PendingBlockNumber:
+ return []byte("pending"), nil
+ default:
+ return hexutil.Uint64(bn).MarshalText()
+ }
+}
+
func (bn BlockNumber) Int64() int64 {
return (int64)(bn)
}
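
With MarshalText in place, BlockNumber values round-trip through encoding/json: the sentinel values serialize back to their names and everything else to hex, mirroring UnmarshalJSON. A self-contained sketch of the behavior; the constant values below match the rpc package's definitions, but the type is re-created here for illustration (the real default case goes through hexutil):

package main

import (
	"encoding/json"
	"fmt"
)

type BlockNumber int64

const (
	PendingBlockNumber  BlockNumber = -2
	LatestBlockNumber   BlockNumber = -1
	EarliestBlockNumber BlockNumber = 0
)

// MarshalText makes BlockNumber an encoding.TextMarshaler, so
// json.Marshal emits it as a JSON string.
func (bn BlockNumber) MarshalText() ([]byte, error) {
	switch bn {
	case EarliestBlockNumber:
		return []byte("earliest"), nil
	case LatestBlockNumber:
		return []byte("latest"), nil
	case PendingBlockNumber:
		return []byte("pending"), nil
	default:
		return []byte(fmt.Sprintf("%#x", int64(bn))), nil
	}
}

func main() {
	out, _ := json.Marshal([]BlockNumber{PendingBlockNumber, 1024})
	fmt.Println(string(out)) // ["pending","0x400"]
}
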
diff --git a/rpc/types_test.go b/rpc/types_test.go
index 89b0c9171..f110dee7c 100644
--- a/rpc/types_test.go
+++ b/rpc/types_test.go
@@ -18,6 +18,8 @@ package rpc
import (
"encoding/json"
+ "math"
+ "reflect"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -122,3 +123,33 @@ func TestBlockNumberOrHash_UnmarshalJSON(t *testing.T) {
}
}
}
+
+func TestBlockNumberOrHash_WithNumber_MarshalAndUnmarshal(t *testing.T) {
+ tests := []struct {
+ name string
+ number int64
+ }{
+ {"max", math.MaxInt64},
+ {"pending", int64(PendingBlockNumber)},
+ {"latest", int64(LatestBlockNumber)},
+ {"earliest", int64(EarliestBlockNumber)},
+ }
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ bnh := BlockNumberOrHashWithNumber(BlockNumber(test.number))
+ marshalled, err := json.Marshal(bnh)
+ if err != nil {
+ t.Fatal("cannot marshal:", err)
+ }
+ var unmarshalled BlockNumberOrHash
+ err = json.Unmarshal(marshalled, &unmarshalled)
+ if err != nil {
+ t.Fatal("cannot unmarshal:", err)
+ }
+ if !reflect.DeepEqual(bnh, unmarshalled) {
+ t.Fatalf("wrong result: expected %v, got %v", bnh, unmarshalled)
+ }
+ })
+ }
+}
diff --git a/rpc/websocket.go b/rpc/websocket.go
index afeb4c208..5571324af 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -37,6 +37,7 @@ const (
wsWriteBuffer = 1024
wsPingInterval = 60 * time.Second
wsPingWriteTimeout = 5 * time.Second
+ wsPongTimeout = 30 * time.Second
wsMessageSizeLimit = 15 * 1024 * 1024
)
@@ -241,6 +242,10 @@ type websocketCodec struct {
func newWebsocketCodec(conn *websocket.Conn) ServerCodec {
conn.SetReadLimit(wsMessageSizeLimit)
+ conn.SetPongHandler(func(appData string) error {
+ conn.SetReadDeadline(time.Time{})
+ return nil
+ })
wc := &websocketCodec{
jsonCodec: NewFuncCodec(conn, conn.WriteJSON, conn.ReadJSON).(*jsonCodec),
conn: conn,
@@ -287,6 +292,7 @@ func (wc *websocketCodec) pingLoop() {
wc.jsonCodec.encMu.Lock()
wc.conn.SetWriteDeadline(time.Now().Add(wsPingWriteTimeout))
wc.conn.WriteMessage(websocket.PingMessage, nil)
+ wc.conn.SetReadDeadline(time.Now().Add(wsPongTimeout))
wc.jsonCodec.encMu.Unlock()
timer.Reset(wsPingInterval)
}
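
Previously a severed peer was only noticed when a write failed; pings are fire-and-forget, so a connection whose remote end went silent could linger indefinitely. Now each ping arms a read deadline that the pong handler clears, failing the blocked read after wsPongTimeout. A condensed sketch of the pattern with gorilla/websocket (the real code additionally serializes writes with the codec's encMu, and package/function names here are illustrative):

package wsping

import (
	"time"

	"github.com/gorilla/websocket"
)

// keepalive arms a read deadline after every ping; the pong handler,
// which runs on the read path, disarms it. If no pong arrives in
// time, the pending read fails and the connection gets torn down.
func keepalive(conn *websocket.Conn, pingEvery, pongWait time.Duration) {
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Time{}) // pong seen: disarm
	})
	tick := time.NewTicker(pingEvery)
	defer tick.Stop()
	for range tick.C {
		conn.SetWriteDeadline(time.Now().Add(5 * time.Second))
		if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
			return
		}
		conn.SetReadDeadline(time.Now().Add(pongWait))
	}
}
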
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index 4976853ba..248609283 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -18,11 +18,15 @@ package rpc
import (
"context"
+ "io"
"net"
"net/http"
"net/http/httptest"
+ "net/http/httputil"
+ "net/url"
"reflect"
"strings"
+ "sync/atomic"
"testing"
"time"
@@ -188,6 +192,63 @@ func TestClientWebsocketLargeMessage(t *testing.T) {
}
}
+func TestClientWebsocketSevered(t *testing.T) {
+ t.Parallel()
+
+ var (
+ server = wsPingTestServer(t, nil)
+ ctx = context.Background()
+ )
+ defer server.Shutdown(ctx)
+
+ u, err := url.Parse("http://" + server.Addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rproxy := httputil.NewSingleHostReverseProxy(u)
+ var severable *severableReadWriteCloser
+ rproxy.ModifyResponse = func(response *http.Response) error {
+ severable = &severableReadWriteCloser{ReadWriteCloser: response.Body.(io.ReadWriteCloser)}
+ response.Body = severable
+ return nil
+ }
+ frontendProxy := httptest.NewServer(rproxy)
+ defer frontendProxy.Close()
+
+ wsURL := "ws:" + strings.TrimPrefix(frontendProxy.URL, "http:")
+ client, err := DialWebsocket(ctx, wsURL, "")
+ if err != nil {
+ t.Fatalf("client dial error: %v", err)
+ }
+ defer client.Close()
+
+ resultChan := make(chan int)
+ sub, err := client.EthSubscribe(ctx, resultChan, "foo")
+ if err != nil {
+ t.Fatalf("client subscribe error: %v", err)
+ }
+
+ // sever the connection
+ severable.Sever()
+
+ // Wait for subscription error.
+ timeout := time.NewTimer(3 * wsPingInterval)
+ defer timeout.Stop()
+ for {
+ select {
+ case err := <-sub.Err():
+ t.Log("client subscription error:", err)
+ return
+ case result := <-resultChan:
+ t.Error("unexpected result:", result)
+ return
+ case <-timeout.C:
+ t.Error("didn't get any error within the test timeout")
+ return
+ }
+ }
+}
+
// wsPingTestServer runs a WebSocket server which accepts a single subscription request.
// When a value arrives on sendPing, the server sends a ping frame, waits for a matching
// pong and finally delivers a single subscription result.
@@ -290,3 +351,31 @@ func wsPingTestHandler(t *testing.T, conn *websocket.Conn, shutdown, sendPing <-chan struct{}) {
}
}
}
+
+// severableReadWriteCloser wraps an io.ReadWriteCloser. Once Sever() has been
+// called, writes are silently discarded and reads return no data.
+type severableReadWriteCloser struct {
+ io.ReadWriteCloser
+ severed int32 // atomic
+}
+
+func (s *severableReadWriteCloser) Sever() {
+ atomic.StoreInt32(&s.severed, 1)
+}
+
+func (s *severableReadWriteCloser) Read(p []byte) (n int, err error) {
+ if atomic.LoadInt32(&s.severed) > 0 {
+ return 0, nil
+ }
+ return s.ReadWriteCloser.Read(p)
+}
+
+func (s *severableReadWriteCloser) Write(p []byte) (n int, err error) {
+ if atomic.LoadInt32(&s.severed) > 0 {
+ return len(p), nil
+ }
+ return s.ReadWriteCloser.Write(p)
+}
+
+func (s *severableReadWriteCloser) Close() error {
+ return s.ReadWriteCloser.Close()
+}
diff --git a/tests/fuzzers/bls12381/bls12381_fuzz.go b/tests/fuzzers/bls12381/bls12381_fuzz.go
index c0f452f3e..b283ed11f 100644
--- a/tests/fuzzers/bls12381/bls12381_fuzz.go
+++ b/tests/fuzzers/bls12381/bls12381_fuzz.go
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+//go:build gofuzz
// +build gofuzz
package bls
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 5cea7769c..e73ef4851 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -173,7 +173,7 @@ func (f *fuzzer) fuzz() int {
return 0
}
// Flush trie -> database
- rootA, err := trieA.Commit(nil)
+ rootA, _, err := trieA.Commit(nil)
if err != nil {
panic(err)
}
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index 762ab5f34..e993af47c 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -69,7 +69,7 @@ func newDataSource(input []byte) *dataSource {
input, bytes.NewReader(input),
}
}
-func (ds *dataSource) ReadByte() byte {
+func (ds *dataSource) readByte() byte {
if b, err := ds.reader.ReadByte(); err != nil {
return 0
} else {
@@ -89,22 +89,22 @@ func Generate(input []byte) randTest {
r := newDataSource(input)
genKey := func() []byte {
- if len(allKeys) < 2 || r.ReadByte() < 0x0f {
+ if len(allKeys) < 2 || r.readByte() < 0x0f {
// new key
- key := make([]byte, r.ReadByte()%50)
+ key := make([]byte, r.readByte()%50)
r.Read(key)
allKeys = append(allKeys, key)
return key
}
// use existing key
- return allKeys[int(r.ReadByte())%len(allKeys)]
+ return allKeys[int(r.readByte())%len(allKeys)]
}
var steps randTest
for i := 0; !r.Ended(); i++ {
- step := randTestStep{op: int(r.ReadByte()) % opMax}
+ step := randTestStep{op: int(r.readByte()) % opMax}
switch step.op {
case opUpdate:
step.key = genKey()
@@ -162,11 +162,11 @@ func runRandTest(rt randTest) error {
rt[i].err = fmt.Errorf("mismatch for key 0x%x, got 0x%x want 0x%x", step.key, v, want)
}
case opCommit:
- _, rt[i].err = tr.Commit(nil)
+ _, _, rt[i].err = tr.Commit(nil)
case opHash:
tr.Hash()
case opReset:
- hash, err := tr.Commit(nil)
+ hash, _, err := tr.Commit(nil)
if err != nil {
return err
}
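
The ReadByte → readByte rename avoids a lint pitfall rather than changing behavior: a method named ReadByte is conventionally expected to implement io.ByteReader's ReadByte() (byte, error), and the func() byte form trips go vet's stdmethods check. Presumably that is the motivation here; a minimal sketch of the resulting shape:

package main

import "fmt"

type dataSource struct{ data []byte }

// readByte is deliberately unexported: an exported ReadByte() byte
// would look like a mis-declared io.ByteReader implementation, which
// vet's stdmethods analyzer reports as an error.
func (ds *dataSource) readByte() byte {
	if len(ds.data) == 0 {
		return 0 // exhausted input reads as zero, like the fuzzer's
	}
	b := ds.data[0]
	ds.data = ds.data[1:]
	return b
}

func main() {
	ds := &dataSource{data: []byte{0x2a}}
	fmt.Println(ds.readByte(), ds.readByte()) // 42 0
}
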
diff --git a/tests/state_test.go b/tests/state_test.go
index c2ca0e8d6..9554e7563 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -42,6 +42,7 @@ func TestState(t *testing.T) {
// Very time consuming
st.skipLoad(`^stTimeConsuming/`)
+ st.skipLoad(`.*vmPerformance/loop.*`)
// Uses 1GB RAM per tested fork
st.skipLoad(`^stStaticCall/static_Call1MB`)
@@ -114,7 +115,7 @@ func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) {
}
buf := new(bytes.Buffer)
w := bufio.NewWriter(buf)
- tracer := vm.NewJSONLogger(&vm.LogConfig{DisableMemory: true}, w)
+ tracer := vm.NewJSONLogger(&vm.LogConfig{}, w)
config.Debug, config.Tracer = true, tracer
err2 := test(config)
if !reflect.DeepEqual(err, err2) {
diff --git a/trie/committer.go b/trie/committer.go
index ce4065f5f..0721990a2 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -72,24 +72,24 @@ func returnCommitterToPool(h *committer) {
committerPool.Put(h)
}
-// commit collapses a node down into a hash node and inserts it into the database
-func (c *committer) Commit(n node, db *Database) (hashNode, error) {
+// Commit collapses a node down into a hash node and inserts it into the database
+func (c *committer) Commit(n node, db *Database) (hashNode, int, error) {
if db == nil {
- return nil, errors.New("no db provided")
+ return nil, 0, errors.New("no db provided")
}
- h, err := c.commit(n, db)
+ h, committed, err := c.commit(n, db)
if err != nil {
- return nil, err
+ return nil, 0, err
}
- return h.(hashNode), nil
+ return h.(hashNode), committed, nil
}
// commit collapses a node down into a hash node and inserts it into the database
-func (c *committer) commit(n node, db *Database) (node, error) {
+func (c *committer) commit(n node, db *Database) (node, int, error) {
// if this path is clean, use available cached data
hash, dirty := n.cache()
if hash != nil && !dirty {
- return hash, nil
+ return hash, 0, nil
}
// Commit children, then parent, and remove the dirty flag.
switch cn := n.(type) {
@@ -97,37 +97,38 @@ func (c *committer) commit(n node, db *Database) (node, error) {
// Commit child
collapsed := cn.copy()
- // If the child is fullnode, recursively commit.
- // Otherwise it can only be hashNode or valueNode.
+ // If the child is fullNode, recursively commit,
+ // otherwise it can only be hashNode or valueNode.
+ var childCommitted int
if _, ok := cn.Val.(*fullNode); ok {
- childV, err := c.commit(cn.Val, db)
+ childV, committed, err := c.commit(cn.Val, db)
if err != nil {
- return nil, err
+ return nil, 0, err
}
- collapsed.Val = childV
+ collapsed.Val, childCommitted = childV, committed
}
// The key needs to be copied, since we're delivering it to the database
collapsed.Key = hexToCompact(cn.Key)
hashedNode := c.store(collapsed, db)
if hn, ok := hashedNode.(hashNode); ok {
- return hn, nil
+ return hn, childCommitted + 1, nil
}
- return collapsed, nil
+ return collapsed, childCommitted, nil
case *fullNode:
- hashedKids, err := c.commitChildren(cn, db)
+ hashedKids, childCommitted, err := c.commitChildren(cn, db)
if err != nil {
- return nil, err
+ return nil, 0, err
}
collapsed := cn.copy()
collapsed.Children = hashedKids
hashedNode := c.store(collapsed, db)
if hn, ok := hashedNode.(hashNode); ok {
- return hn, nil
+ return hn, childCommitted + 1, nil
}
- return collapsed, nil
+ return collapsed, childCommitted, nil
case hashNode:
- return cn, nil
+ return cn, 0, nil
default:
// nil, valuenode shouldn't be committed
panic(fmt.Sprintf("%T: invalid node: %v", n, n))
@@ -135,8 +136,11 @@ func (c *committer) commit(n node, db *Database) (node, error) {
}
// commitChildren commits the children of the given fullnode
-func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, error) {
- var children [17]node
+func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, int, error) {
+ var (
+ committed int
+ children [17]node
+ )
for i := 0; i < 16; i++ {
child := n.Children[i]
if child == nil {
@@ -144,25 +148,26 @@ func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, error) {
}
// If it's the hashed child, save the hash value directly.
// Note: it's impossible that the child in range [0, 15]
- // is a valuenode.
+ // is a valueNode.
if hn, ok := child.(hashNode); ok {
children[i] = hn
continue
}
// Commit the child recursively and store the "hashed" value.
// Note the returned node can be some embedded nodes, so it's
- // possible the type is not hashnode.
- hashed, err := c.commit(child, db)
+ // possible the type is not hashNode.
+ hashed, childCommitted, err := c.commit(child, db)
if err != nil {
- return children, err
+ return children, 0, err
}
children[i] = hashed
+ committed += childCommitted
}
// For the 17th child, it's possible the type is valuenode.
if n.Children[16] != nil {
children[16] = n.Children[16]
}
- return children, nil
+ return children, committed, nil
}
// store hashes the node n and if we have a storage layer specified, it writes
@@ -176,7 +181,7 @@ func (c *committer) store(n node, db *Database) node {
)
if hash == nil {
// This was not generated - must be a small node stored in the parent.
- // In theory we should apply the leafCall here if it's not nil(embedded
+ // In theory, we should apply the leafCall here if it's not nil (embedded
// nodes usually contain values). But small values (less than 32 bytes) are
// not our target.
return n
@@ -224,7 +229,7 @@ func (c *committer) commitLoop(db *Database) {
}
case *fullNode:
// For children in range [0, 15], it's impossible
- // to contain valuenode. Only check the 17th child.
+ // to contain valueNode. Only check the 17th child.
if n.Children[16] != nil {
c.onleaf(nil, nil, n.Children[16].(valueNode), hash)
}
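
Commit (and its helpers) now also report how many trie nodes were actually written, accumulated bottom-up: each child's count is summed, and one is added whenever the node itself collapses to a hashNode and is stored. A toy model of the counting scheme — none of the real trie types appear here:

package main

import "fmt"

type node struct {
	children []*node
	dirty    bool
}

// commit mimics the accumulation in committer.commit: clean subtrees
// contribute nothing; otherwise children are committed first and the
// node itself counts as one more write.
func commit(n *node) int {
	if n == nil || !n.dirty {
		return 0
	}
	committed := 0
	for _, c := range n.children {
		committed += commit(c)
	}
	n.dirty = false
	return committed + 1
}

func main() {
	root := &node{dirty: true, children: []*node{
		{dirty: true},
		{dirty: false}, // cached/clean: not re-written
	}}
	fmt.Println(commit(root)) // 2
}
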
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 2518f7bac..95cafdd3b 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -393,7 +393,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
for _, val := range testdata1 {
ctr.Update([]byte(val.k), []byte(val.v))
}
- root, _ := ctr.Commit(nil)
+ root, _, _ := ctr.Commit(nil)
if !memonly {
triedb.Commit(root, true, nil)
}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index e38471c1b..18be12d34 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -20,7 +20,9 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
)
// SecureTrie wraps a trie with key hashing. In a secure trie, all
@@ -85,6 +87,21 @@ func (t *SecureTrie) TryGetNode(path []byte) ([]byte, int, error) {
return t.trie.TryGetNode(path)
}
+// TryUpdateAccount abstracts the writing of an account to the
+// secure trie.
+func (t *SecureTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error {
+ hk := t.hashKey(key)
+ data, err := rlp.EncodeToBytes(acc)
+ if err != nil {
+ return err
+ }
+ if err := t.trie.TryUpdate(hk, data); err != nil {
+ return err
+ }
+ t.getSecKeyCache()[string(hk)] = common.CopyBytes(key)
+ return nil
+}
+
// Update associates key with value in the trie. Subsequent calls to
// Get will return value. If value has length zero, any existing value
// is deleted from the trie and calls to Get will return nil.
@@ -144,7 +161,7 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte {
//
// Committing flushes nodes from memory. Subsequent Get calls will load nodes
// from the database.
-func (t *SecureTrie) Commit(onleaf LeafCallback) (root common.Hash, err error) {
+func (t *SecureTrie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
// Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 {
if t.trie.db.preimages != nil { // Ugly direct check but avoids the below write lock
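
A hypothetical caller of the new helper, written against this revision's public APIs (trie.NewSecure, trie.NewDatabase, types.StateAccount and types.EmptyRootHash all exist upstream; the address and account values are made up):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	tr, _ := trie.NewSecure(common.Hash{}, trie.NewDatabase(memorydb.New()))

	addr := common.HexToAddress("0xdeadbeef00000000000000000000000000000000")
	acc := &types.StateAccount{
		Nonce:    1,
		Balance:  big.NewInt(1_000_000_000),
		Root:     types.EmptyRootHash,
		CodeHash: crypto.Keccak256(nil), // hash of empty code
	}
	// One call instead of encode-then-TryUpdate at every call site.
	if err := tr.TryUpdateAccount(addr.Bytes(), acc); err != nil {
		panic(err)
	}
	root, committed, _ := tr.Commit(nil) // note the new 3-value signature
	fmt.Println(root, committed)
}
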
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
index ccdf389d5..bd2574d5d 100644
--- a/trie/stacktrie_test.go
+++ b/trie/stacktrie_test.go
@@ -1,3 +1,19 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package trie
import (
diff --git a/trie/sync_bloom.go b/trie/sync_bloom.go
index 49986fcf0..91e5e6711 100644
--- a/trie/sync_bloom.go
+++ b/trie/sync_bloom.go
@@ -129,6 +129,8 @@ func (b *SyncBloom) init(database ethdb.Iteratee) {
func (b *SyncBloom) meter() {
// check every second
tick := time.NewTicker(1 * time.Second)
+ defer tick.Stop()
+
for {
select {
case <-tick.C:
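
The deferred Stop matters because a ticker's resources are only released explicitly; without it, the timer stays armed even after meter returns. Minimal illustration of the fixed pattern (the done channel stands in for whatever eventually ends the loop):

package main

import (
	"fmt"
	"time"
)

// meter samples once per second until done is closed. The deferred
// Stop ties the ticker's lifetime to the loop instead of leaking it.
func meter(done <-chan struct{}) {
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for {
		select {
		case <-tick.C:
			fmt.Println("sample")
		case <-done:
			return
		}
	}
}

func main() {
	done := make(chan struct{})
	go meter(done)
	time.Sleep(1500 * time.Millisecond)
	close(done)
}
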
diff --git a/trie/trie.go b/trie/trie.go
index e492a532c..13343112b 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -24,8 +24,10 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
)
var (
@@ -174,6 +176,10 @@ func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) {
}
func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) {
+ // If non-existent path requested, abort
+ if origNode == nil {
+ return nil, nil, 0, nil
+ }
// If we reached the requested path, return the current node
if pos >= len(path) {
// Although we most probably have the original node expanded, encoding
@@ -193,10 +199,6 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) {
}
// Path still needs to be traversed, descend into children
switch n := (origNode).(type) {
- case nil:
- // Non-existent path requested, abort
- return nil, nil, 0, nil
-
case valueNode:
// Path prematurely ended, abort
return nil, nil, 0, nil
@@ -246,6 +248,14 @@ func (t *Trie) Update(key, value []byte) {
}
}
+func (t *Trie) TryUpdateAccount(key []byte, acc *types.StateAccount) error {
+ data, err := rlp.EncodeToBytes(acc)
+ if err != nil {
+ return fmt.Errorf("can't encode object at %x: %w", key[:], err)
+ }
+ return t.TryUpdate(key, data)
+}
+
// TryUpdate associates key with value in the trie. Subsequent calls to
// Get will return value. If value has length zero, any existing value
// is deleted from the trie and calls to Get will return nil.
@@ -514,12 +524,12 @@ func (t *Trie) Hash() common.Hash {
// Commit writes all nodes to the trie's memory database, tracking the internal
// and external (for account tries) references.
-func (t *Trie) Commit(onleaf LeafCallback) (root common.Hash, err error) {
+func (t *Trie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
if t.db == nil {
panic("commit called on trie with nil database")
}
if t.root == nil {
- return emptyRoot, nil
+ return emptyRoot, 0, nil
}
// Derive the hash for all dirty nodes first. We hold the assumption
// in the following procedure that all nodes are hashed.
@@ -531,7 +541,7 @@ func (t *Trie) Commit(onleaf LeafCallback) (root common.Hash, err error) {
// up goroutines. This can happen e.g. if we load a trie for reading storage
// values, but don't write to it.
if _, dirty := t.root.cache(); !dirty {
- return rootHash, nil
+ return rootHash, 0, nil
}
var wg sync.WaitGroup
if onleaf != nil {
@@ -543,8 +553,7 @@ func (t *Trie) Commit(onleaf LeafCallback) (root common.Hash, err error) {
h.commitLoop(t.db)
}()
}
- var newRoot hashNode
- newRoot, err = h.Commit(t.root, t.db)
+ newRoot, committed, err := h.Commit(t.root, t.db)
if onleaf != nil {
// The leafch is created in newCommitter if there was an onleaf callback
// provided. The commitLoop only _reads_ from it, and the commit
@@ -554,10 +563,10 @@ func (t *Trie) Commit(onleaf LeafCallback) (root common.Hash, err error) {
wg.Wait()
}
if err != nil {
- return common.Hash{}, err
+ return common.Hash{}, 0, err
}
t.root = newRoot
- return rootHash, nil
+ return rootHash, committed, nil
}
// hashRoot calculates the root hash of the given trie
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 492b423c2..be0df8a54 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -32,6 +32,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
@@ -90,7 +91,7 @@ func testMissingNode(t *testing.T, memonly bool) {
trie, _ := New(common.Hash{}, triedb)
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
- root, _ := trie.Commit(nil)
+ root, _, _ := trie.Commit(nil)
if !memonly {
triedb.Commit(root, true, nil)
}
@@ -172,7 +173,7 @@ func TestInsert(t *testing.T) {
updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
- root, err := trie.Commit(nil)
+ root, _, err := trie.Commit(nil)
if err != nil {
t.Fatalf("commit error: %v", err)
}
@@ -270,7 +271,7 @@ func TestReplication(t *testing.T) {
for _, val := range vals {
updateString(trie, val.k, val.v)
}
- exp, err := trie.Commit(nil)
+ exp, _, err := trie.Commit(nil)
if err != nil {
t.Fatalf("commit error: %v", err)
}
@@ -285,7 +286,7 @@ func TestReplication(t *testing.T) {
t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
}
}
- hash, err := trie2.Commit(nil)
+ hash, _, err := trie2.Commit(nil)
if err != nil {
t.Fatalf("commit error: %v", err)
}
@@ -429,11 +430,11 @@ func runRandTest(rt randTest) bool {
rt[i].err = fmt.Errorf("mismatch for key 0x%x, got 0x%x want 0x%x", step.key, v, want)
}
case opCommit:
- _, rt[i].err = tr.Commit(nil)
+ _, _, rt[i].err = tr.Commit(nil)
case opHash:
tr.Hash()
case opReset:
- hash, err := tr.Commit(nil)
+ hash, _, err := tr.Commit(nil)
if err != nil {
rt[i].err = err
return false
@@ -553,13 +554,6 @@ func BenchmarkHash(b *testing.B) {
trie.Hash()
}
-type account struct {
- Nonce uint64
- Balance *big.Int
- Root common.Hash
- Code []byte
-}
-
// Benchmarks the trie Commit following a Hash. Since the trie caches the result of any operation,
// we cannot use b.N as the number of hashing rounds, since all rounds apart from
// the first one will be NOOP. As such, we'll use b.N as the number of accounts to
@@ -568,7 +562,7 @@ func BenchmarkCommitAfterHash(b *testing.B) {
b.Run("no-onleaf", func(b *testing.B) {
benchmarkCommitAfterHash(b, nil)
})
- var a account
+ var a types.StateAccount
onleaf := func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error {
rlp.DecodeBytes(leaf, &a)
return nil
@@ -633,7 +627,7 @@ func TestCommitAfterHash(t *testing.T) {
if exp != root {
t.Errorf("got %x, exp %x", root, exp)
}
- root, _ = trie.Commit(nil)
+ root, _, _ = trie.Commit(nil)
if exp != root {
t.Errorf("got %x, exp %x", root, exp)
}
@@ -664,7 +658,7 @@ func makeAccounts(size int) (addresses [][20]byte, accounts [][]byte) {
balanceBytes := make([]byte, numBytes)
random.Read(balanceBytes)
balance := new(big.Int).SetBytes(balanceBytes)
- data, _ := rlp.EncodeToBytes(&account{nonce, balance, root, code})
+ data, _ := rlp.EncodeToBytes(&types.StateAccount{Nonce: nonce, Balance: balance, Root: root, CodeHash: code})
accounts[i] = data
}
return addresses, accounts
@@ -740,7 +734,7 @@ func TestCommitSequence(t *testing.T) {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Flush trie -> database
- root, _ := trie.Commit(nil)
+ root, _, _ := trie.Commit(nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false, func(c common.Hash) {
// And spongify the callback-order
@@ -792,7 +786,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
trie.Update(key, val)
}
// Flush trie -> database
- root, _ := trie.Commit(nil)
+ root, _, _ := trie.Commit(nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false, func(c common.Hash) {
// And spongify the callback-order
@@ -834,7 +828,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
stTrie.TryUpdate(key, val)
}
// Flush trie -> database
- root, _ := trie.Commit(nil)
+ root, _, _ := trie.Commit(nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false, nil)
// And flush stacktrie -> disk
@@ -879,7 +873,7 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
trie.TryUpdate(key, []byte{0x1})
stTrie.TryUpdate(key, []byte{0x1})
// Flush trie -> database
- root, _ := trie.Commit(nil)
+ root, _, _ := trie.Commit(nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false, nil)
// And flush stacktrie -> disk