diff --git a/.gitmodules b/.gitmodules
index 90d1be0a3..241c169c4 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,3 +2,7 @@
path = tests/testdata
url = https://github.com/ethereum/tests
shallow = true
+[submodule "evm-benchmarks"]
+ path = tests/evm-benchmarks
+ url = https://github.com/ipsilon/evm-benchmarks
+ shallow = true
diff --git a/.golangci.yml b/.golangci.yml
index 395a91fe1..4950b98c2 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,7 +1,7 @@
# This file configures github.com/golangci/golangci-lint.
run:
- timeout: 5m
+ timeout: 20m
tests: true
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
diff --git a/README.md b/README.md
index 0e8bdca4d..81b7215ba 100644
--- a/README.md
+++ b/README.md
@@ -165,7 +165,7 @@ saving your blockchain as well as map the default ports. There is also an `alpin
available for a slim version of the image.
Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
-and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not
+and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not
accessible from the outside.
### Programmatically interfacing `geth` nodes
diff --git a/SECURITY.md b/SECURITY.md
index 88b3f8fe1..41b900d5e 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -19,7 +19,7 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
**Please do not file a public ticket** mentioning the vulnerability.
-To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities.
+To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.
Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go
index 261b4d1b8..e6c117fe5 100644
--- a/accounts/abi/argument.go
+++ b/accounts/abi/argument.go
@@ -81,13 +81,7 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(arguments) != 0 {
return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
}
- // Nothing to unmarshal, return default variables
- nonIndexedArgs := arguments.NonIndexed()
- defaultVars := make([]interface{}, len(nonIndexedArgs))
- for index, arg := range nonIndexedArgs {
- defaultVars[index] = reflect.New(arg.Type.GetType())
- }
- return defaultVars, nil
+ return make([]interface{}, 0), nil
}
return arguments.UnpackValues(data)
}
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 27d40f1d6..ac696f446 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -230,6 +230,9 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common
defer b.mu.Unlock()
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
+ if receipt == nil {
+ return nil, ethereum.NotFound
+ }
return receipt, nil
}
@@ -639,7 +642,6 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
}
// SendTransaction updates the pending block to include the given transaction.
-// It panics if the transaction is invalid.
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
b.mu.Lock()
defer b.mu.Unlock()
@@ -647,17 +649,17 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
// Get the last block
block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
if err != nil {
- panic("could not fetch parent")
+ return fmt.Errorf("could not fetch parent")
}
// Check transaction validity
signer := types.MakeSigner(b.blockchain.Config(), block.Number())
sender, err := types.Sender(signer, tx)
if err != nil {
- panic(fmt.Errorf("invalid transaction: %v", err))
+ return fmt.Errorf("invalid transaction: %v", err)
}
nonce := b.pendingState.GetNonce(sender)
if tx.Nonce() != nonce {
- panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
+ return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
}
// Include tx in chain
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
diff --git a/accounts/abi/bind/util.go b/accounts/abi/bind/util.go
index 118abc59a..b931fbb04 100644
--- a/accounts/abi/bind/util.go
+++ b/accounts/abi/bind/util.go
@@ -21,6 +21,7 @@ import (
"errors"
"time"
+ "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
@@ -35,14 +36,16 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
logger := log.New("hash", tx.Hash())
for {
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
- if receipt != nil {
+ if err == nil {
return receipt, nil
}
- if err != nil {
- logger.Trace("Receipt retrieval failed", "err", err)
- } else {
+
+ if errors.Is(err, ethereum.NotFound) {
logger.Trace("Transaction not yet mined")
+ } else {
+ logger.Trace("Receipt retrieval failed", "err", err)
}
+
// Wait for the next round.
select {
case <-ctx.Done():
diff --git a/accounts/accounts.go b/accounts/accounts.go
index af870dad1..179a33c59 100644
--- a/accounts/accounts.go
+++ b/accounts/accounts.go
@@ -46,7 +46,7 @@ const (
// accounts (derived from the same seed).
type Wallet interface {
// URL retrieves the canonical path under which this wallet is reachable. It is
- // user by upper layers to define a sorting order over all wallets from multiple
+ // used by upper layers to define a sorting order over all wallets from multiple
// backends.
URL() URL
@@ -89,7 +89,7 @@ type Wallet interface {
// accounts.
//
// Note, self derivation will increment the last component of the specified path
- // opposed to decending into a child path to allow discovering accounts starting
+ // opposed to descending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so
@@ -105,7 +105,7 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
- // a password to decrypt the account, or a PIN code o verify the transaction),
+ // a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
@@ -124,13 +124,13 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
- // a password to decrypt the account, or a PIN code o verify the transaction),
+ // a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
// the account in a keystore).
//
- // This method should return the signature in 'canonical' format, with v 0 or 1
+ // This method should return the signature in 'canonical' format, with v 0 or 1.
SignText(account Account, text []byte) ([]byte, error)
// SignTextWithPassphrase is identical to Signtext, but also takes a password
diff --git a/accounts/errors.go b/accounts/errors.go
index 2fed35f9d..727e5329b 100644
--- a/accounts/errors.go
+++ b/accounts/errors.go
@@ -42,7 +42,7 @@ var ErrInvalidPassphrase = errors.New("invalid password")
var ErrWalletAlreadyOpen = errors.New("wallet already open")
// ErrWalletClosed is returned if a wallet is attempted to be opened the
-// secodn time.
+// second time.
var ErrWalletClosed = errors.New("wallet closed")
// AuthNeededError is returned by backends for signing requests where the user
diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go
index b4d229bc0..2a2b83bd1 100644
--- a/accounts/scwallet/wallet.go
+++ b/accounts/scwallet/wallet.go
@@ -638,7 +638,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
// accounts.
//
// Note, self derivation will increment the last component of the specified path
-// opposed to decending into a child path to allow discovering accounts starting
+// opposed to descending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so
diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go
index b6f181448..382f3ddae 100644
--- a/accounts/usbwallet/wallet.go
+++ b/accounts/usbwallet/wallet.go
@@ -496,7 +496,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
// accounts.
//
// Note, self derivation will increment the last component of the specified path
-// opposed to decending into a child path to allow discovering accounts starting
+// opposed to descending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so
diff --git a/build/ci.go b/build/ci.go
index 8b302511a..b39dc15ca 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -334,7 +334,11 @@ func downloadLinter(cachedir string) string {
const version = "1.42.0"
csdb := build.MustLoadChecksums("build/checksums.txt")
- base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
+ arch := runtime.GOARCH
+ if arch == "arm" {
+ arch += "v" + os.Getenv("GOARM")
+ }
+ base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
archivePath := filepath.Join(cachedir, base+".tar.gz")
if err := csdb.DownloadFile(url, archivePath); err != nil {
diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go
index 7dcb412b5..d0d55a455 100644
--- a/cmd/devp2p/internal/ethtest/chain.go
+++ b/cmd/devp2p/internal/ethtest/chain.go
@@ -26,6 +26,7 @@ import (
"os"
"strings"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
@@ -67,6 +68,13 @@ func (c *Chain) TotalDifficultyAt(height int) *big.Int {
return sum
}
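+// RootAt returns the state root of the block at the given height, or the
+// zero hash if the height is out of range.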
+func (c *Chain) RootAt(height int) common.Hash {
+ if height < c.Len() {
+ return c.blocks[height].Root()
+ }
+ return common.Hash{}
+}
+
// ForkID gets the fork id of the chain.
func (c *Chain) ForkID() forkid.ID {
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go
index e695cd42d..dd9dfd861 100644
--- a/cmd/devp2p/internal/ethtest/helpers.go
+++ b/cmd/devp2p/internal/ethtest/helpers.go
@@ -96,6 +96,19 @@ func (s *Suite) dial66() (*Conn, error) {
return conn, nil
}
+// dialSnap attempts to dial the given node and perform a handshake,
+// returning the created Conn with additional snap/1 capabilities if
+// successful.
+func (s *Suite) dialSnap() (*Conn, error) {
+ conn, err := s.dial66()
+ if err != nil {
+ return nil, fmt.Errorf("dial failed: %v", err)
+ }
+ conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1})
+ conn.ourHighestSnapProtoVersion = 1
+ return conn, nil
+}
+
// peer performs both the protocol handshake and the status message
// exchange with the node in order to peer with it.
func (c *Conn) peer(chain *Chain, status *Status) error {
@@ -131,7 +144,11 @@ func (c *Conn) handshake() error {
}
c.negotiateEthProtocol(msg.Caps)
if c.negotiatedProtoVersion == 0 {
- return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
+ return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
+ }
+ // If we require snap, verify that it was negotiated
+ if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion {
+ return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion)
}
return nil
default:
@@ -143,15 +160,21 @@ func (c *Conn) handshake() error {
// advertised capability from peer.
func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
var highestEthVersion uint
+ var highestSnapVersion uint
for _, capability := range caps {
- if capability.Name != "eth" {
- continue
- }
- if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
- highestEthVersion = capability.Version
+ switch capability.Name {
+ case "eth":
+ if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
+ highestEthVersion = capability.Version
+ }
+ case "snap":
+ if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion {
+ highestSnapVersion = capability.Version
+ }
}
}
c.negotiatedProtoVersion = highestEthVersion
+ c.negotiatedSnapProtoVersion = highestSnapVersion
}
// statusExchange performs a `Status` message exchange with the given node.
@@ -325,6 +348,15 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bo
}
}
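+// snapRequest writes the given snap protocol request to the connection and
+// waits up to five seconds for the response carrying the matching request id.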
+func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
+ defer c.SetReadDeadline(time.Time{})
+ c.SetReadDeadline(time.Now().Add(5 * time.Second))
+ if err := c.Write(msg); err != nil {
+ return nil, fmt.Errorf("could not write to connection: %v", err)
+ }
+ return c.ReadSnap(id)
+}
+
// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
// write request
diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go
new file mode 100644
index 000000000..95dd90fd3
--- /dev/null
+++ b/cmd/devp2p/internal/ethtest/snap.go
@@ -0,0 +1,675 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethtest
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/rand"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/protocols/snap"
+ "github.com/ethereum/go-ethereum/internal/utesting"
+ "github.com/ethereum/go-ethereum/light"
+ "github.com/ethereum/go-ethereum/trie"
+ "golang.org/x/crypto/sha3"
+)
+
+func (s *Suite) TestSnapStatus(t *utesting.T) {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err := conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+}
+
+type accRangeTest struct {
+ nBytes uint64
+ root common.Hash
+ origin common.Hash
+ limit common.Hash
+
+ expAccounts int
+ expFirst common.Hash
+ expLast common.Hash
+}
+
+// TestSnapGetAccountRange tests various forms of GetAccountRange requests.
+func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
+ var (
+ root = s.chain.RootAt(999)
+ ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ zero = common.Hash{}
+ firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
+ firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
+ firstKeyPlus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b")
+ secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
+ storageRoot = common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790")
+ )
+ for i, tc := range []accRangeTest{
+ // Tests decreasing the number of bytes
+ {4000, root, zero, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
+ {3000, root, zero, ffHash, 57, firstKey, common.HexToHash("0x9b63fa753ece5cb90657d02ecb15df4dc1508d8c1d187af1bf7f1a05e747d3c7")},
+ {2000, root, zero, ffHash, 38, firstKey, common.HexToHash("0x5e6140ecae4354a9e8f47559a8c6209c1e0e69cb077b067b528556c11698b91f")},
+ {1, root, zero, ffHash, 1, firstKey, firstKey},
+
+ // Tests variations of the range
+ //
+ // [00b to firstkey]: should return [firstkey, secondkey], where secondkey is out of bounds
+ {4000, root, common.HexToHash("0x00bf000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b"), 2, firstKey, secondKey},
+ // [00b0 to 0bf0]: where both are before firstkey. Should return firstKey (even though it's out of bounds)
+ {4000, root, common.HexToHash("0x00b0000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000"), 1, firstKey, firstKey},
+ {4000, root, zero, zero, 1, firstKey, firstKey},
+ {4000, root, firstKey, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
+ {4000, root, firstKeyPlus1, ffHash, 76, secondKey, common.HexToHash("0xd28f55d3b994f16389f36944ad685b48e0fc3f8fbe86c3ca92ebecadf16a783f")},
+
+ // Test different root hashes
+ //
+ // A stateroot that does not exist
+ {4000, common.Hash{0x13, 37}, zero, ffHash, 0, zero, zero},
+ // The genesis stateroot (we expect it to not be served)
+ {4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero},
+ // A 127 block old stateroot, expected to be served
+ {4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")},
+ // A root which is not actually an account root, but a storage root
+ {4000, storageRoot, zero, ffHash, 0, zero, zero},
+
+ // And some nonsensical requests
+ //
+ // range from [0xFF to 0x00], wrong order. Expect not to be serviced
+ {4000, root, ffHash, zero, 0, zero, zero},
+ // range from [firstkey, firstkey-1], wrong order. Expect to get first key.
+ {4000, root, firstKey, firstKeyMinus1, 1, firstKey, firstKey},
+ // range from [firstkey, 0], wrong order. Expect to get first key.
+ {4000, root, firstKey, zero, 1, firstKey, firstKey},
+ // Max bytes: 0. Expect to deliver one account.
+ {0, root, zero, ffHash, 1, firstKey, firstKey},
+ } {
+ if err := s.snapGetAccountRange(t, &tc); err != nil {
+ t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err)
+ }
+ }
+}
+
+type stRangesTest struct {
+ root common.Hash
+ accounts []common.Hash
+ origin []byte
+ limit []byte
+ nBytes uint64
+
+ expSlots int
+}
+
+// TestSnapGetStorageRanges tests various forms of GetStorageRanges requests.
+func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
+ var (
+ ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ zero = common.Hash{}
+ firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
+ secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
+ )
+ for i, tc := range []stRangesTest{
+ {
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{secondKey, firstKey},
+ origin: zero[:],
+ limit: ffHash[:],
+ nBytes: 500,
+ expSlots: 0,
+ },
+
+ /*
+ Some tests against this account:
+ {
+ "balance": "0",
+ "nonce": 1,
+ "root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790",
+ "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
+ "storage": {
+ "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02",
+ "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01",
+ "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03"
+ },
+ "key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"
+ }
+ */
+ { // [:] -> [slot1, slot2, slot3]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: zero[:],
+ limit: ffHash[:],
+ nBytes: 500,
+ expSlots: 3,
+ },
+ { // [slot1:] -> [slot1, slot2, slot3]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
+ limit: ffHash[:],
+ nBytes: 500,
+ expSlots: 3,
+ },
+ { // [slot1+ :] -> [slot2, slot3]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5acf"),
+ limit: ffHash[:],
+ nBytes: 500,
+ expSlots: 2,
+ },
+ { // [slot1:slot2] -> [slot1, slot2]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
+ limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
+ nBytes: 500,
+ expSlots: 2,
+ },
+ { // [slot1+:slot2+] -> [slot2, slot3]
+ root: s.chain.RootAt(999),
+ accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
+ origin: common.FromHex("0x4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
+ limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7"),
+ nBytes: 500,
+ expSlots: 2,
+ },
+ } {
+ if err := s.snapGetStorageRanges(t, &tc); err != nil {
+ t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v",
+ i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err)
+ }
+ }
+}
+
+type byteCodesTest struct {
+ nBytes uint64
+ hashes []common.Hash
+
+ expHashes int
+}
+
+var (
+ // emptyRoot is the known root hash of an empty trie.
+ emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+ // emptyCode is the known hash of the empty EVM bytecode.
+ emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
+)
+
+// TestSnapGetByteCodes tests various forms of GetByteCodes requests.
+func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
+ // The halfchain import should yield these bytecodes
+ var hcBytecodes []common.Hash
+ for _, s := range []string{
+ "0x200c90460d8b0063210d5f5b9918e053c8f2c024485e0f1b48be8b1fc71b1317",
+ "0x20ba67ed4ac6aff626e0d1d4db623e2fada9593daeefc4a6eb4b70e6cff986f3",
+ "0x24b5b4902cb3d897c1cee9f16be8e897d8fa277c04c6dc8214f18295fca5de44",
+ "0x320b9d0a2be39b8a1c858f9f8cb96b1df0983071681de07ded3a7c0d05db5fd6",
+ "0x48cb0d5275936a24632babc7408339f9f7b051274809de565b8b0db76e97e03c",
+ "0x67c7a6f5cdaa43b4baa0e15b2be63346d1b9ce9f2c3d7e5804e0cacd44ee3b04",
+ "0x6d8418059bdc8c3fabf445e6bfc662af3b6a4ae45999b953996e42c7ead2ab49",
+ "0x7043422e5795d03f17ee0463a37235258e609fdd542247754895d72695e3e142",
+ "0x727f9e6f0c4bac1ff8d72c2972122d9c8d37ccb37e04edde2339e8da193546f1",
+ "0x86ccd5e23c78568a8334e0cebaf3e9f48c998307b0bfb1c378cee83b4bfb29cb",
+ "0x8fc89b00d6deafd4c4279531e743365626dbfa28845ec697919d305c2674302d",
+ "0x92cfc353bcb9746bb6f9996b6b9df779c88af2e9e0eeac44879ca19887c9b732",
+ "0x941b4872104f0995a4898fcf0f615ea6bf46bfbdfcf63ea8f2fd45b3f3286b77",
+ "0xa02fe8f41159bb39d2b704c633c3d6389cf4bfcb61a2539a9155f60786cf815f",
+ "0xa4b94e0afdffcb0af599677709dac067d3145489ea7aede57672bee43e3b7373",
+ "0xaf4e64edd3234c1205b725e42963becd1085f013590bd7ed93f8d711c5eb65fb",
+ "0xb69a18fa855b742031420081999086f6fb56c3930ae8840944e8b8ae9931c51e",
+ "0xc246c217bc73ce6666c93a93a94faa5250564f50a3fdc27ea74c231c07fe2ca6",
+ "0xcd6e4ab2c3034df2a8a1dfaaeb1c4baecd162a93d22de35e854ee2945cbe0c35",
+ "0xe24b692d09d6fc2f3d1a6028c400a27c37d7cbb11511907c013946d6ce263d3b",
+ "0xe440c5f0e8603fd1ed25976eee261ccee8038cf79d6a4c0eb31b2bf883be737f",
+ "0xe6eacbc509203d21ac814b350e72934fde686b7f673c19be8cf956b0c70078ce",
+ "0xe8530de4371467b5be7ea0e69e675ab36832c426d6c1ce9513817c0f0ae1486b",
+ "0xe85d487abbbc83bf3423cf9731360cf4f5a37220e18e5add54e72ee20861196a",
+ "0xf195ea389a5eea28db0be93660014275b158963dec44af1dfa7d4743019a9a49",
+ } {
+ hcBytecodes = append(hcBytecodes, common.HexToHash(s))
+ }
+
+ for i, tc := range []byteCodesTest{
+ // A few stateroots
+ {
+ nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(999)},
+ expHashes: 0,
+ },
+ {
+ nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(0)},
+ expHashes: 0,
+ },
+ // Empties
+ {
+ nBytes: 10000, hashes: []common.Hash{emptyRoot},
+ expHashes: 0,
+ },
+ {
+ nBytes: 10000, hashes: []common.Hash{emptyCode},
+ expHashes: 1,
+ },
+ {
+ nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode},
+ expHashes: 3,
+ },
+ // The existing bytecodes
+ {
+ nBytes: 10000, hashes: hcBytecodes,
+ expHashes: len(hcBytecodes),
+ },
+ // The existing, with limited byte arg
+ {
+ nBytes: 1, hashes: hcBytecodes,
+ expHashes: 1,
+ },
+ {
+ nBytes: 0, hashes: hcBytecodes,
+ expHashes: 1,
+ },
+ {
+ nBytes: 1000, hashes: []common.Hash{hcBytecodes[0], hcBytecodes[0], hcBytecodes[0], hcBytecodes[0]},
+ expHashes: 4,
+ },
+ } {
+ if err := s.snapGetByteCodes(t, &tc); err != nil {
+ t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err)
+ }
+ }
+}
+
+type trieNodesTest struct {
+ root common.Hash
+ paths []snap.TrieNodePathSet
+ nBytes uint64
+
+ expHashes []common.Hash
+ expReject bool
+}
+
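+// decodeNibbles packs pairs of hex nibbles back into bytes, writing the
+// result into the provided byte slice.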
+func decodeNibbles(nibbles []byte, bytes []byte) {
+ for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
+ bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
+ }
+}
+
+// hasTerm returns whether a hex key has the terminator flag.
+func hasTerm(s []byte) bool {
+ return len(s) > 0 && s[len(s)-1] == 16
+}
+
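+// keybytesToHex expands each key byte into two hex nibbles and appends the
+// terminator flag (16).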
+func keybytesToHex(str []byte) []byte {
+ l := len(str)*2 + 1
+ var nibbles = make([]byte, l)
+ for i, b := range str {
+ nibbles[i*2] = b / 16
+ nibbles[i*2+1] = b % 16
+ }
+ nibbles[l-1] = 16
+ return nibbles
+}
+
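+// hexToCompact converts a hex-nibble key into the compact encoding used for
+// trie paths, folding the odd-length and terminator flags into the first byte.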
+func hexToCompact(hex []byte) []byte {
+ terminator := byte(0)
+ if hasTerm(hex) {
+ terminator = 1
+ hex = hex[:len(hex)-1]
+ }
+ buf := make([]byte, len(hex)/2+1)
+ buf[0] = terminator << 5 // the flag byte
+ if len(hex)&1 == 1 {
+ buf[0] |= 1 << 4 // odd flag
+ buf[0] |= hex[0] // first nibble is contained in the first byte
+ hex = hex[1:]
+ }
+ decodeNibbles(hex, buf[1:])
+ return buf
+}
+
+// TestSnapTrieNodes tests various forms of GetTrieNodes requests.
+func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
+
+ key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
+ // helper function to iterate the key, and generate the compact-encoded
+ // trie paths along the way.
+ pathTo := func(length int) snap.TrieNodePathSet {
+ hex := keybytesToHex(key)[:length]
+ hex[len(hex)-1] = 0 // remove term flag
+ hKey := hexToCompact(hex)
+ return snap.TrieNodePathSet{hKey}
+ }
+ var accPaths []snap.TrieNodePathSet
+ for i := 1; i <= 65; i++ {
+ accPaths = append(accPaths, pathTo(i))
+ }
+ empty := emptyCode
+ for i, tc := range []trieNodesTest{
+ {
+ root: s.chain.RootAt(999),
+ paths: nil,
+ nBytes: 500,
+ expHashes: nil,
+ },
+ {
+ root: s.chain.RootAt(999),
+ paths: []snap.TrieNodePathSet{
+ snap.TrieNodePathSet{}, // zero-length pathset should 'abort' and kick us off
+ snap.TrieNodePathSet{[]byte{0}},
+ },
+ nBytes: 5000,
+ expHashes: []common.Hash{},
+ expReject: true,
+ },
+ {
+ root: s.chain.RootAt(999),
+ paths: []snap.TrieNodePathSet{
+ snap.TrieNodePathSet{[]byte{0}},
+ snap.TrieNodePathSet{[]byte{1}, []byte{0}},
+ },
+ nBytes: 5000,
+ //0x6b3724a41b8c38b46d4d02fba2bb2074c47a507eb16a9a4b978f91d32e406faf
+ expHashes: []common.Hash{s.chain.RootAt(999)},
+ },
+ { // nonsensically long path
+ root: s.chain.RootAt(999),
+ paths: []snap.TrieNodePathSet{
+ snap.TrieNodePathSet{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}},
+ },
+ nBytes: 5000,
+ expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")},
+ },
+ {
+ root: s.chain.RootAt(0),
+ paths: []snap.TrieNodePathSet{
+ snap.TrieNodePathSet{[]byte{0}},
+ snap.TrieNodePathSet{[]byte{1}, []byte{0}},
+ },
+ nBytes: 5000,
+ expHashes: []common.Hash{},
+ },
+ {
+ // The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures.
+ root: s.chain.RootAt(999),
+ paths: accPaths,
+ nBytes: 5000,
+ expHashes: []common.Hash{
+ common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
+ common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
+ empty, empty, empty},
+ },
+ {
+ // Basically the same as above, with different ordering
+ root: s.chain.RootAt(999),
+ paths: []snap.TrieNodePathSet{
+ accPaths[10], accPaths[1], accPaths[0],
+ },
+ nBytes: 5000,
+ expHashes: []common.Hash{
+ empty,
+ common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
+ common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
+ },
+ },
+ } {
+ if err := s.snapGetTrieNodes(t, &tc); err != nil {
+ t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
+ }
+ }
+}
+
+func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err = conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+ // write request
+ req := &GetAccountRange{
+ ID: uint64(rand.Int63()),
+ Root: tc.root,
+ Origin: tc.origin,
+ Limit: tc.limit,
+ Bytes: tc.nBytes,
+ }
+ resp, err := conn.snapRequest(req, req.ID, s.chain)
+ if err != nil {
+ return fmt.Errorf("account range request failed: %v", err)
+ }
+ var res *snap.AccountRangePacket
+ if r, ok := resp.(*AccountRange); !ok {
+ return fmt.Errorf("account range response wrong: %T %v", resp, resp)
+ } else {
+ res = (*snap.AccountRangePacket)(r)
+ }
+ if exp, got := tc.expAccounts, len(res.Accounts); exp != got {
+ return fmt.Errorf("expected %d accounts, got %d", exp, got)
+ }
+ // Check that the encoding order is correct
+ for i := 1; i < len(res.Accounts); i++ {
+ if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
+ return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
+ }
+ }
+ var (
+ hashes []common.Hash
+ accounts [][]byte
+ proof = res.Proof
+ )
+ hashes, accounts, err = res.Unpack()
+ if err != nil {
+ return err
+ }
+ if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
+ return nil
+ }
+ if len(hashes) > 0 {
+ if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got {
+ return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got)
+ }
+ if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got {
+ return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got)
+ }
+ }
+ // Reconstruct a partial trie from the response and verify it
+ keys := make([][]byte, len(hashes))
+ for i, key := range hashes {
+ keys[i] = common.CopyBytes(key[:])
+ }
+ nodes := make(light.NodeList, len(proof))
+ for i, node := range proof {
+ nodes[i] = node
+ }
+ proofdb := nodes.NodeSet()
+
+ var end []byte
+ if len(keys) > 0 {
+ end = keys[len(keys)-1]
+ }
+ _, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
+ return err
+}
+
+func (s *Suite) snapGetStorageRanges(t *utesting.T, tc *stRangesTest) error {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err = conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+ // write request
+ req := &GetStorageRanges{
+ ID: uint64(rand.Int63()),
+ Root: tc.root,
+ Accounts: tc.accounts,
+ Origin: tc.origin,
+ Limit: tc.limit,
+ Bytes: tc.nBytes,
+ }
+ resp, err := conn.snapRequest(req, req.ID, s.chain)
+ if err != nil {
+ return fmt.Errorf("account range request failed: %v", err)
+ }
+ var res *snap.StorageRangesPacket
+ if r, ok := resp.(*StorageRanges); !ok {
+ return fmt.Errorf("account range response wrong: %T %v", resp, resp)
+ } else {
+ res = (*snap.StorageRangesPacket)(r)
+ }
+ gotSlots := 0
+ // Ensure the ranges are monotonically increasing
+ for i, slots := range res.Slots {
+ gotSlots += len(slots)
+ for j := 1; j < len(slots); j++ {
+ if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
+ return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
+ }
+ }
+ }
+ if exp, got := tc.expSlots, gotSlots; exp != got {
+ return fmt.Errorf("expected %d slots, got %d", exp, got)
+ }
+ return nil
+}
+
+func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err = conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+ // write request
+ req := &GetByteCodes{
+ ID: uint64(rand.Int63()),
+ Hashes: tc.hashes,
+ Bytes: tc.nBytes,
+ }
+ resp, err := conn.snapRequest(req, req.ID, s.chain)
+ if err != nil {
+ return fmt.Errorf("getBytecodes request failed: %v", err)
+ }
+ var res *snap.ByteCodesPacket
+ if r, ok := resp.(*ByteCodes); !ok {
+ return fmt.Errorf("bytecodes response wrong: %T %v", resp, resp)
+ } else {
+ res = (*snap.ByteCodesPacket)(r)
+ }
+ if exp, got := tc.expHashes, len(res.Codes); exp != got {
+ for i, c := range res.Codes {
+ fmt.Printf("%d. %#x\n", i, c)
+ }
+ return fmt.Errorf("expected %d bytecodes, got %d", exp, got)
+ }
+ // Cross reference the requested bytecodes with the response to find gaps
+ // that the serving node is missing
+ var (
+ bytecodes = res.Codes
+ hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ hash = make([]byte, 32)
+ codes = make([][]byte, len(req.Hashes))
+ )
+
+ for i, j := 0, 0; i < len(bytecodes); i++ {
+ // Find the next hash that we've been served, leaving misses with nils
+ hasher.Reset()
+ hasher.Write(bytecodes[i])
+ hasher.Read(hash)
+
+ for j < len(req.Hashes) && !bytes.Equal(hash, req.Hashes[j][:]) {
+ j++
+ }
+ if j < len(req.Hashes) {
+ codes[j] = bytecodes[i]
+ j++
+ continue
+ }
+ // We've either run out of hashes or been served unrequested data
+ return errors.New("unexpected bytecode")
+ }
+
+ return nil
+}
+
+func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
+ conn, err := s.dialSnap()
+ if err != nil {
+ t.Fatalf("dial failed: %v", err)
+ }
+ defer conn.Close()
+ if err = conn.peer(s.chain, nil); err != nil {
+ t.Fatalf("peering failed: %v", err)
+ }
+ // write request
+ req := &GetTrieNodes{
+ ID: uint64(rand.Int63()),
+ Root: tc.root,
+ Paths: tc.paths,
+ Bytes: tc.nBytes,
+ }
+ resp, err := conn.snapRequest(req, req.ID, s.chain)
+ if err != nil {
+ if tc.expReject {
+ return nil
+ }
+ return fmt.Errorf("trienodes request failed: %v", err)
+ }
+ var res *snap.TrieNodesPacket
+ if r, ok := resp.(*TrieNodes); !ok {
+ return fmt.Errorf("trienodes response wrong: %T %v", resp, resp)
+ } else {
+ res = (*snap.TrieNodesPacket)(r)
+ }
+
+ // Check the correctness
+
+ // Cross reference the requested trienodes with the response to find gaps
+ // that the serving node is missing
+ hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ hash := make([]byte, 32)
+ trienodes := res.Nodes
+ if got, want := len(trienodes), len(tc.expHashes); got != want {
+ return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want)
+ }
+ for i, trienode := range trienodes {
+ hasher.Reset()
+ hasher.Write(trienode)
+ hasher.Read(hash)
+ if got, want := hash, tc.expHashes[i]; !bytes.Equal(got, want[:]) {
+ fmt.Printf("hash %d wrong, got %#x, want %#x\n", i, got, want)
+ err = fmt.Errorf("hash %d wrong, got %#x, want %#x", i, got, want)
+ }
+ }
+ return err
+}
diff --git a/cmd/devp2p/internal/ethtest/snapTypes.go b/cmd/devp2p/internal/ethtest/snapTypes.go
new file mode 100644
index 000000000..bb8638c3d
--- /dev/null
+++ b/cmd/devp2p/internal/ethtest/snapTypes.go
@@ -0,0 +1,36 @@
+package ethtest
+
+import "github.com/ethereum/go-ethereum/eth/protocols/snap"
+
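+// The snap capability is multiplexed on the same rlpx connection as eth/66,
+// so its message codes are offset by the devp2p base protocol (16 codes) and
+// the eth message space (17 codes): GetAccountRange starts at code 33.
+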
+// GetAccountRange represents an account range query.
+type GetAccountRange snap.GetAccountRangePacket
+
+func (g GetAccountRange) Code() int { return 33 }
+
+type AccountRange snap.AccountRangePacket
+
+func (g AccountRange) Code() int { return 34 }
+
+type GetStorageRanges snap.GetStorageRangesPacket
+
+func (g GetStorageRanges) Code() int { return 35 }
+
+type StorageRanges snap.StorageRangesPacket
+
+func (g StorageRanges) Code() int { return 36 }
+
+type GetByteCodes snap.GetByteCodesPacket
+
+func (g GetByteCodes) Code() int { return 37 }
+
+type ByteCodes snap.ByteCodesPacket
+
+func (g ByteCodes) Code() int { return 38 }
+
+type GetTrieNodes snap.GetTrieNodesPacket
+
+func (g GetTrieNodes) Code() int { return 39 }
+
+type TrieNodes snap.TrieNodesPacket
+
+func (g TrieNodes) Code() int { return 40 }
diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go
index 28ba4aa76..dee59bc57 100644
--- a/cmd/devp2p/internal/ethtest/suite.go
+++ b/cmd/devp2p/internal/ethtest/suite.go
@@ -125,6 +125,16 @@ func (s *Suite) Eth66Tests() []utesting.Test {
}
}
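+// SnapTests returns all tests related to the snap/1 protocol.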
+func (s *Suite) SnapTests() []utesting.Test {
+ return []utesting.Test{
+ {Name: "TestSnapStatus", Fn: s.TestSnapStatus},
+ {Name: "TestSnapAccountRange", Fn: s.TestSnapGetAccountRange},
+ {Name: "TestSnapGetByteCodes", Fn: s.TestSnapGetByteCodes},
+ {Name: "TestSnapGetTrieNodes", Fn: s.TestSnapTrieNodes},
+ {Name: "TestSnapGetStorageRanges", Fn: s.TestSnapGetStorageRanges},
+ }
+}
+
var (
eth66 = true // indicates whether suite should negotiate eth66 connection
eth65 = false // indicates whether suite should negotiate eth65 connection or below.
diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go
index 6d14404e6..9bc55bc0a 100644
--- a/cmd/devp2p/internal/ethtest/suite_test.go
+++ b/cmd/devp2p/internal/ethtest/suite_test.go
@@ -55,6 +55,27 @@ func TestEthSuite(t *testing.T) {
}
}
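+// TestSnapSuite runs the snap protocol test suite against a locally started geth node.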
+func TestSnapSuite(t *testing.T) {
+ geth, err := runGeth()
+ if err != nil {
+ t.Fatalf("could not run geth: %v", err)
+ }
+ defer geth.Close()
+
+ suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+ if err != nil {
+ t.Fatalf("could not create new test suite: %v", err)
+ }
+ for _, test := range suite.SnapTests() {
+ t.Run(test.Name, func(t *testing.T) {
+ result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+ if result[0].Failed {
+ t.Fatal()
+ }
+ })
+ }
+}
+
// runGeth creates and starts a geth node
func runGeth() (*node.Node, error) {
stack, err := node.New(&node.Config{
diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go
index e49ea284e..09bb218d5 100644
--- a/cmd/devp2p/internal/ethtest/types.go
+++ b/cmd/devp2p/internal/ethtest/types.go
@@ -19,6 +19,7 @@ package ethtest
import (
"crypto/ecdsa"
"fmt"
+ "time"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/p2p"
@@ -126,10 +127,12 @@ func (pt PooledTransactions) Code() int { return 26 }
// Conn represents an individual connection with a peer
type Conn struct {
*rlpx.Conn
- ourKey *ecdsa.PrivateKey
- negotiatedProtoVersion uint
- ourHighestProtoVersion uint
- caps []p2p.Cap
+ ourKey *ecdsa.PrivateKey
+ negotiatedProtoVersion uint
+ negotiatedSnapProtoVersion uint
+ ourHighestProtoVersion uint
+ ourHighestSnapProtoVersion uint
+ caps []p2p.Cap
}
// Read reads an eth packet from the connection.
@@ -259,12 +262,7 @@ func (c *Conn) Read66() (uint64, Message) {
// Write writes a eth packet to the connection.
func (c *Conn) Write(msg Message) error {
- // check if message is eth protocol message
- var (
- payload []byte
- err error
- )
- payload, err = rlp.EncodeToBytes(msg)
+ payload, err := rlp.EncodeToBytes(msg)
if err != nil {
return err
}
@@ -281,3 +279,43 @@ func (c *Conn) Write66(req eth.Packet, code int) error {
_, err = c.Conn.Write(uint64(code), payload)
return err
}
+
+// ReadSnap reads a snap/1 response with the given id from the connection.
+func (c *Conn) ReadSnap(id uint64) (Message, error) {
+ respId := id + 1
+ start := time.Now()
+ for respId != id && time.Since(start) < timeout {
+ code, rawData, _, err := c.Conn.Read()
+ if err != nil {
+ return nil, fmt.Errorf("could not read from connection: %v", err)
+ }
+ var snpMsg interface{}
+ switch int(code) {
+ case (GetAccountRange{}).Code():
+ snpMsg = new(GetAccountRange)
+ case (AccountRange{}).Code():
+ snpMsg = new(AccountRange)
+ case (GetStorageRanges{}).Code():
+ snpMsg = new(GetStorageRanges)
+ case (StorageRanges{}).Code():
+ snpMsg = new(StorageRanges)
+ case (GetByteCodes{}).Code():
+ snpMsg = new(GetByteCodes)
+ case (ByteCodes{}).Code():
+ snpMsg = new(ByteCodes)
+ case (GetTrieNodes{}).Code():
+ snpMsg = new(GetTrieNodes)
+ case (TrieNodes{}).Code():
+ snpMsg = new(TrieNodes)
+ default:
+ //return nil, fmt.Errorf("invalid message code: %d", code)
+ continue
+ }
+ if err := rlp.DecodeBytes(rawData, snpMsg); err != nil {
+ return nil, fmt.Errorf("could not rlp decode message: %v", err)
+ }
+ return snpMsg.(Message), nil
+
+ }
+ return nil, fmt.Errorf("request timed out")
+}
diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go
index 24a16f0b3..6557a239d 100644
--- a/cmd/devp2p/rlpxcmd.go
+++ b/cmd/devp2p/rlpxcmd.go
@@ -36,6 +36,7 @@ var (
Subcommands: []cli.Command{
rlpxPingCommand,
rlpxEthTestCommand,
+ rlpxSnapTestCommand,
},
}
rlpxPingCommand = cli.Command{
@@ -53,6 +54,16 @@ var (
testTAPFlag,
},
}
+ rlpxSnapTestCommand = cli.Command{
+ Name: "snap-test",
+ Usage: "Runs snap protocol tests against a node",
+ ArgsUsage: "<node> <chain.rlp> <genesis.json>",
+ Action: rlpxSnapTest,
+ Flags: []cli.Flag{
+ testPatternFlag,
+ testTAPFlag,
+ },
+ }
)
func rlpxPing(ctx *cli.Context) error {
@@ -106,3 +117,15 @@ func rlpxEthTest(ctx *cli.Context) error {
}
return runTests(ctx, suite.AllEthTests())
}
+
+// rlpxSnapTest runs the snap protocol test suite.
+func rlpxSnapTest(ctx *cli.Context) error {
+ if ctx.NArg() < 3 {
+ exit("missing path to chain.rlp as command-line argument")
+ }
+ suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2])
+ if err != nil {
+ exit(err)
+ }
+ return runTests(ctx, suite.SnapTests())
+}
diff --git a/cmd/ethkey/utils.go b/cmd/ethkey/utils.go
index f2986e8ee..70baae92f 100644
--- a/cmd/ethkey/utils.go
+++ b/cmd/ethkey/utils.go
@@ -49,7 +49,7 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
// signHash is a helper function that calculates a hash for the given message
// that can be safely used to calculate a signature from.
//
-// The hash is calulcated as
+// The hash is calculated as
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index dfdde4217..874685f15 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -67,6 +67,7 @@ type ommer struct {
type stEnv struct {
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
Difficulty *big.Int `json:"currentDifficulty"`
+ Random *big.Int `json:"currentRandom"`
ParentDifficulty *big.Int `json:"parentDifficulty"`
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
Number uint64 `json:"currentNumber" gencodec:"required"`
@@ -81,6 +82,7 @@ type stEnv struct {
type stEnvMarshaling struct {
Coinbase common.UnprefixedAddress
Difficulty *math.HexOrDecimal256
+ Random *math.HexOrDecimal256
ParentDifficulty *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
Number math.HexOrDecimal64
@@ -139,6 +141,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
if pre.Env.BaseFee != nil {
vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee)
}
+ // If random is defined, add it to the vmContext.
+ if pre.Env.Random != nil {
+ rnd := common.BigToHash(pre.Env.Random)
+ vmContext.Random = &rnd
+ }
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
// done in StateProcessor.Process(block, ...), right before transactions are applied.
if chainConfig.DAOForkSupport &&
diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go
index 1bb3c6a46..a6d774cda 100644
--- a/cmd/evm/internal/t8ntool/gen_stenv.go
+++ b/cmd/evm/internal/t8ntool/gen_stenv.go
@@ -18,6 +18,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
type stEnv struct {
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
+ Random *math.HexOrDecimal256 `json:"currentRandom"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
@@ -31,6 +32,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
+ enc.Random = (*math.HexOrDecimal256)(s.Random)
enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
enc.Number = math.HexOrDecimal64(s.Number)
@@ -48,6 +50,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
type stEnv struct {
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
+ Random *math.HexOrDecimal256 `json:"currentRandom"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
@@ -69,6 +72,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.Difficulty != nil {
s.Difficulty = (*big.Int)(dec.Difficulty)
}
+ if dec.Random != nil {
+ s.Random = (*big.Int)(dec.Random)
+ }
if dec.ParentDifficulty != nil {
s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
}
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index 11d71e4ce..097f9ce65 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -252,6 +252,10 @@ func Transition(ctx *cli.Context) error {
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
}
}
+ // Sanity check, to not `panic` in state_transition
+ if prestate.Env.Random != nil && !chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
+ return NewError(ErrorConfig, errors.New("can only apply RANDOM on top of London chainrules"))
+ }
if env := prestate.Env; env.Difficulty == nil {
// If difficulty was not provided by caller, we need to calculate it.
switch {
diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go
index 9455eeda3..3a71b8571 100644
--- a/cmd/geth/accountcmd_test.go
+++ b/cmd/geth/accountcmd_test.go
@@ -120,7 +120,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
t.Error(err)
}
- geth := runGeth(t, "account", "import", keyfile, "-password", passwordFile)
+ geth := runGeth(t, "--lightkdf", "account", "import", keyfile, "-password", passwordFile)
defer geth.ExpectExit()
geth.Expect(expected)
}
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 7a642edd0..ea4e65162 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -161,7 +161,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
cfg.Eth.OverrideTerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
}
- backend, _ := utils.RegisterEthService(stack, &cfg.Eth, ctx.GlobalBool(utils.CatalystFlag.Name))
+ backend, _ := utils.RegisterEthService(stack, &cfg.Eth)
// Configure GraphQL if requested
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go
index c2c42276b..4799a6388 100644
--- a/cmd/geth/dbcmd.go
+++ b/cmd/geth/dbcmd.go
@@ -34,9 +34,11 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/olekukonko/tablewriter"
"gopkg.in/urfave/cli.v1"
)
@@ -69,6 +71,7 @@ Remove blockchain and state databases`,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
+ dbMetadataCmd,
},
}
dbInspectCmd = cli.Command{
@@ -233,6 +236,21 @@ WARNING: This is a low-level operation which may cause database corruption!`,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
+ dbMetadataCmd = cli.Command{
+ Action: utils.MigrateFlags(showMetaData),
+ Name: "metadata",
+ Usage: "Shows metadata about the chain status.",
+ Flags: []cli.Flag{
+ utils.DataDirFlag,
+ utils.SyncModeFlag,
+ utils.MainnetFlag,
+ utils.RopstenFlag,
+ utils.SepoliaFlag,
+ utils.RinkebyFlag,
+ utils.GoerliFlag,
+ },
+ Description: "Shows metadata about the chain status.",
+ }
)
func removeDB(ctx *cli.Context) error {
@@ -539,7 +557,7 @@ func freezerInspect(ctx *cli.Context) error {
defer stack.Close()
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
log.Info("Opening freezer", "location", path, "name", kind)
- if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil {
+ if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
return err
} else {
f.DumpIndex(start, end)
@@ -685,3 +703,50 @@ func exportChaindata(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
+
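+// showMetaData prints a table of metadata about the chain status (heads,
+// freezer contents and snapshot/sync markers) read from the database.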
+func showMetaData(ctx *cli.Context) error {
+ stack, _ := makeConfigNode(ctx)
+ defer stack.Close()
+ db := utils.MakeChainDatabase(ctx, stack, true)
+ ancients, err := db.Ancients()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
+ }
+ pp := func(val *uint64) string {
+ if val == nil {
+ return ""
+ }
+ return fmt.Sprintf("%d (0x%x)", *val, *val)
+ }
+ data := [][]string{
+ {"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
+ {"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
+ {"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
+ {"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
+ if b := rawdb.ReadHeadBlock(db); b != nil {
+ data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
+ data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
+ data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
+ }
+ if h := rawdb.ReadHeadHeader(db); h != nil {
+ data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
+ data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
+ data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
+ }
+ data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
+ {"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
+ {"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
+ {"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
+ {"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
+ {"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
+ {"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
+ {"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
+ {"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
+ {"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
+ }...)
+ table := tablewriter.NewWriter(os.Stdout)
+ table.SetHeader([]string{"Field", "Value"})
+ table.AppendBulk(data)
+ table.Render()
+ return nil
+}
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index ac6bbab85..e96da7005 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -160,7 +160,6 @@ var (
utils.GpoIgnoreGasPriceFlag,
utils.MinerNotifyFullFlag,
configFileFlag,
- utils.CatalystFlag,
}
rpcFlags = []cli.Flag{
@@ -211,7 +210,7 @@ func init() {
// Initialize the CLI app and start Geth
app.Action = geth
app.HideVersion = true // we have a command to print the version
- app.Copyright = "Copyright 2013-2021 The go-ethereum Authors"
+ app.Copyright = "Copyright 2013-2022 The go-ethereum Authors"
app.Commands = []cli.Command{
// See chaincmd.go:
initCommand,
diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go
index bd2c2443a..fdd46d944 100644
--- a/cmd/geth/snapshot.go
+++ b/cmd/geth/snapshot.go
@@ -418,8 +418,7 @@ func traverseRawState(ctx *cli.Context) error {
// Check the present for non-empty hash node(embedded node doesn't
// have their own hash).
if node != (common.Hash{}) {
- blob := rawdb.ReadTrieNode(chaindb, node)
- if len(blob) == 0 {
+ if !rawdb.HasTrieNode(chaindb, node) {
log.Error("Missing trie node(storage)", "hash", node)
return errors.New("missing storage")
}
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index c63c62fd3..417fba689 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -229,7 +229,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.SnapshotFlag,
utils.BloomFilterSizeFlag,
cli.HelpFlag,
- utils.CatalystFlag,
},
},
}
diff --git a/cmd/geth/version_check_test.go b/cmd/geth/version_check_test.go
index 0f056d196..4be32d5e4 100644
--- a/cmd/geth/version_check_test.go
+++ b/cmd/geth/version_check_test.go
@@ -25,6 +25,8 @@ import (
"strconv"
"strings"
"testing"
+
+ "github.com/jedisct1/go-minisign"
)
func TestVerification(t *testing.T) {
@@ -128,3 +130,39 @@ func TestMatching(t *testing.T) {
}
}
}
+
+func TestGethPubKeysParseable(t *testing.T) {
+ for _, pubkey := range gethPubKeys {
+ _, err := minisign.NewPublicKey(pubkey)
+ if err != nil {
+ t.Errorf("Should be parseable")
+ }
+ }
+}
+
+func TestKeyID(t *testing.T) {
+ type args struct {
+ id [8]byte
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {"@holiman key", args{id: extractKeyId(gethPubKeys[0])}, "FB1D084D39BAEC24"},
+ {"second key", args{id: extractKeyId(gethPubKeys[1])}, "138B1CA303E51687"},
+ {"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := keyID(tt.args.id); got != tt.want {
+ t.Errorf("keyID() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func extractKeyId(pubkey string) [8]byte {
+ p, _ := minisign.NewPublicKey(pubkey)
+ return p.KeyId
+}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index ffff2c92c..7d11b0631 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -45,7 +45,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/catalyst"
+ ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -56,6 +56,7 @@ import (
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/les"
+ lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/exp"
@@ -789,11 +790,6 @@ var (
Usage: "InfluxDB organization name (v2 only)",
Value: metrics.DefaultConfig.InfluxDBOrganization,
}
-
- CatalystFlag = cli.BoolFlag{
- Name: "catalyst",
- Usage: "Catalyst mode (eth2 integration testing)",
- }
)
// MakeDataDir retrieves the currently requested data directory, terminating
@@ -1673,9 +1669,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Create a new developer genesis block or reuse existing one
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
if ctx.GlobalIsSet(DataDirFlag.Name) {
+ // If datadir doesn't exist we need to open db in write-mode
+ // so leveldb can create files.
+ readonly := true
+ if !common.FileExist(stack.ResolvePath("chaindata")) {
+ readonly = false
+ }
// Check if we have an already initialized chain and fall back to
// that if so. Otherwise we need to generate a new genesis spec.
- chaindb := MakeChainDatabase(ctx, stack, false) // TODO (MariusVanDerWijden) make this read only
+ chaindb := MakeChainDatabase(ctx, stack, readonly)
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
cfg.Genesis = nil // fallback to db content
}
@@ -1710,15 +1712,15 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
// RegisterEthService adds an Ethereum client to the stack.
// The second return value is the full node instance, which may be nil if the
// node is running as a light client.
-func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool) (ethapi.Backend, *eth.Ethereum) {
+func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
if cfg.SyncMode == downloader.LightSync {
backend, err := les.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
- if isCatalyst {
- if err := catalyst.RegisterLight(stack, backend); err != nil {
+ if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
+ if err := lescatalyst.Register(stack, backend); err != nil {
Fatalf("Failed to register the catalyst service: %v", err)
}
}
@@ -1734,8 +1736,8 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool
Fatalf("Failed to create the LES server: %v", err)
}
}
- if isCatalyst {
- if err := catalyst.Register(stack, backend); err != nil {
+ if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
+ if err := ethcatalyst.Register(stack, backend); err != nil {
Fatalf("Failed to register the catalyst service: %v", err)
}
}
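
Note: with the --catalyst flag removed, engine API registration is driven entirely by the chain configuration. A minimal sketch of the gate (the helper name is illustrative; RegisterEthService performs this check inline):

func catalystEnabled(cfg *params.ChainConfig) bool {
    // A nil TerminalTotalDifficulty means the network has no merge transition
    // configured, so the engine API ("catalyst") service is not registered.
    return cfg.TerminalTotalDifficulty != nil
}
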
diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index 9467fea67..1fd7deb87 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -43,7 +43,6 @@ var (
// error types into the consensus package.
var (
errTooManyUncles = errors.New("too many uncles")
- errInvalidMixDigest = errors.New("invalid mix digest")
errInvalidNonce = errors.New("invalid nonce")
errInvalidUncleHash = errors.New("invalid uncle hash")
)
@@ -182,10 +181,7 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
if len(header.Extra) > 32 {
return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
}
- // Verify the seal parts. Ensure the mixhash, nonce and uncle hash are the expected value.
- if header.MixDigest != (common.Hash{}) {
- return errInvalidMixDigest
- }
+ // Verify the seal parts. Ensure the nonce and uncle hash are the expected value.
if header.Nonce != beaconNonce {
return errInvalidNonce
}
diff --git a/core/beacon/errors.go b/core/beacon/errors.go
new file mode 100644
index 000000000..5b95c38a2
--- /dev/null
+++ b/core/beacon/errors.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package beacon
+
+import "github.com/ethereum/go-ethereum/rpc"
+
+var (
+ VALID = GenericStringResponse{"VALID"}
+ SUCCESS = GenericStringResponse{"SUCCESS"}
+ INVALID = ForkChoiceResponse{Status: "INVALID", PayloadID: nil}
+ SYNCING = ForkChoiceResponse{Status: "SYNCING", PayloadID: nil}
+ GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
+ UnknownPayload = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
+ InvalidTB = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
+)
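
Note: these shared responses and error codes move into core/beacon so both the full-node and light-client engine implementations can reuse them. A rough usage sketch; the handler shape and the success status literal are illustrative, not the real eth/catalyst API:

func forkchoiceResult(headKnown, syncing bool, id *beacon.PayloadID) beacon.ForkChoiceResponse {
    switch {
    case !headKnown:
        return beacon.INVALID
    case syncing:
        return beacon.SYNCING
    default:
        // Success-path status string shown for illustration only.
        return beacon.ForkChoiceResponse{Status: "SUCCESS", PayloadID: id}
    }
}
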
diff --git a/eth/catalyst/gen_blockparams.go b/core/beacon/gen_blockparams.go
similarity index 99%
rename from eth/catalyst/gen_blockparams.go
rename to core/beacon/gen_blockparams.go
index ccf5c327f..d3d569b7d 100644
--- a/eth/catalyst/gen_blockparams.go
+++ b/core/beacon/gen_blockparams.go
@@ -1,6 +1,6 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-package catalyst
+package beacon
import (
"encoding/json"
diff --git a/eth/catalyst/gen_ed.go b/core/beacon/gen_ed.go
similarity index 99%
rename from eth/catalyst/gen_ed.go
rename to core/beacon/gen_ed.go
index 46eb45808..ac94f49a5 100644
--- a/eth/catalyst/gen_ed.go
+++ b/core/beacon/gen_ed.go
@@ -1,6 +1,6 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-package catalyst
+package beacon
import (
"encoding/json"
diff --git a/eth/catalyst/api_types.go b/core/beacon/types.go
similarity index 50%
rename from eth/catalyst/api_types.go
rename to core/beacon/types.go
index 1f6703030..d7f6ba535 100644
--- a/eth/catalyst/api_types.go
+++ b/core/beacon/types.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The go-ethereum Authors
+// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -14,18 +14,21 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package catalyst
+package beacon
import (
+ "fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/trie"
)
//go:generate go run github.com/fjl/gencodec -type PayloadAttributesV1 -field-override payloadAttributesMarshaling -out gen_blockparams.go
-// Structure described at https://github.com/ethereum/execution-apis/pull/74
+// PayloadAttributesV1 structure described at https://github.com/ethereum/execution-apis/pull/74
type PayloadAttributesV1 struct {
Timestamp uint64 `json:"timestamp" gencodec:"required"`
Random common.Hash `json:"random" gencodec:"required"`
@@ -39,7 +42,7 @@ type payloadAttributesMarshaling struct {
//go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go
-// Structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md
+// ExecutableDataV1 structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md
type ExecutableDataV1 struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
@@ -69,17 +72,6 @@ type executableDataMarshaling struct {
Transactions []hexutil.Bytes
}
-//go:generate go run github.com/fjl/gencodec -type PayloadResponse -field-override payloadResponseMarshaling -out gen_payload.go
-
-type PayloadResponse struct {
- PayloadID uint64 `json:"payloadId"`
-}
-
-// JSON type overrides for payloadResponse.
-type payloadResponseMarshaling struct {
- PayloadID hexutil.Uint64
-}
-
type NewBlockResponse struct {
Valid bool `json:"valid"`
}
@@ -102,9 +94,28 @@ type ConsensusValidatedParams struct {
Status string `json:"status"`
}
+// PayloadID is an identifier of the payload build process
+type PayloadID [8]byte
+
+func (b PayloadID) String() string {
+ return hexutil.Encode(b[:])
+}
+
+func (b PayloadID) MarshalText() ([]byte, error) {
+ return hexutil.Bytes(b[:]).MarshalText()
+}
+
+func (b *PayloadID) UnmarshalText(input []byte) error {
+ err := hexutil.UnmarshalFixedText("PayloadID", input, b[:])
+ if err != nil {
+ return fmt.Errorf("invalid payload id %q: %w", input, err)
+ }
+ return nil
+}
+
type ForkChoiceResponse struct {
- Status string `json:"status"`
- PayloadID *hexutil.Bytes `json:"payloadId"`
+ Status string `json:"status"`
+ PayloadID *PayloadID `json:"payloadId"`
}
type ForkchoiceStateV1 struct {
@@ -112,3 +123,82 @@ type ForkchoiceStateV1 struct {
SafeBlockHash common.Hash `json:"safeBlockHash"`
FinalizedBlockHash common.Hash `json:"finalizedBlockHash"`
}
+
+func encodeTransactions(txs []*types.Transaction) [][]byte {
+ var enc = make([][]byte, len(txs))
+ for i, tx := range txs {
+ enc[i], _ = tx.MarshalBinary()
+ }
+ return enc
+}
+
+func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
+ var txs = make([]*types.Transaction, len(enc))
+ for i, encTx := range enc {
+ var tx types.Transaction
+ if err := tx.UnmarshalBinary(encTx); err != nil {
+ return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
+ }
+ txs[i] = &tx
+ }
+ return txs, nil
+}
+
+// ExecutableDataToBlock constructs a block from executable data.
+// It verifies that:
+// len(extraData) <= 32
+// uncleHash = emptyUncleHash
+// difficulty = 0
+// and that the blockhash of the constructed block matches the parameters.
+func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
+ txs, err := decodeTransactions(params.Transactions)
+ if err != nil {
+ return nil, err
+ }
+ if len(params.ExtraData) > 32 {
+ return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
+ }
+ header := &types.Header{
+ ParentHash: params.ParentHash,
+ UncleHash: types.EmptyUncleHash,
+ Coinbase: params.FeeRecipient,
+ Root: params.StateRoot,
+ TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
+ ReceiptHash: params.ReceiptsRoot,
+ Bloom: types.BytesToBloom(params.LogsBloom),
+ Difficulty: common.Big0,
+ Number: new(big.Int).SetUint64(params.Number),
+ GasLimit: params.GasLimit,
+ GasUsed: params.GasUsed,
+ Time: params.Timestamp,
+ BaseFee: params.BaseFeePerGas,
+ Extra: params.ExtraData,
+ MixDigest: params.Random,
+ }
+ block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
+ if block.Hash() != params.BlockHash {
+ return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
+ }
+ return block, nil
+}
+
+// BlockToExecutableData constructs the ExecutableDataV1 structure by filling the
+// fields from the given block. It assumes the given block is a post-merge block.
+func BlockToExecutableData(block *types.Block) *ExecutableDataV1 {
+ return &ExecutableDataV1{
+ BlockHash: block.Hash(),
+ ParentHash: block.ParentHash(),
+ FeeRecipient: block.Coinbase(),
+ StateRoot: block.Root(),
+ Number: block.NumberU64(),
+ GasLimit: block.GasLimit(),
+ GasUsed: block.GasUsed(),
+ BaseFeePerGas: block.BaseFee(),
+ Timestamp: block.Time(),
+ ReceiptsRoot: block.ReceiptHash(),
+ LogsBloom: block.Bloom().Bytes(),
+ Transactions: encodeTransactions(block.Transactions()),
+ Random: block.MixDigest(),
+ ExtraData: block.Extra(),
+ }
+}
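
A quick, self-contained sketch of the new PayloadID text encoding (assumes only the core/beacon package introduced above):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/ethereum/go-ethereum/core/beacon"
)

func main() {
    id := beacon.PayloadID{1, 2, 3, 4, 5, 6, 7, 8}

    // MarshalText renders the 8-byte id as 0x-prefixed hex.
    out, _ := json.Marshal(id)
    fmt.Println(string(out)) // "0x0102030405060708"

    // UnmarshalText enforces the fixed length and round-trips the value.
    var back beacon.PayloadID
    if err := json.Unmarshal(out, &back); err != nil {
        panic(err)
    }
    fmt.Println(back == id) // true
}
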
diff --git a/core/blockchain.go b/core/blockchain.go
index 9b93f421e..d8b5e4f8b 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -554,7 +554,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
// Degrade the chain markers if they are explicitly reverted.
// In theory we should update all in-memory markers in the
// last step, however the direction of SetHead is from high
- // to low, so it's safe the update in-memory markers directly.
+ // to low, so it's safe to update in-memory markers directly.
bc.currentBlock.Store(newHeadBlock)
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
}
@@ -979,32 +979,31 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// range. In this case, all tx indices of newly imported blocks should be
// generated.
var batch = bc.db.NewBatch()
- for _, block := range blockChain {
+ for i, block := range blockChain {
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
rawdb.WriteTxLookupEntriesByBlock(batch, block)
} else if rawdb.ReadTxIndexTail(bc.db) != nil {
rawdb.WriteTxLookupEntriesByBlock(batch, block)
}
stats.processed++
- }
- // Flush all tx-lookup index data.
- size += int64(batch.ValueSize())
- if err := batch.Write(); err != nil {
- // The tx index data could not be written.
- // Roll back the ancient store update.
- fastBlock := bc.CurrentFastBlock().NumberU64()
- if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
- log.Error("Can't truncate ancient store after failed insert", "err", err)
+ if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
+ size += int64(batch.ValueSize())
+ if err = batch.Write(); err != nil {
+ fastBlock := bc.CurrentFastBlock().NumberU64()
+ if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
+ log.Error("Can't truncate ancient store after failed insert", "err", err)
+ }
+ return 0, err
+ }
+ batch.Reset()
}
- return 0, err
}
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
if err := bc.db.Sync(); err != nil {
return 0, err
}
-
// Update the current fast block because all block data is now present in DB.
previousFastBlock := bc.CurrentFastBlock().NumberU64()
if !updateHead(blockChain[len(blockChain)-1]) {
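
Note: the tx-lookup index writes are now flushed incrementally instead of being accumulated in one unbounded batch. The underlying pattern, sketched as a generic fragment (writeEntry is a placeholder for rawdb.WriteTxLookupEntriesByBlock):

batch := db.NewBatch()
for i, block := range blocks {
    writeEntry(batch, block) // placeholder for the per-block lookup write
    if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blocks)-1 {
        if err := batch.Write(); err != nil {
            return 0, err // caller truncates the ancient store on failure
        }
        batch.Reset()
    }
}
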
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index eb5025ed5..913367179 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -1779,6 +1779,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
SnapshotLimit: 0, // Disable snapshot by default
}
)
+ defer engine.Close()
if snapshots {
config.SnapshotLimit = 256
config.SnapshotWait = true
@@ -1836,25 +1837,25 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
}
defer db.Close()
- chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ newChain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
- defer chain.Stop()
+ defer newChain.Stop()
// Iterate over all the remaining blocks and ensure there are no gaps
- verifyNoGaps(t, chain, true, canonblocks)
- verifyNoGaps(t, chain, false, sideblocks)
- verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
- verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+ verifyNoGaps(t, newChain, true, canonblocks)
+ verifyNoGaps(t, newChain, false, sideblocks)
+ verifyCutoff(t, newChain, true, canonblocks, tt.expCanonicalBlocks)
+ verifyCutoff(t, newChain, false, sideblocks, tt.expSidechainBlocks)
- if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+ if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
}
- if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+ if head := newChain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
}
- if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+ if head := newChain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
}
if frozen, err := db.(freezer).Ancients(); err != nil {
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 6e542fe2f..37a1a42d0 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -2987,10 +2987,10 @@ func TestDeleteRecreateSlots(t *testing.T) {
initCode := []byte{
byte(vm.PUSH1), 0x3, // value
byte(vm.PUSH1), 0x3, // location
- byte(vm.SSTORE), // Set slot[3] = 1
+ byte(vm.SSTORE), // Set slot[3] = 3
byte(vm.PUSH1), 0x4, // value
byte(vm.PUSH1), 0x4, // location
- byte(vm.SSTORE), // Set slot[4] = 1
+ byte(vm.SSTORE), // Set slot[4] = 4
// Slots are set, now return the code
byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
byte(vm.PUSH1), 0x0, // memory start on stack
diff --git a/core/evm.go b/core/evm.go
index 6c67fc437..536ac673e 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -40,6 +40,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
var (
beneficiary common.Address
baseFee *big.Int
+ random *common.Hash
)
// If we don't have an explicit author (i.e. not mining), extract from the header
@@ -51,6 +52,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
if header.BaseFee != nil {
baseFee = new(big.Int).Set(header.BaseFee)
}
+ if header.Difficulty.Cmp(common.Big0) == 0 {
+ random = &header.MixDigest
+ }
return vm.BlockContext{
CanTransfer: CanTransfer,
Transfer: Transfer,
@@ -61,6 +65,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
Difficulty: new(big.Int).Set(header.Difficulty),
BaseFee: baseFee,
GasLimit: header.GasLimit,
+ Random: random,
}
}
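
Note: post-merge headers carry zero difficulty, so the header's mix-digest slot is reinterpreted as the RANDOM value and exposed on the block context. A small sketch of the effect (values illustrative):

header := &types.Header{
    Number:     big.NewInt(1),
    Difficulty: common.Big0, // zero difficulty marks a post-merge header
    MixDigest:  common.HexToHash("0x0badc0de"),
}
ctx := core.NewEVMBlockContext(header, nil, &common.Address{})
fmt.Println(ctx.Random != nil && *ctx.Random == header.MixDigest) // true
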
diff --git a/core/genesis.go b/core/genesis.go
index 557440d08..1d17f298a 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -294,7 +294,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if g.GasLimit == 0 {
head.GasLimit = params.GenesisGasLimit
}
- if g.Difficulty == nil {
+ if g.Difficulty == nil && g.Mixhash == (common.Hash{}) {
head.Difficulty = params.GenesisDifficulty
}
if g.Config != nil && g.Config.IsLondon(common.Big0) {
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 6112de03a..a239d0766 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -41,16 +41,14 @@ func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
// ReadCode retrieves the contract code of the provided code hash.
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
- // Try with the legacy code scheme first, if not then try with current
- // scheme. Since most of the code will be found with legacy scheme.
- //
- // todo(rjl493456442) change the order when we forcibly upgrade the code
- // scheme with snapshot.
- data, _ := db.Get(hash[:])
+ // Try with the prefixed code scheme first, if not then try with legacy
+ // scheme.
+ data := ReadCodeWithPrefix(db, hash)
if len(data) != 0 {
return data
}
- return ReadCodeWithPrefix(db, hash)
+ data, _ = db.Get(hash[:])
+ return data
}
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
@@ -61,6 +59,14 @@ func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data
}
+// HasCodeWithPrefix checks if the contract code corresponding to the
+// provided code hash is present in the db. This function will only check
+// presence using the prefix-scheme.
+func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(codeKey(hash))
+ return ok
+}
+
// WriteCode writes the provided contract code to the database.
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
if err := db.Put(codeKey(hash), code); err != nil {
@@ -81,6 +87,12 @@ func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data
}
+// HasTrieNode checks if the trie node with the provided hash is present in db.
+func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(hash.Bytes())
+ return ok
+}
+
// WriteTrieNode writes the provided trie node to the database.
func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
if err := db.Put(hash.Bytes(), node); err != nil {
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index f9b5563a1..1f7e9311f 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -133,7 +133,7 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
// Create the tables.
for name, disableSnappy := range tables {
- table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy)
+ table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly)
if err != nil {
for _, table := range freezer.tables {
table.Close()
@@ -144,8 +144,15 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
freezer.tables[name] = table
}
- // Truncate all tables to common length.
- if err := freezer.repair(); err != nil {
+ if freezer.readonly {
+ // In readonly mode only validate, don't truncate.
+ // validate also sets `freezer.frozen`.
+ err = freezer.validate()
+ } else {
+ // Truncate all tables to common length.
+ err = freezer.repair()
+ }
+ if err != nil {
for _, table := range freezer.tables {
table.Close()
}
@@ -309,6 +316,33 @@ func (f *freezer) Sync() error {
return nil
}
+// validate checks that every table has the same length.
+// Used instead of `repair` in readonly mode.
+func (f *freezer) validate() error {
+ if len(f.tables) == 0 {
+ return nil
+ }
+ var (
+ length uint64
+ name string
+ )
+ // Hack to get length of any table
+ for kind, table := range f.tables {
+ length = atomic.LoadUint64(&table.items)
+ name = kind
+ break
+ }
+ // Now check every table against that length
+ for kind, table := range f.tables {
+ items := atomic.LoadUint64(&table.items)
+ if length != items {
+ return fmt.Errorf("freezer tables %s and %s have differing lengths: %d != %d", kind, name, items, length)
+ }
+ }
+ atomic.StoreUint64(&f.frozen, length)
+ return nil
+}
+
// repair truncates all data tables to the same length.
func (f *freezer) repair() error {
min := uint64(math.MaxUint64)
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index 22405cf9b..7cfba70c5 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -94,7 +94,8 @@ type freezerTable struct {
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
items uint64 // Number of items stored in the table (including items removed from tail)
- noCompression bool // if true, disables snappy compression. Note: does not work retroactively
+ noCompression bool // if true, disables snappy compression. Note: does not work retroactively
+ readonly bool
maxFileSize uint32 // Max file size for data-files
name string
path string
@@ -119,8 +120,8 @@ type freezerTable struct {
}
// NewFreezerTable opens the given path as a freezer table.
-func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
- return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
+func NewFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
+ return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
}
// openFreezerFileForAppend opens a freezer table file and seeks to the end
@@ -164,7 +165,7 @@ func truncateFreezerFile(file *os.File, size int64) error {
// newTable opens a freezer table, creating the data and index files if they are
// non existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
-func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
+func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
// Ensure the containing directory exists and open the indexEntry file
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
@@ -177,7 +178,16 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
// Compressed idx
idxName = fmt.Sprintf("%s.cidx", name)
}
- offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
+ var (
+ err error
+ offsets *os.File
+ )
+ if readonly {
+ // Will fail if table doesn't exist
+ offsets, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
+ } else {
+ offsets, err = openFreezerFileForAppend(filepath.Join(path, idxName))
+ }
if err != nil {
return nil, err
}
@@ -192,6 +202,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
path: path,
logger: log.New("database", path, "table", name),
noCompression: noCompression,
+ readonly: readonly,
maxFileSize: maxFilesize,
}
if err := tab.repair(); err != nil {
@@ -252,7 +263,11 @@ func (t *freezerTable) repair() error {
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
lastIndex.unmarshalBinary(buffer)
- t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
+ if t.readonly {
+ t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
+ } else {
+ t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
+ }
if err != nil {
return err
}
@@ -301,12 +316,15 @@ func (t *freezerTable) repair() error {
contentExp = int64(lastIndex.offset)
}
}
- // Ensure all reparation changes have been written to disk
- if err := t.index.Sync(); err != nil {
- return err
- }
- if err := t.head.Sync(); err != nil {
- return err
+ // Sync() fails for read-only files on windows.
+ if !t.readonly {
+ // Ensure all reparation changes have been written to disk
+ if err := t.index.Sync(); err != nil {
+ return err
+ }
+ if err := t.head.Sync(); err != nil {
+ return err
+ }
}
// Update the item and byte counters and return
t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
@@ -334,8 +352,12 @@ func (t *freezerTable) preopen() (err error) {
return err
}
}
- // Open head in read/write
- t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
+ if t.readonly {
+ t.head, err = t.openFile(t.headId, openFreezerFileForReadOnly)
+ } else {
+ // Open head in read/write
+ t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
+ }
return err
}
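
Note: freezer tables can now be opened read-only: the index and head files are opened without write access, no repair/truncation is attempted (the table just has to already be consistent), and Sync is skipped. A brief usage sketch (path and table name illustrative):

table, err := rawdb.NewFreezerTable(ancientDir, "headers", false /* disableSnappy */, true /* readonly */)
if err != nil {
    return err // fails if the table is missing or inconsistent
}
defer table.Close()
blob, err := table.Retrieve(0) // raw bytes of the first frozen item
if err != nil {
    return err
}
fmt.Printf("item 0: %d bytes\n", len(blob))
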
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index 803809b52..15464e1bd 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -40,7 +40,7 @@ func TestFreezerBasics(t *testing.T) {
// set cutoff at 50 bytes
f, err := newTable(os.TempDir(),
fmt.Sprintf("unittest-%d", rand.Uint64()),
- metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -85,7 +85,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
f *freezerTable
err error
)
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -99,7 +99,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
require.NoError(t, batch.commit())
f.Close()
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -116,7 +116,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
}
f.Close()
- f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -131,7 +131,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -160,7 +160,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
// Now open it again
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -183,7 +183,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
// Fill a table and close it
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -209,7 +209,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
// Now open it again
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -232,7 +232,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
// And if we open it, we should now be able to read all of them (new values)
{
- f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
for y := 1; y < 255; y++ {
exp := getChunk(15, ^y)
got, err := f.Retrieve(uint64(y))
@@ -254,7 +254,7 @@ func TestSnappyDetection(t *testing.T) {
// Open with snappy
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -265,7 +265,7 @@ func TestSnappyDetection(t *testing.T) {
// Open without snappy
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
if err != nil {
t.Fatal(err)
}
@@ -277,7 +277,7 @@ func TestSnappyDetection(t *testing.T) {
// Open with snappy
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -309,7 +309,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
// Fill a table and close it
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -345,7 +345,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
// 45, 45, 15
// with 3+3+1 items
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -366,7 +366,7 @@ func TestFreezerTruncate(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -382,7 +382,7 @@ func TestFreezerTruncate(t *testing.T) {
// Reopen, truncate
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -407,7 +407,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -440,7 +440,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
// Reopen
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -475,7 +475,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -491,7 +491,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
// Reopen and read all files
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -523,7 +523,7 @@ func TestFreezerOffset(t *testing.T) {
// Fill table
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
@@ -584,7 +584,7 @@ func TestFreezerOffset(t *testing.T) {
// Now open again
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
@@ -638,7 +638,7 @@ func TestFreezerOffset(t *testing.T) {
// Check that existing items have been moved to index 1M.
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
@@ -726,7 +726,7 @@ func TestSequentialRead(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("batchread-%d", rand.Uint64())
{ // Fill table
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -736,7 +736,7 @@ func TestSequentialRead(t *testing.T) {
f.Close()
}
{ // Open it, iterate, verify iteration
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
if err != nil {
t.Fatal(err)
}
@@ -757,7 +757,7 @@ func TestSequentialRead(t *testing.T) {
}
{ // Open it, iterate, verify byte limit. The byte limit is less than item
// size, so each lookup should only return one item
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
@@ -786,7 +786,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
{ // Fill table
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
if err != nil {
t.Fatal(err)
}
@@ -808,7 +808,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
{100, 109, 10},
} {
{
- f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
if err != nil {
t.Fatal(err)
}
@@ -829,3 +829,89 @@ func TestSequentialReadByteLimit(t *testing.T) {
}
}
}
+
+func TestFreezerReadonly(t *testing.T) {
+ tmpdir := os.TempDir()
+ // Case 1: Check it fails on non-existent file.
+ _, err := newTable(tmpdir,
+ fmt.Sprintf("readonlytest-%d", rand.Uint64()),
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+ if err == nil {
+ t.Fatal("readonly table instantiation should fail for non-existent table")
+ }
+
+ // Case 2: Check that it fails on invalid index length.
+ fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
+ idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
+ if err != nil {
+ t.Errorf("Failed to open index file: %v\n", err)
+ }
+ // size should not be a multiple of indexEntrySize.
+ idxFile.Write(make([]byte, 17))
+ idxFile.Close()
+ _, err = newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+ if err == nil {
+ t.Errorf("readonly table instantiation should fail for invalid index size")
+ }
+
+ // Case 3: Open a non-readonly table to write some data.
+ // Then corrupt the head file and make sure opening the table
+ // again in readonly triggers an error.
+ fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
+ f, err := newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+ if err != nil {
+ t.Fatalf("failed to instantiate table: %v", err)
+ }
+ writeChunks(t, f, 8, 32)
+ // Corrupt table file
+ if _, err := f.head.Write([]byte{1, 1}); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ _, err = newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+ if err == nil {
+ t.Errorf("readonly table instantiation should fail for corrupt table file")
+ }
+
+ // Case 4: Write some data to a table and later re-open it as readonly.
+ // Should be successful.
+ fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
+ f, err = newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+ if err != nil {
+ t.Fatalf("failed to instantiate table: %v\n", err)
+ }
+ writeChunks(t, f, 32, 128)
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ f, err = newTable(tmpdir, fname,
+ metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ v, err := f.Retrieve(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+ exp := getChunk(128, 10)
+ if !bytes.Equal(v, exp) {
+ t.Errorf("retrieved value is incorrect")
+ }
+
+ // Case 5: Now write some data via a batch.
+ // This should fail either during AppendRaw or Commit
+ batch := f.newBatch()
+ writeErr := batch.AppendRaw(32, make([]byte, 1))
+ if writeErr == nil {
+ writeErr = batch.commit()
+ }
+ if writeErr == nil {
+ t.Fatalf("Writing to readonly table should fail")
+ }
+}
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
index fa84f8030..d5c3749e5 100644
--- a/core/rawdb/freezer_test.go
+++ b/core/rawdb/freezer_test.go
@@ -253,6 +253,44 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
}
}
+func TestFreezerReadonlyValidate(t *testing.T) {
+ tables := map[string]bool{"a": true, "b": true}
+ dir, err := ioutil.TempDir("", "freezer")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ // Open non-readonly freezer and fill individual tables
+ // with different amounts of data.
+ f, err := newFreezer(dir, "", false, 2049, tables)
+ if err != nil {
+ t.Fatal("can't open freezer", err)
+ }
+ var item = make([]byte, 1024)
+ aBatch := f.tables["a"].newBatch()
+ require.NoError(t, aBatch.AppendRaw(0, item))
+ require.NoError(t, aBatch.AppendRaw(1, item))
+ require.NoError(t, aBatch.AppendRaw(2, item))
+ require.NoError(t, aBatch.commit())
+ bBatch := f.tables["b"].newBatch()
+ require.NoError(t, bBatch.AppendRaw(0, item))
+ require.NoError(t, bBatch.commit())
+ if f.tables["a"].items != 3 {
+ t.Fatalf("unexpected number of items in table")
+ }
+ if f.tables["b"].items != 1 {
+ t.Fatalf("unexpected number of items in table")
+ }
+ require.NoError(t, f.Close())
+
+ // Re-opening as readonly should fail when validating
+ // table lengths.
+ f, err = newFreezer(dir, "", true, 2049, tables)
+ if err == nil {
+ t.Fatal("readonly freezer should fail with differing table lengths")
+ }
+}
+
func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
t.Helper()
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 5cfb9a9f2..6836a5740 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -66,6 +66,29 @@ type journalStorage struct {
Vals [][]byte
}
+func ParseGeneratorStatus(generatorBlob []byte) string {
+ if len(generatorBlob) == 0 {
+ return ""
+ }
+ var generator journalGenerator
+ if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
+ log.Warn("failed to decode snapshot generator", "err", err)
+ return ""
+ }
+ // Figure out whether we're after or within an account
+ var m string
+ switch marker := generator.Marker; len(marker) {
+ case common.HashLength:
+ m = fmt.Sprintf("at %#x", marker)
+ case 2 * common.HashLength:
+ m = fmt.Sprintf("in %#x at %#x", marker[:common.HashLength], marker[common.HashLength:])
+ default:
+ m = fmt.Sprintf("%#x", marker)
+ }
+ return fmt.Sprintf(`Done: %v, Accounts: %d, Slots: %d, Storage: %d, Marker: %s`,
+ generator.Done, generator.Accounts, generator.Slots, generator.Storage, m)
+}
+
// loadAndParseJournal tries to parse the snapshot journal in latest format.
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
// Retrieve the disk layer generator. It must exist, no matter the
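
Note: ParseGeneratorStatus backs the database-metadata listing added earlier; it decodes the persisted generator journal and reports whether generation finished and where the marker currently points. Usage sketch (output values illustrative):

status := snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))
// e.g. "Done: false, Accounts: 1337, Slots: 42, Storage: 65536, Marker: at 0x..."
fmt.Println(status)
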
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 138fcbdec..bcb6dca4f 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -198,25 +198,10 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// If no live objects are available, attempt to use snapshots
var (
- enc []byte
- err error
- meter *time.Duration
+ enc []byte
+ err error
)
- readStart := time.Now()
- if metrics.EnabledExpensive {
- // If the snap is 'under construction', the first lookup may fail. If that
- // happens, we don't want to double-count the time elapsed. Thus this
- // dance with the metering.
- defer func() {
- if meter != nil {
- *meter += time.Since(readStart)
- }
- }()
- }
if s.db.snap != nil {
- if metrics.EnabledExpensive {
- meter = &s.db.SnapshotStorageReads
- }
// If the object was destructed in *this* block (and potentially resurrected),
// the storage has been cleared out, and we should *not* consult the previous
// snapshot about any storage values. The only possible alternatives are:
@@ -226,20 +211,20 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
if _, destructed := s.db.snapDestructs[s.addrHash]; destructed {
return common.Hash{}
}
+ start := time.Now()
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
+ if metrics.EnabledExpensive {
+ s.db.SnapshotStorageReads += time.Since(start)
+ }
}
// If the snapshot is unavailable or reading from it fails, load from the database.
if s.db.snap == nil || err != nil {
- if meter != nil {
- // If we already spent time checking the snapshot, account for it
- // and reset the readStart
- *meter += time.Since(readStart)
- readStart = time.Now()
- }
+ start := time.Now()
+ enc, err = s.getTrie(db).TryGet(key.Bytes())
if metrics.EnabledExpensive {
- meter = &s.db.StorageReads
+ s.db.StorageReads += time.Since(start)
}
- if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
+ if err != nil {
s.setError(err)
return common.Hash{}
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 571f0b6ac..eb5fec538 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -513,16 +513,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
return obj
}
// If no live objects are available, attempt to use snapshots
- var (
- data *types.StateAccount
- err error
- )
+ var data *types.StateAccount
if s.snap != nil {
+ start := time.Now()
+ acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
if metrics.EnabledExpensive {
- defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
+ s.SnapshotAccountReads += time.Since(start)
}
- var acc *snapshot.Account
- if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
+ if err == nil {
if acc == nil {
return nil
}
@@ -541,11 +539,12 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
}
// If snapshot unavailable or reading from it failed, load from the database
- if s.snap == nil || err != nil {
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
- }
+ if data == nil {
+ start := time.Now()
enc, err := s.trie.TryGet(addr.Bytes())
+ if metrics.EnabledExpensive {
+ s.AccountReads += time.Since(start)
+ }
if err != nil {
s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
return nil
diff --git a/core/state_transition.go b/core/state_transition.go
index 135a9c6db..05d563307 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -310,7 +310,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
}
// Set up the initial access list.
- if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber); rules.IsBerlin {
+ if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Random != nil); rules.IsBerlin {
st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList())
}
var (
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 2c7880b3b..dd55618bf 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -75,6 +75,7 @@ type BlockContext struct {
Time *big.Int // Provides information for TIME
Difficulty *big.Int // Provides information for DIFFICULTY
BaseFee *big.Int // Provides information for BASEFEE
+ Random *common.Hash // Provides information for RANDOM
}
// TxContext provides the EVM with information about a transaction.
@@ -131,7 +132,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
StateDB: statedb,
Config: config,
chainConfig: chainConfig,
- chainRules: chainConfig.Rules(blockCtx.BlockNumber),
+ chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil),
}
evm.interpreter = NewEVMInterpreter(evm, config)
return evm
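
Note: ChainConfig.Rules gains a second parameter reporting whether the block is post-merge; call sites derive it from the presence of a RANDOM value. Sketch of the updated call, with the surrounding variables assumed in scope:

isMerge := blockCtx.Random != nil // only post-merge block contexts carry RANDOM
rules := chainConfig.Rules(blockCtx.BlockNumber, isMerge)
if rules.IsBerlin {
    statedb.PrepareAccessList(sender, to, vm.ActivePrecompiles(rules), accessList)
}
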
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 4eda3bf53..db507c481 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -477,6 +477,12 @@ func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
return nil, nil
}
+func opRandom(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ v := new(uint256.Int).SetBytes(interpreter.evm.Context.Random.Bytes())
+ scope.Stack.push(v)
+ return nil, nil
+}
+
func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit))
return nil, nil
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index e67acd832..36589a126 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -21,6 +21,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
+ "math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -654,3 +655,36 @@ func TestCreate2Addreses(t *testing.T) {
}
}
}
+
+func TestRandom(t *testing.T) {
+ type testcase struct {
+ name string
+ random common.Hash
+ }
+
+ for _, tt := range []testcase{
+ {name: "empty hash", random: common.Hash{}},
+ {name: "1", random: common.Hash{0}},
+ {name: "emptyCodeHash", random: emptyCodeHash},
+ {name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})},
+ } {
+ var (
+ env = NewEVM(BlockContext{Random: &tt.random}, TxContext{}, nil, params.TestChainConfig, Config{})
+ stack = newstack()
+ pc = uint64(0)
+ evmInterpreter = env.interpreter
+ )
+ opRandom(&pc, evmInterpreter, &ScopeContext{nil, stack, nil})
+ if len(stack.data) != 1 {
+ t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
+ }
+ actual := stack.pop()
+ expected, overflow := uint256.FromBig(new(big.Int).SetBytes(tt.random.Bytes()))
+ if overflow {
+ t.Errorf("Testcase %v: invalid overflow", tt.name)
+ }
+ if actual.Cmp(expected) != 0 {
+ t.Errorf("Testcase %v: expected %x, got %x", tt.name, expected, actual)
+ }
+ }
+}
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index 1660e3ce0..21e3c914e 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -69,6 +69,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
// If jump table was not initialised we set the default one.
if cfg.JumpTable == nil {
switch {
+ case evm.chainRules.IsMerge:
+ cfg.JumpTable = &mergeInstructionSet
case evm.chainRules.IsLondon:
cfg.JumpTable = &londonInstructionSet
case evm.chainRules.IsBerlin:
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 6dea5d81f..eef3b53d8 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -54,6 +54,7 @@ var (
istanbulInstructionSet = newIstanbulInstructionSet()
berlinInstructionSet = newBerlinInstructionSet()
londonInstructionSet = newLondonInstructionSet()
+ mergeInstructionSet = newMergeInstructionSet()
)
// JumpTable contains the EVM opcodes supported at a given fork.
@@ -77,6 +78,17 @@ func validate(jt JumpTable) JumpTable {
return jt
}
+func newMergeInstructionSet() JumpTable {
+ instructionSet := newLondonInstructionSet()
+ instructionSet[RANDOM] = &operation{
+ execute: opRandom,
+ constantGas: GasQuickStep,
+ minStack: minStack(0, 1),
+ maxStack: maxStack(0, 1),
+ }
+ return validate(instructionSet)
+}
+
// newLondonInstructionSet returns the frontier, homestead, byzantium,
// contantinople, istanbul, petersburg, berlin and london instructions.
func newLondonInstructionSet() JumpTable {
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index a1833e510..ba70fa09d 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -95,6 +95,7 @@ const (
TIMESTAMP OpCode = 0x42
NUMBER OpCode = 0x43
DIFFICULTY OpCode = 0x44
+ RANDOM OpCode = 0x44 // Same as DIFFICULTY
GASLIMIT OpCode = 0x45
CHAINID OpCode = 0x46
SELFBALANCE OpCode = 0x47
@@ -275,7 +276,7 @@ var opCodeToString = map[OpCode]string{
COINBASE: "COINBASE",
TIMESTAMP: "TIMESTAMP",
NUMBER: "NUMBER",
- DIFFICULTY: "DIFFICULTY",
+ DIFFICULTY: "DIFFICULTY", // TODO (MariusVanDerWijden) rename to RANDOM post merge
GASLIMIT: "GASLIMIT",
CHAINID: "CHAINID",
SELFBALANCE: "SELFBALANCE",
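
Note: RANDOM is not a new opcode number; it aliases 0x44, and the merge jump table swaps in opRandom for that slot. A tiny sketch of the selection (mirrors the TestRandom setup; nothing is executed here):

fmt.Println(vm.RANDOM == vm.DIFFICULTY) // true: both are opcode 0x44

random := common.HexToHash("0x0102")
evm := vm.NewEVM(vm.BlockContext{Random: &random}, vm.TxContext{}, nil, params.TestChainConfig, vm.Config{})
_ = evm // a non-nil Random makes chainRules.IsMerge true, so 0x44 runs opRandom
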
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 103ce3e17..7861fb92d 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -118,7 +118,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
)
- if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
+ if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil); rules.IsBerlin {
cfg.State.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil)
}
cfg.State.CreateAccount(address)
@@ -150,7 +150,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
)
- if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
+ if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil); rules.IsBerlin {
cfg.State.PrepareAccessList(cfg.Origin, nil, vm.ActivePrecompiles(rules), nil)
}
// Call the code with the given configuration.
@@ -176,7 +176,7 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
sender := cfg.State.GetOrNewStateObject(cfg.Origin)
statedb := cfg.State
- if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
+ if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil); rules.IsBerlin {
statedb.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil)
}
// Call the code with the given configuration.
diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go
index 0a6aeb2b5..96e33da00 100644
--- a/crypto/ecies/ecies_test.go
+++ b/crypto/ecies/ecies_test.go
@@ -279,7 +279,7 @@ var testCases = []testCase{
{
Curve: elliptic.P384(),
Name: "P384",
- Expected: ECIES_AES256_SHA384,
+ Expected: ECIES_AES192_SHA384,
},
{
Curve: elliptic.P521(),
diff --git a/crypto/ecies/params.go b/crypto/ecies/params.go
index 0bd3877dd..39e7c8947 100644
--- a/crypto/ecies/params.go
+++ b/crypto/ecies/params.go
@@ -80,6 +80,14 @@ var (
KeyLen: 16,
}
+ ECIES_AES192_SHA384 = &ECIESParams{
+ Hash: sha512.New384,
+ hashAlgo: crypto.SHA384,
+ Cipher: aes.NewCipher,
+ BlockSize: aes.BlockSize,
+ KeyLen: 24,
+ }
+
ECIES_AES256_SHA256 = &ECIESParams{
Hash: sha256.New,
hashAlgo: crypto.SHA256,
@@ -108,7 +116,7 @@ var (
var paramsFromCurve = map[elliptic.Curve]*ECIESParams{
ethcrypto.S256(): ECIES_AES128_SHA256,
elliptic.P256(): ECIES_AES128_SHA256,
- elliptic.P384(): ECIES_AES256_SHA384,
+ elliptic.P384(): ECIES_AES192_SHA384,
elliptic.P521(): ECIES_AES256_SHA512,
}
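
Note: P-384 now pairs with AES-192 (24-byte key), matching the curve's roughly 192-bit security level, instead of AES-256. Quick check of the mapping after the change (assumes the exported ParamsFromCurve helper):

p := ecies.ParamsFromCurve(elliptic.P384())
fmt.Println(p == ecies.ECIES_AES192_SHA384, p.KeyLen) // true 24
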
diff --git a/eth/backend.go b/eth/backend.go
index a53982166..22535e0e2 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -234,7 +234,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
return nil, err
}
- eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock, merger)
+ eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock)
eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))
eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 3c0b6d9e4..a8b20d758 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -20,37 +20,15 @@ package catalyst
import (
"crypto/sha256"
"encoding/binary"
- "errors"
"fmt"
- "math/big"
- "time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/consensus/beacon"
- "github.com/ethereum/go-ethereum/consensus/misc"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/beacon"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
- chainParams "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/trie"
-)
-
-var (
- VALID = GenericStringResponse{"VALID"}
- SUCCESS = GenericStringResponse{"SUCCESS"}
- INVALID = ForkChoiceResponse{Status: "INVALID", PayloadID: nil}
- SYNCING = ForkChoiceResponse{Status: "SYNCING", PayloadID: nil}
- GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
- UnknownPayload = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
- InvalidTB = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
- InvalidPayloadID = rpc.CustomError{Code: 1, ValidationError: "invalid payload id"}
)
// Register adds catalyst APIs to the full node.
@@ -60,21 +38,7 @@ func Register(stack *node.Node, backend *eth.Ethereum) error {
{
Namespace: "engine",
Version: "1.0",
- Service: NewConsensusAPI(backend, nil),
- Public: true,
- },
- })
- return nil
-}
-
-// RegisterLight adds catalyst APIs to the light client.
-func RegisterLight(stack *node.Node, backend *les.LightEthereum) error {
- log.Warn("Catalyst mode enabled", "protocol", "les")
- stack.RegisterAPIs([]rpc.API{
- {
- Namespace: "engine",
- Version: "1.0",
- Service: NewConsensusAPI(nil, backend),
+ Service: NewConsensusAPI(backend),
Public: true,
},
})
@@ -82,184 +46,86 @@ func RegisterLight(stack *node.Node, backend *les.LightEthereum) error {
}
type ConsensusAPI struct {
- light bool
eth *eth.Ethereum
- les *les.LightEthereum
- engine consensus.Engine // engine is the post-merge consensus engine, only for block creation
- preparedBlocks map[uint64]*ExecutableDataV1
+ preparedBlocks *payloadQueue // preparedBlocks caches payloads (*ExecutableDataV1) by payload ID (PayloadID)
}
-func NewConsensusAPI(eth *eth.Ethereum, les *les.LightEthereum) *ConsensusAPI {
- var engine consensus.Engine
- if eth == nil {
- if les.BlockChain().Config().TerminalTotalDifficulty == nil {
- panic("Catalyst started without valid total difficulty")
- }
- if b, ok := les.Engine().(*beacon.Beacon); ok {
- engine = beacon.New(b.InnerEngine())
- } else {
- engine = beacon.New(les.Engine())
- }
- } else {
- if eth.BlockChain().Config().TerminalTotalDifficulty == nil {
- panic("Catalyst started without valid total difficulty")
- }
- if b, ok := eth.Engine().(*beacon.Beacon); ok {
- engine = beacon.New(b.InnerEngine())
- } else {
- engine = beacon.New(eth.Engine())
- }
+// NewConsensusAPI creates a new consensus api for the given backend.
+// The underlying blockchain needs to have a valid terminal total difficulty set.
+func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
+ if eth.BlockChain().Config().TerminalTotalDifficulty == nil {
+ panic("Catalyst started without valid total difficulty")
}
return &ConsensusAPI{
- light: eth == nil,
eth: eth,
- les: les,
- engine: engine,
- preparedBlocks: make(map[uint64]*ExecutableDataV1),
+ preparedBlocks: newPayloadQueue(),
}
}
-// blockExecutionEnv gathers all the data required to execute
-// a block, either when assembling it or when inserting it.
-type blockExecutionEnv struct {
- chain *core.BlockChain
- state *state.StateDB
- tcount int
- gasPool *core.GasPool
-
- header *types.Header
- txs []*types.Transaction
- receipts []*types.Receipt
-}
-
-func (env *blockExecutionEnv) commitTransaction(tx *types.Transaction, coinbase common.Address) error {
- vmconfig := *env.chain.GetVMConfig()
- snap := env.state.Snapshot()
- receipt, err := core.ApplyTransaction(env.chain.Config(), env.chain, &coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, vmconfig)
- if err != nil {
- env.state.RevertToSnapshot(snap)
- return err
- }
- env.txs = append(env.txs, tx)
- env.receipts = append(env.receipts, receipt)
- return nil
-}
-
-func (api *ConsensusAPI) makeEnv(parent *types.Block, header *types.Header) (*blockExecutionEnv, error) {
- // The parent state might be missing. It can be the special scenario
- // that consensus layer tries to build a new block based on the very
- // old side chain block and the relevant state is already pruned. So
- // try to retrieve the live state from the chain, if it's not existent,
- // do the necessary recovery work.
- var (
- err error
- state *state.StateDB
- )
- if api.eth.BlockChain().HasState(parent.Root()) {
- state, err = api.eth.BlockChain().StateAt(parent.Root())
- } else {
- // The maximum acceptable reorg depth can be limited by the
- // finalised block somehow. TODO(rjl493456442) fix the hard-
- // coded number here later.
- state, err = api.eth.StateAtBlock(parent, 1000, nil, false, false)
- }
- if err != nil {
- return nil, err
- }
- env := &blockExecutionEnv{
- chain: api.eth.BlockChain(),
- state: state,
- header: header,
- gasPool: new(core.GasPool).AddGas(header.GasLimit),
- }
- return env, nil
-}
-
-func (api *ConsensusAPI) GetPayloadV1(payloadID hexutil.Bytes) (*ExecutableDataV1, error) {
- hash := []byte(payloadID)
- if len(hash) < 8 {
- return nil, &InvalidPayloadID
- }
- id := binary.BigEndian.Uint64(hash[:8])
- data, ok := api.preparedBlocks[id]
- if !ok {
- return nil, &UnknownPayload
- }
- return data, nil
-}
-
-func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads ForkchoiceStateV1, PayloadAttributes *PayloadAttributesV1) (ForkChoiceResponse, error) {
+// ForkchoiceUpdatedV1 has several responsibilities:
+// If the method is called with an empty head block:
+// we return success, which can be used to check whether catalyst mode is enabled.
+// If the terminal total difficulty was not reached:
+// we return INVALID.
+// If the finalizedBlockHash is set:
+// we check that the finalizedBlockHash is in our db; if not, we start a sync.
+// We then try to set our blockchain to the headBlock.
+// If there are payloadAttributes:
+// we try to assemble a block with the payloadAttributes and return its payloadID.
+func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
+ log.Trace("Engine API request received", "method", "ForkChoiceUpdated", "head", heads.HeadBlockHash, "finalized", heads.FinalizedBlockHash, "safe", heads.SafeBlockHash)
if heads.HeadBlockHash == (common.Hash{}) {
- return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: nil}, nil
+ return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: nil}, nil
}
if err := api.checkTerminalTotalDifficulty(heads.HeadBlockHash); err != nil {
if block := api.eth.BlockChain().GetBlockByHash(heads.HeadBlockHash); block == nil {
// TODO (MariusVanDerWijden) trigger sync
- return SYNCING, nil
+ return beacon.SYNCING, nil
}
- return INVALID, err
+ return beacon.INVALID, err
}
// If the finalized block is set, check if it is in our blockchain
if heads.FinalizedBlockHash != (common.Hash{}) {
if block := api.eth.BlockChain().GetBlockByHash(heads.FinalizedBlockHash); block == nil {
// TODO (MariusVanDerWijden) trigger sync
- return SYNCING, nil
+ return beacon.SYNCING, nil
}
}
// SetHead
if err := api.setHead(heads.HeadBlockHash); err != nil {
- return INVALID, err
+ return beacon.INVALID, err
}
- // Assemble block (if needed)
- if PayloadAttributes != nil {
- data, err := api.assembleBlock(heads.HeadBlockHash, PayloadAttributes)
+ // Assemble block (if needed). This only works on full nodes.
+ if payloadAttributes != nil {
+ data, err := api.assembleBlock(heads.HeadBlockHash, payloadAttributes)
if err != nil {
- return INVALID, err
+ return beacon.INVALID, err
}
- hash := computePayloadId(heads.HeadBlockHash, PayloadAttributes)
- id := binary.BigEndian.Uint64(hash)
- api.preparedBlocks[id] = data
- log.Info("Created payload", "payloadid", id)
- // TODO (MariusVanDerWijden) do something with the payloadID?
- hex := hexutil.Bytes(hash)
- return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: &hex}, nil
+ id := computePayloadId(heads.HeadBlockHash, payloadAttributes)
+ api.preparedBlocks.put(id, data)
+ log.Info("Created payload", "payloadID", id)
+ return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: &id}, nil
}
- return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: nil}, nil
+ return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: nil}, nil
}
-func computePayloadId(headBlockHash common.Hash, params *PayloadAttributesV1) []byte {
- // Hash
- hasher := sha256.New()
- hasher.Write(headBlockHash[:])
- binary.Write(hasher, binary.BigEndian, params.Timestamp)
- hasher.Write(params.Random[:])
- hasher.Write(params.SuggestedFeeRecipient[:])
- return hasher.Sum([]byte{})[:8]
-}
-
-func (api *ConsensusAPI) invalid() ExecutePayloadResponse {
- if api.light {
- return ExecutePayloadResponse{Status: INVALID.Status, LatestValidHash: api.les.BlockChain().CurrentHeader().Hash()}
+// GetPayloadV1 returns a cached payload by id.
+func (api *ConsensusAPI) GetPayloadV1(payloadID beacon.PayloadID) (*beacon.ExecutableDataV1, error) {
+ log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID)
+ data := api.preparedBlocks.get(payloadID)
+ if data == nil {
+ return nil, &beacon.UnknownPayload
}
- return ExecutePayloadResponse{Status: INVALID.Status, LatestValidHash: api.eth.BlockChain().CurrentHeader().Hash()}
+ return data, nil
}
-// ExecutePayload creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
-func (api *ConsensusAPI) ExecutePayloadV1(params ExecutableDataV1) (ExecutePayloadResponse, error) {
- block, err := ExecutableDataToBlock(params)
+// ExecutePayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
+func (api *ConsensusAPI) ExecutePayloadV1(params beacon.ExecutableDataV1) (beacon.ExecutePayloadResponse, error) {
+ log.Trace("Engine API request received", "method", "ExecutePayload", params.BlockHash, "number", params.Number)
+ block, err := beacon.ExecutableDataToBlock(params)
if err != nil {
return api.invalid(), err
}
- if api.light {
- parent := api.les.BlockChain().GetHeaderByHash(params.ParentHash)
- if parent == nil {
- return api.invalid(), fmt.Errorf("could not find parent %x", params.ParentHash)
- }
- if err = api.les.BlockChain().InsertHeader(block.Header()); err != nil {
- return api.invalid(), err
- }
- return ExecutePayloadResponse{Status: VALID.Status, LatestValidHash: block.Hash()}, nil
- }
if !api.eth.BlockChain().HasBlock(block.ParentHash(), block.NumberU64()-1) {
/*
TODO (MariusVanDerWijden) reenable once sync is merged
@@ -268,7 +134,7 @@ func (api *ConsensusAPI) ExecutePayloadV1(params ExecutableDataV1) (ExecutePaylo
}
*/
// TODO (MariusVanDerWijden) we should return nil here not empty hash
- return ExecutePayloadResponse{Status: SYNCING.Status, LatestValidHash: common.Hash{}}, nil
+ return beacon.ExecutePayloadResponse{Status: beacon.SYNCING.Status, LatestValidHash: common.Hash{}}, nil
}
parent := api.eth.BlockChain().GetBlockByHash(params.ParentHash)
td := api.eth.BlockChain().GetTd(parent.Hash(), block.NumberU64()-1)
@@ -276,188 +142,44 @@ func (api *ConsensusAPI) ExecutePayloadV1(params ExecutableDataV1) (ExecutePaylo
if td.Cmp(ttd) < 0 {
return api.invalid(), fmt.Errorf("can not execute payload on top of block with low td got: %v threshold %v", td, ttd)
}
+ log.Trace("Inserting block without head", "hash", block.Hash(), "number", block.Number)
if err := api.eth.BlockChain().InsertBlockWithoutSetHead(block); err != nil {
return api.invalid(), err
}
- if merger := api.merger(); !merger.TDDReached() {
+ if merger := api.eth.Merger(); !merger.TDDReached() {
merger.ReachTTD()
}
- return ExecutePayloadResponse{Status: VALID.Status, LatestValidHash: block.Hash()}, nil
+ return beacon.ExecutePayloadResponse{Status: beacon.VALID.Status, LatestValidHash: block.Hash()}, nil
}
-// AssembleBlock creates a new block, inserts it into the chain, and returns the "execution
-// data" required for eth2 clients to process the new block.
-func (api *ConsensusAPI) assembleBlock(parentHash common.Hash, params *PayloadAttributesV1) (*ExecutableDataV1, error) {
- if api.light {
- return nil, errors.New("not supported")
- }
+// computePayloadId derives a deterministic payload id from the head block hash and the payload attributes.
+func computePayloadId(headBlockHash common.Hash, params *beacon.PayloadAttributesV1) beacon.PayloadID {
+ // Hash
+ hasher := sha256.New()
+ hasher.Write(headBlockHash[:])
+ binary.Write(hasher, binary.BigEndian, params.Timestamp)
+ hasher.Write(params.Random[:])
+ hasher.Write(params.SuggestedFeeRecipient[:])
+ var out beacon.PayloadID
+ copy(out[:], hasher.Sum(nil)[:8])
+ return out
+}
+
+// invalid returns a response "INVALID" with the latest valid hash set to the current head.
+func (api *ConsensusAPI) invalid() beacon.ExecutePayloadResponse {
+ return beacon.ExecutePayloadResponse{Status: beacon.INVALID.Status, LatestValidHash: api.eth.BlockChain().CurrentHeader().Hash()}
+}
+
+// assembleBlock creates a new block and returns the "execution
+// data" required for beacon clients to process the new block.
+func (api *ConsensusAPI) assembleBlock(parentHash common.Hash, params *beacon.PayloadAttributesV1) (*beacon.ExecutableDataV1, error) {
log.Info("Producing block", "parentHash", parentHash)
-
- bc := api.eth.BlockChain()
- parent := bc.GetBlockByHash(parentHash)
- if parent == nil {
- log.Warn("Cannot assemble block with parent hash to unknown block", "parentHash", parentHash)
- return nil, fmt.Errorf("cannot assemble block with unknown parent %s", parentHash)
- }
-
- if params.Timestamp < parent.Time() {
- return nil, fmt.Errorf("child timestamp lower than parent's: %d < %d", params.Timestamp, parent.Time())
- }
- if now := uint64(time.Now().Unix()); params.Timestamp > now+1 {
- diff := time.Duration(params.Timestamp-now) * time.Second
- log.Warn("Producing block too far in the future", "diff", common.PrettyDuration(diff))
- }
- pending := api.eth.TxPool().Pending(true)
- coinbase := params.SuggestedFeeRecipient
- num := parent.Number()
- header := &types.Header{
- ParentHash: parent.Hash(),
- Number: num.Add(num, common.Big1),
- Coinbase: coinbase,
- GasLimit: parent.GasLimit(), // Keep the gas limit constant in this prototype
- Extra: []byte{}, // TODO (MariusVanDerWijden) properly set extra data
- Time: params.Timestamp,
- }
- if config := api.eth.BlockChain().Config(); config.IsLondon(header.Number) {
- header.BaseFee = misc.CalcBaseFee(config, parent.Header())
- }
- if err := api.engine.Prepare(bc, header); err != nil {
- return nil, err
- }
- env, err := api.makeEnv(parent, header)
+ block, err := api.eth.Miner().GetSealingBlock(parentHash, params.Timestamp, params.SuggestedFeeRecipient, params.Random)
if err != nil {
return nil, err
}
- var (
- signer = types.MakeSigner(bc.Config(), header.Number)
- txHeap = types.NewTransactionsByPriceAndNonce(signer, pending, nil)
- transactions []*types.Transaction
- )
- for {
- if env.gasPool.Gas() < chainParams.TxGas {
- log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", chainParams.TxGas)
- break
- }
- tx := txHeap.Peek()
- if tx == nil {
- break
- }
-
- // The sender is only for logging purposes, and it doesn't really matter if it's correct.
- from, _ := types.Sender(signer, tx)
-
- // Execute the transaction
- env.state.Prepare(tx.Hash(), env.tcount)
- err = env.commitTransaction(tx, coinbase)
- switch err {
- case core.ErrGasLimitReached:
- // Pop the current out-of-gas transaction without shifting in the next from the account
- log.Trace("Gas limit exceeded for current block", "sender", from)
- txHeap.Pop()
-
- case core.ErrNonceTooLow:
- // New head notification data race between the transaction pool and miner, shift
- log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
- txHeap.Shift()
-
- case core.ErrNonceTooHigh:
- // Reorg notification data race between the transaction pool and miner, skip account =
- log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
- txHeap.Pop()
-
- case nil:
- // Everything ok, collect the logs and shift in the next transaction from the same account
- env.tcount++
- txHeap.Shift()
- transactions = append(transactions, tx)
-
- default:
- // Strange error, discard the transaction and get the next in line (note, the
- // nonce-too-high clause will prevent us from executing in vain).
- log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
- txHeap.Shift()
- }
- }
- // Create the block.
- block, err := api.engine.FinalizeAndAssemble(bc, header, env.state, transactions, nil /* uncles */, env.receipts)
- if err != nil {
- return nil, err
- }
- return BlockToExecutableData(block, params.Random), nil
-}
-
-func encodeTransactions(txs []*types.Transaction) [][]byte {
- var enc = make([][]byte, len(txs))
- for i, tx := range txs {
- enc[i], _ = tx.MarshalBinary()
- }
- return enc
-}
-
-func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
- var txs = make([]*types.Transaction, len(enc))
- for i, encTx := range enc {
- var tx types.Transaction
- if err := tx.UnmarshalBinary(encTx); err != nil {
- return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
- }
- txs[i] = &tx
- }
- return txs, nil
-}
-
-func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
- txs, err := decodeTransactions(params.Transactions)
- if err != nil {
- return nil, err
- }
- if len(params.ExtraData) > 32 {
- return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
- }
- number := big.NewInt(0)
- number.SetUint64(params.Number)
- header := &types.Header{
- ParentHash: params.ParentHash,
- UncleHash: types.EmptyUncleHash,
- Coinbase: params.FeeRecipient,
- Root: params.StateRoot,
- TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
- ReceiptHash: params.ReceiptsRoot,
- Bloom: types.BytesToBloom(params.LogsBloom),
- Difficulty: common.Big0,
- Number: number,
- GasLimit: params.GasLimit,
- GasUsed: params.GasUsed,
- Time: params.Timestamp,
- BaseFee: params.BaseFeePerGas,
- Extra: params.ExtraData,
- // TODO (MariusVanDerWijden) add params.Random to header once required
- }
- block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
- if block.Hash() != params.BlockHash {
- return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
- }
- return block, nil
-}
-
-func BlockToExecutableData(block *types.Block, random common.Hash) *ExecutableDataV1 {
- return &ExecutableDataV1{
- BlockHash: block.Hash(),
- ParentHash: block.ParentHash(),
- FeeRecipient: block.Coinbase(),
- StateRoot: block.Root(),
- Number: block.NumberU64(),
- GasLimit: block.GasLimit(),
- GasUsed: block.GasUsed(),
- BaseFeePerGas: block.BaseFee(),
- Timestamp: block.Time(),
- ReceiptsRoot: block.ReceiptHash(),
- LogsBloom: block.Bloom().Bytes(),
- Transactions: encodeTransactions(block.Transactions()),
- Random: random,
- ExtraData: block.Extra(),
- }
+ return beacon.BlockToExecutableData(block), nil
}
// Used in tests to add the list of transactions from a block to the tx pool.
@@ -470,17 +192,17 @@ func (api *ConsensusAPI) insertTransactions(txs types.Transactions) error {
func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
// shortcut if we entered PoS already
- if api.merger().PoSFinalized() {
+ if api.eth.Merger().PoSFinalized() {
return nil
}
// make sure the parent has enough terminal total difficulty
newHeadBlock := api.eth.BlockChain().GetBlockByHash(head)
if newHeadBlock == nil {
- return &GenericServerError
+ return &beacon.GenericServerError
}
td := api.eth.BlockChain().GetTd(newHeadBlock.Hash(), newHeadBlock.NumberU64())
if td != nil && td.Cmp(api.eth.BlockChain().Config().TerminalTotalDifficulty) < 0 {
- return &InvalidTB
+ return &beacon.InvalidTB
}
return nil
}
@@ -488,49 +210,22 @@ func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
// setHead is called to perform a force choice.
func (api *ConsensusAPI) setHead(newHead common.Hash) error {
log.Info("Setting head", "head", newHead)
- if api.light {
- headHeader := api.les.BlockChain().CurrentHeader()
- if headHeader.Hash() == newHead {
- return nil
- }
- newHeadHeader := api.les.BlockChain().GetHeaderByHash(newHead)
- if newHeadHeader == nil {
- return &GenericServerError
- }
- if err := api.les.BlockChain().SetChainHead(newHeadHeader); err != nil {
- return err
- }
- // Trigger the transition if it's the first `NewHead` event.
- merger := api.merger()
- if !merger.PoSFinalized() {
- merger.FinalizePoS()
- }
- return nil
- }
headBlock := api.eth.BlockChain().CurrentBlock()
if headBlock.Hash() == newHead {
return nil
}
newHeadBlock := api.eth.BlockChain().GetBlockByHash(newHead)
if newHeadBlock == nil {
- return &GenericServerError
+ return &beacon.GenericServerError
}
if err := api.eth.BlockChain().SetChainHead(newHeadBlock); err != nil {
return err
}
// Trigger the transition if it's the first `NewHead` event.
- if merger := api.merger(); !merger.PoSFinalized() {
+ if merger := api.eth.Merger(); !merger.PoSFinalized() {
merger.FinalizePoS()
}
// TODO (MariusVanDerWijden) are we really synced now?
api.eth.SetSynced()
return nil
}
-
-// Helper function, return the merger instance.
-func (api *ConsensusAPI) merger() *consensus.Merger {
- if api.light {
- return api.les.Merger()
- }
- return api.eth.Merger()
-}
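
The refactored ConsensusAPI keeps the three-call engine flow that TestFullAPI exercises: a forkchoice update with payload attributes schedules block construction, GetPayloadV1 fetches the cached payload by its deterministic id, and ExecutePayloadV1 imports it before a second forkchoice update promotes it to head. A condensed sketch of that flow, assuming `api` was built with NewConsensusAPI against a running node and `parent` is the current head block (as in the tests); error handling is elided:

attrs := beacon.PayloadAttributesV1{
	Timestamp:             parent.Time() + 1,
	Random:                common.Hash{},
	SuggestedFeeRecipient: parent.Coinbase(),
}
fcState := beacon.ForkchoiceStateV1{HeadBlockHash: parent.Hash()}

// 1. Ask the node to start building a payload on top of the head.
resp, err := api.ForkchoiceUpdatedV1(fcState, &attrs)
if err != nil || resp.Status != beacon.SUCCESS.Status {
	// handle error
}
// 2. Retrieve the cached payload via its id; resp.PayloadID equals
//    computePayloadId(parent.Hash(), &attrs).
payload, err := api.GetPayloadV1(*resp.PayloadID)
if err != nil {
	// handle error
}
// 3. Import the payload, then promote it to the canonical head.
execResp, err := api.ExecutePayloadV1(*payload)
if err != nil || execResp.Status != beacon.VALID.Status {
	// handle error
}
_, err = api.ForkchoiceUpdatedV1(beacon.ForkchoiceStateV1{
	HeadBlockHash:      payload.BlockHash,
	SafeBlockHash:      payload.ParentHash,
	FinalizedBlockHash: payload.ParentHash,
}, nil)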
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index 6e52c4fea..b824d22f8 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -17,14 +17,15 @@
package catalyst
import (
+ "fmt"
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/beacon"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -78,14 +79,14 @@ func TestEth2AssembleBlock(t *testing.T) {
n, ethservice := startEthService(t, genesis, blocks)
defer n.Close()
- api := NewConsensusAPI(ethservice, nil)
+ api := NewConsensusAPI(ethservice)
signer := types.NewEIP155Signer(ethservice.BlockChain().Config().ChainID)
tx, err := types.SignTx(types.NewTransaction(uint64(10), blocks[9].Coinbase(), big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, testKey)
if err != nil {
t.Fatalf("error signing transaction, err=%v", err)
}
ethservice.TxPool().AddLocal(tx)
- blockParams := PayloadAttributesV1{
+ blockParams := beacon.PayloadAttributesV1{
Timestamp: blocks[9].Time() + 5,
}
execData, err := api.assembleBlock(blocks[9].Hash(), &blockParams)
@@ -102,11 +103,11 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) {
n, ethservice := startEthService(t, genesis, blocks[:9])
defer n.Close()
- api := NewConsensusAPI(ethservice, nil)
+ api := NewConsensusAPI(ethservice)
// Put the 10th block's tx in the pool and produce a new block
api.insertTransactions(blocks[9].Transactions())
- blockParams := PayloadAttributesV1{
+ blockParams := beacon.PayloadAttributesV1{
Timestamp: blocks[8].Time() + 5,
}
execData, err := api.assembleBlock(blocks[8].Hash(), &blockParams)
@@ -123,8 +124,8 @@ func TestSetHeadBeforeTotalDifficulty(t *testing.T) {
n, ethservice := startEthService(t, genesis, blocks)
defer n.Close()
- api := NewConsensusAPI(ethservice, nil)
- fcState := ForkchoiceStateV1{
+ api := NewConsensusAPI(ethservice)
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: blocks[5].Hash(),
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
@@ -141,14 +142,14 @@ func TestEth2PrepareAndGetPayload(t *testing.T) {
n, ethservice := startEthService(t, genesis, blocks[:9])
defer n.Close()
- api := NewConsensusAPI(ethservice, nil)
+ api := NewConsensusAPI(ethservice)
// Put the 10th block's tx in the pool and produce a new block
api.insertTransactions(blocks[9].Transactions())
- blockParams := PayloadAttributesV1{
+ blockParams := beacon.PayloadAttributesV1{
Timestamp: blocks[8].Time() + 5,
}
- fcState := ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: blocks[8].Hash(),
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
@@ -158,13 +159,21 @@ func TestEth2PrepareAndGetPayload(t *testing.T) {
t.Fatalf("error preparing payload, err=%v", err)
}
payloadID := computePayloadId(fcState.HeadBlockHash, &blockParams)
- execData, err := api.GetPayloadV1(hexutil.Bytes(payloadID))
+ execData, err := api.GetPayloadV1(payloadID)
if err != nil {
t.Fatalf("error getting payload, err=%v", err)
}
if len(execData.Transactions) != blocks[9].Transactions().Len() {
t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions))
}
+ // Test invalid payloadID
+ var invPayload beacon.PayloadID
+ copy(invPayload[:], payloadID[:])
+ invPayload[0] = ^invPayload[0]
+ _, err = api.GetPayloadV1(invPayload)
+ if err == nil {
+ t.Fatal("expected error retrieving invalid payload")
+ }
}
func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan core.RemovedLogsEvent, wantNew, wantRemoved int) {
@@ -185,6 +194,48 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan co
}
}
+func TestInvalidPayloadTimestamp(t *testing.T) {
+ genesis, preMergeBlocks := generatePreMergeChain(10)
+ n, ethservice := startEthService(t, genesis, preMergeBlocks)
+ ethservice.Merger().ReachTTD()
+ defer n.Close()
+ var (
+ api = NewConsensusAPI(ethservice)
+ parent = ethservice.BlockChain().CurrentBlock()
+ )
+ tests := []struct {
+ time uint64
+ shouldErr bool
+ }{
+ {0, true},
+ {parent.Time(), true},
+ {parent.Time() - 1, true},
+ {parent.Time() + 1, false},
+ {uint64(time.Now().Unix()) + uint64(time.Minute), false},
+ }
+
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("Timestamp test: %v", i), func(t *testing.T) {
+ params := beacon.PayloadAttributesV1{
+ Timestamp: test.time,
+ Random: crypto.Keccak256Hash([]byte{byte(123)}),
+ SuggestedFeeRecipient: parent.Coinbase(),
+ }
+ fcState := beacon.ForkchoiceStateV1{
+ HeadBlockHash: parent.Hash(),
+ SafeBlockHash: common.Hash{},
+ FinalizedBlockHash: common.Hash{},
+ }
+ _, err := api.ForkchoiceUpdatedV1(fcState, &params)
+ if test.shouldErr && err == nil {
+ t.Fatalf("expected error preparing payload with invalid timestamp, err=%v", err)
+ } else if !test.shouldErr && err != nil {
+ t.Fatalf("error preparing payload with valid timestamp, err=%v", err)
+ }
+ })
+ }
+}
+
func TestEth2NewBlock(t *testing.T) {
genesis, preMergeBlocks := generatePreMergeChain(10)
n, ethservice := startEthService(t, genesis, preMergeBlocks)
@@ -192,7 +243,7 @@ func TestEth2NewBlock(t *testing.T) {
defer n.Close()
var (
- api = NewConsensusAPI(ethservice, nil)
+ api = NewConsensusAPI(ethservice)
parent = preMergeBlocks[len(preMergeBlocks)-1]
// This EVM code generates a log when the contract is created.
@@ -210,13 +261,13 @@ func TestEth2NewBlock(t *testing.T) {
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
ethservice.TxPool().AddLocal(tx)
- execData, err := api.assembleBlock(parent.Hash(), &PayloadAttributesV1{
+ execData, err := api.assembleBlock(parent.Hash(), &beacon.PayloadAttributesV1{
Timestamp: parent.Time() + 5,
})
if err != nil {
t.Fatalf("Failed to create the executable data %v", err)
}
- block, err := ExecutableDataToBlock(*execData)
+ block, err := beacon.ExecutableDataToBlock(*execData)
if err != nil {
t.Fatalf("Failed to convert executable data to block %v", err)
}
@@ -228,7 +279,7 @@ func TestEth2NewBlock(t *testing.T) {
t.Fatalf("Chain head shouldn't be updated")
}
checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
- fcState := ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: block.Hash(),
SafeBlockHash: block.Hash(),
FinalizedBlockHash: block.Hash(),
@@ -250,13 +301,13 @@ func TestEth2NewBlock(t *testing.T) {
)
parent = preMergeBlocks[len(preMergeBlocks)-1]
for i := 0; i < 10; i++ {
- execData, err := api.assembleBlock(parent.Hash(), &PayloadAttributesV1{
+ execData, err := api.assembleBlock(parent.Hash(), &beacon.PayloadAttributesV1{
Timestamp: parent.Time() + 6,
})
if err != nil {
t.Fatalf("Failed to create the executable data %v", err)
}
- block, err := ExecutableDataToBlock(*execData)
+ block, err := beacon.ExecutableDataToBlock(*execData)
if err != nil {
t.Fatalf("Failed to convert executable data to block %v", err)
}
@@ -268,7 +319,7 @@ func TestEth2NewBlock(t *testing.T) {
t.Fatalf("Chain head shouldn't be updated")
}
- fcState := ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: block.Hash(),
SafeBlockHash: block.Hash(),
FinalizedBlockHash: block.Hash(),
@@ -362,7 +413,7 @@ func TestFullAPI(t *testing.T) {
ethservice.Merger().ReachTTD()
defer n.Close()
var (
- api = NewConsensusAPI(ethservice, nil)
+ api = NewConsensusAPI(ethservice)
parent = ethservice.BlockChain().CurrentBlock()
// This EVM code generates a log when the contract is created.
logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
@@ -373,12 +424,12 @@ func TestFullAPI(t *testing.T) {
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
ethservice.TxPool().AddLocal(tx)
- params := PayloadAttributesV1{
+ params := beacon.PayloadAttributesV1{
Timestamp: parent.Time() + 1,
Random: crypto.Keccak256Hash([]byte{byte(i)}),
SuggestedFeeRecipient: parent.Coinbase(),
}
- fcState := ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: parent.Hash(),
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
@@ -387,11 +438,11 @@ func TestFullAPI(t *testing.T) {
if err != nil {
t.Fatalf("error preparing payload, err=%v", err)
}
- if resp.Status != SUCCESS.Status {
+ if resp.Status != beacon.SUCCESS.Status {
t.Fatalf("error preparing payload, invalid status: %v", resp.Status)
}
payloadID := computePayloadId(parent.Hash(), &params)
- payload, err := api.GetPayloadV1(hexutil.Bytes(payloadID))
+ payload, err := api.GetPayloadV1(payloadID)
if err != nil {
t.Fatalf("can't get payload: %v", err)
}
@@ -399,10 +450,10 @@ func TestFullAPI(t *testing.T) {
if err != nil {
t.Fatalf("can't execute payload: %v", err)
}
- if execResp.Status != VALID.Status {
+ if execResp.Status != beacon.VALID.Status {
t.Fatalf("invalid status: %v", execResp.Status)
}
- fcState = ForkchoiceStateV1{
+ fcState = beacon.ForkchoiceStateV1{
HeadBlockHash: payload.BlockHash,
SafeBlockHash: payload.ParentHash,
FinalizedBlockHash: payload.ParentHash,
@@ -414,6 +465,5 @@ func TestFullAPI(t *testing.T) {
t.Fatalf("Chain head should be updated")
}
parent = ethservice.BlockChain().CurrentBlock()
-
}
}
diff --git a/eth/catalyst/gen_payload.go b/eth/catalyst/gen_payload.go
deleted file mode 100644
index a0b00fcfd..000000000
--- a/eth/catalyst/gen_payload.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-
-package catalyst
-
-import (
- "encoding/json"
-
- "github.com/ethereum/go-ethereum/common/hexutil"
-)
-
-var _ = (*payloadResponseMarshaling)(nil)
-
-// MarshalJSON marshals as JSON.
-func (p PayloadResponse) MarshalJSON() ([]byte, error) {
- type PayloadResponse struct {
- PayloadID hexutil.Uint64 `json:"payloadId"`
- }
- var enc PayloadResponse
- enc.PayloadID = hexutil.Uint64(p.PayloadID)
- return json.Marshal(&enc)
-}
-
-// UnmarshalJSON unmarshals from JSON.
-func (p *PayloadResponse) UnmarshalJSON(input []byte) error {
- type PayloadResponse struct {
- PayloadID *hexutil.Uint64 `json:"payloadId"`
- }
- var dec PayloadResponse
- if err := json.Unmarshal(input, &dec); err != nil {
- return err
- }
- if dec.PayloadID != nil {
- p.PayloadID = uint64(*dec.PayloadID)
- }
- return nil
-}
diff --git a/eth/catalyst/queue.go b/eth/catalyst/queue.go
new file mode 100644
index 000000000..aa2ce7823
--- /dev/null
+++ b/eth/catalyst/queue.go
@@ -0,0 +1,78 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package catalyst
+
+import (
+ "sync"
+
+ "github.com/ethereum/go-ethereum/core/beacon"
+)
+
+// maxTrackedPayloads is the maximum number of prepared payloads the execution
+// engine tracks before evicting old ones. Ideally we should only ever track the
+// latest one; but have a slight wiggle room for non-ideal conditions.
+const maxTrackedPayloads = 10
+
+// payloadQueueItem represents an id->payload tuple to store until it's retrieved
+// or evicted.
+type payloadQueueItem struct {
+ id beacon.PayloadID
+ payload *beacon.ExecutableDataV1
+}
+
+// payloadQueue tracks the latest handful of constructed payloads to be retrieved
+// by the beacon chain if block production is requested.
+type payloadQueue struct {
+ payloads []*payloadQueueItem
+ lock sync.RWMutex
+}
+
+// newPayloadQueue creates a pre-initialized queue with a fixed number of slots
+// all containing empty items.
+func newPayloadQueue() *payloadQueue {
+ return &payloadQueue{
+ payloads: make([]*payloadQueueItem, maxTrackedPayloads),
+ }
+}
+
+// put inserts a new payload into the queue at the given id.
+func (q *payloadQueue) put(id beacon.PayloadID, data *beacon.ExecutableDataV1) {
+ q.lock.Lock()
+ defer q.lock.Unlock()
+
+ copy(q.payloads[1:], q.payloads)
+ q.payloads[0] = &payloadQueueItem{
+ id: id,
+ payload: data,
+ }
+}
+
+// get retrieves a previously stored payload item or nil if it does not exist.
+func (q *payloadQueue) get(id beacon.PayloadID) *beacon.ExecutableDataV1 {
+ q.lock.RLock()
+ defer q.lock.RUnlock()
+
+ for _, item := range q.payloads {
+ if item == nil {
+ return nil // no more items
+ }
+ if item.id == id {
+ return item.payload
+ }
+ }
+ return nil
+}
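
The queue is a fixed-size, newest-first buffer: put shifts the existing entries down by one and writes the new item into slot 0, so once more than maxTrackedPayloads payloads have been prepared, the oldest one silently falls off the end and get returns nil for it. A small sketch of that behaviour inside the catalyst package (the ids and payload contents are illustrative):

q := newPayloadQueue()

first := beacon.PayloadID{1}
q.put(first, &beacon.ExecutableDataV1{Number: 1})

// Fill the queue with maxTrackedPayloads further entries; the first
// payload is shifted out of the last slot in the process.
for i := 0; i < maxTrackedPayloads; i++ {
	q.put(beacon.PayloadID{byte(i + 2)}, &beacon.ExecutableDataV1{Number: uint64(i + 2)})
}
if q.get(first) != nil {
	// unreachable: the oldest payload has been evicted
}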
diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go
index 628a56504..06c61ae55 100644
--- a/eth/fetcher/block_fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -364,6 +364,7 @@ func testSequentialAnnouncements(t *testing.T, light bool) {
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester(light)
+ defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
@@ -743,7 +744,7 @@ func testInvalidNumberAnnouncement(t *testing.T, light bool) {
badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0)
imported := make(chan interface{})
- announced := make(chan interface{})
+ announced := make(chan interface{}, 2)
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
if light {
if header == nil {
@@ -806,6 +807,7 @@ func TestEmptyBlockShortCircuit(t *testing.T) {
hashes, blocks := makeChain(32, 0, genesis)
tester := newTester(false)
+ defer tester.fetcher.Stop()
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
diff --git a/eth/handler.go b/eth/handler.go
index 55ca869c7..921a62dba 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -433,7 +433,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
return
}
peer.Log().Debug("Whitelist block verified", "number", number, "hash", hash)
-
+ res.Done <- nil
case <-timeout.C:
peer.Log().Warn("Whitelist challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
h.removePeer(peer.ID())
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index 0a1ee2637..314776dff 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -299,7 +299,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
size uint64
last common.Hash
)
- for it.Next() && size < req.Bytes {
+ for it.Next() {
hash, account := it.Hash(), common.CopyBytes(it.Account())
// Track the returned interval for the Merkle proofs
@@ -315,6 +315,9 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
break
}
+ if size > req.Bytes {
+ break
+ }
}
it.Release()
@@ -464,7 +467,7 @@ func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [
// Peers should not request the empty code, but if they do, at
// least send them back a correct response without db lookups
codes = append(codes, []byte{})
- } else if blob, err := chain.ContractCode(hash); err == nil {
+ } else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
codes = append(codes, blob)
bytes += uint64(len(blob))
}
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index be8644a5a..d4e7f1676 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -1781,7 +1781,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
for i, account := range res.accounts {
// Check if the account is a contract with an unknown code
if !bytes.Equal(account.CodeHash, emptyCode[:]) {
- if code := rawdb.ReadCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)); code == nil {
+ if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) {
res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
res.task.needCode[i] = true
res.task.pend++
@@ -1789,7 +1789,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
}
// Check if the account is a contract with an unknown storage trie
if account.Root != emptyRoot {
- if node, err := s.db.Get(account.Root[:]); err != nil || node == nil {
+ if ok, err := s.db.Has(account.Root[:]); err != nil || !ok {
// If there was a previous large state retrieval in progress,
// don't restart it from scratch. This happens if a sync cycle
// is interrupted and resumed later. However, *do* update the
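
Both hunks replace full reads with existence checks: the syncer only needs to know whether the bytecode or the storage-trie root node is already present locally, so rawdb.HasCodeWithPrefix and db.Has answer that without copying the value out of the database just to discard it. A minimal sketch of the pattern, assuming `db` is an ethdb.KeyValueReader and `key` is the hash being probed:

// Existence check: no value bytes are read or copied.
if ok, err := db.Has(key); err == nil && ok {
	// already stored locally, no need to schedule a retrieval task
}
// Full read: only pay for it when the value itself is needed.
if blob, err := db.Get(key); err == nil {
	_ = blob
}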
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index b784e0bcf..767083408 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -593,11 +593,11 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
if threads > len(txs) {
threads = len(txs)
}
- blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
blockHash := block.Hash()
for th := 0; th < threads; th++ {
pend.Add(1)
go func() {
+ blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
defer pend.Done()
// Fetch and execute the next transaction trace tasks
for task := range jobs {
@@ -618,6 +618,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
}
// Feed the transactions into the tracers and return
var failed error
+ blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
for i, tx := range txs {
// Send the trace task over for execution
jobs <- &txTraceTask{statedb: statedb.Copy(), index: i}
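
Moving NewEVMBlockContext inside each worker goroutine (with a separate instance for the feeding loop) gives every tracing thread its own block context value instead of one shared instance, presumably to avoid concurrent access to that context while transactions are traced in parallel. The general shape of the pattern, with the geth-specific pieces replaced by hypothetical stand-ins (`task`, `newWorkerContext`, `process`):

jobs := make(chan *task, threads)
var wg sync.WaitGroup
for th := 0; th < threads; th++ {
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Build per-goroutine state inside the goroutine instead of
		// capturing one shared value that every worker would touch.
		ctx := newWorkerContext() // stand-in for NewEVMBlockContext
		for t := range jobs {
			process(ctx, t) // stand-in for the per-transaction trace work
		}
	}()
}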
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index 7521a98f2..cf7c1e6c0 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -130,10 +130,6 @@ func TestCallTracerLegacy(t *testing.T) {
testCallTracer("callTracerLegacy", "call_tracer_legacy", t)
}
-func TestCallTracerJs(t *testing.T) {
- testCallTracer("callTracerJs", "call_tracer", t)
-}
-
func TestCallTracerNative(t *testing.T) {
testCallTracer("callTracer", "call_tracer", t)
}
diff --git a/eth/tracers/js/internal/tracers/assets.go b/eth/tracers/js/internal/tracers/assets.go
index a2bb69dee..a117c9f06 100644
--- a/eth/tracers/js/internal/tracers/assets.go
+++ b/eth/tracers/js/internal/tracers/assets.go
@@ -2,12 +2,11 @@
// sources:
// 4byte_tracer_legacy.js (2.933kB)
// bigram_tracer.js (1.712kB)
-// call_tracer_js.js (3.497kB)
// call_tracer_legacy.js (8.956kB)
// evmdis_tracer.js (4.215kB)
-// noop_tracer.js (1.271kB)
+// noop_tracer_legacy.js (1.271kB)
// opcount_tracer.js (1.372kB)
-// prestate_tracer.js (4.287kB)
+// prestate_tracer_legacy.js (4.483kB)
// trigram_tracer.js (1.788kB)
// unigram_tracer.js (1.469kB)
@@ -118,26 +117,6 @@ func bigram_tracerJs() (*asset, error) {
return a, nil
}
-var _call_tracer_jsJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x56\x5f\x6f\xdb\x38\x0c\x7f\x8e\x3f\x05\xaf\x0f\x4b\x82\x65\x71\xbb\x03\xf6\xd0\x2d\x03\x72\x45\xbb\x05\xe8\xb5\x45\x9a\xde\x50\x14\x7d\x50\x6c\xda\xd6\xa6\x48\x86\x44\x37\xcd\x6d\xfd\xee\x07\x4a\x76\x6a\x67\x59\x6f\x2f\x06\x2c\x92\x3f\xfe\xfb\x51\x54\x1c\xc3\x89\x29\x37\x56\xe6\x05\xc1\xdb\xc3\xb7\x47\xb0\x28\x10\x72\xf3\x06\xa9\x40\x8b\xd5\x0a\xa6\x15\x15\xc6\xba\x28\x8e\x61\x51\x48\x07\x99\x54\x08\xd2\x41\x29\x2c\x81\xc9\x80\x76\xf4\x95\x5c\x5a\x61\x37\xe3\x28\x8e\x83\xcd\x5e\x31\x23\x64\x16\x11\x9c\xc9\x68\x2d\x2c\x1e\xc3\xc6\x54\x90\x08\x0d\x16\x53\xe9\xc8\xca\x65\x45\x08\x92\x40\xe8\x34\x36\x16\x56\x26\x95\xd9\x86\x21\x25\x41\xa5\x53\xb4\xde\x35\xa1\x5d\xb9\x26\x8e\x4f\x17\x37\x70\x8e\xce\xa1\x85\x4f\xa8\xd1\x0a\x05\x57\xd5\x52\xc9\x04\xce\x65\x82\xda\x21\x08\x07\x25\x9f\xb8\x02\x53\x58\x7a\x38\x36\x3c\xe3\x50\xae\xeb\x50\xe0\xcc\x54\x3a\x15\x24\x8d\x1e\x01\x4a\x8e\x1c\x1e\xd0\x3a\x69\x34\xfc\xd9\xb8\xaa\x01\x47\x60\x2c\x83\x0c\x04\x71\x02\x16\x4c\xc9\x76\x43\x10\x7a\x03\x4a\xd0\xb3\xe9\x6f\x14\xe4\x39\xef\x14\xa4\xf6\x6e\x0a\x53\x22\x50\x21\x88\xb3\x5e\x4b\xa5\x60\x89\x50\x39\xcc\x2a\x35\x62\xb4\x65\x45\xf0\x65\xb6\xf8\x7c\x79\xb3\x80\xe9\xc5\x2d\x7c\x99\xce\xe7\xd3\x8b\xc5\xed\x7b\x58\x4b\x2a\x4c\x45\x80\x0f\x18\xa0\xe4\xaa\x54\x12\x53\x58\x0b\x6b\x85\xa6\x0d\x98\x8c\x11\xfe\x3e\x9d\x9f\x7c\x9e\x5e\x2c\xa6\x7f\xcd\xce\x67\x8b\x5b\x30\x16\xce\x66\x8b\x8b\xd3\xeb\x6b\x38\xbb\x9c\xc3\x14\xae\xa6\xf3\xc5\xec\xe4\xe6\x7c\x3a\x87\xab\x9b\xf9\xd5\xe5\xf5\xe9\x18\xae\x91\xa3\x42\xb6\xff\xff\x9a\x67\xbe\x7b\x16\x21\x45\x12\x52\xb9\xa6\x12\xb7\xa6\x02\x57\x98\x4a\xa5\x50\x88\x07\x04\x8b\x09\xca\x07\x4c\x41\x40\x62\xca\xcd\x6f\x37\x95\xb1\x84\x32\x3a\xf7\x39\xff\x92\x90\x30\xcb\x40\x1b\x1a\x81\x43\x84\x0f\x05\x51\x79\x1c\xc7\xeb\xf5\x7a\x9c\xeb\x6a\x6c\x6c\x1e\xab\x00\xe7\xe2\x8f\xe3\x28\x62\xd0\x44\x28\x75\x66\xc5\x0a\x17\x56\x24\x68\xb9\xee\xce\xc3\x6b\x5c\x7b\x21\x64\x2c\x05\xb2\x22\x91\x3a\x87\x15\x52\x61\x52\x07\x64\xc0\x62\x69\x2c\xd5\x9d\x02\xa9\x33\x63\x57\x9e\x51\x3e\xd8\x25\x37\x46\x6a\x42\xab\x85\x82\x15\x3a\x27\x72\xf4\x2c\x16\x0c\xa6\x9d\x48\xc8\x53\xe6\x7b\xd4\x63\x3f\x8e\x44\xf2\xed\x18\xee\xbe\x3f\xdd\x8f\xa2\x5e\x26\x2a\x45\xc7\x90\x55\xda\x6b\x0d\x94\xc9\x47\x90\x2e\x87\xf0\xfd\x69\x14\xf5\x2c\xba\xae\x38\xa1\xc7\x5a\x1c\xf5\x7a\x71\x0c\x57\x16\x4b\x66\xb9\xa9\x98\x9d\xb5\x73\x1f\x62\xd4\xeb\x3d\x08\x0b\x01\x01\x26\xde\xa0\x47\x9b\x12\x8f\x01\x00\x12\x7a\x1c\xf3\xcf\x88\x4f\x33\x6b\x56\xfe\x94\xcc\x67\x7c\x64\x1f\x63\x3e\x1a\x7a\x21\x19\x2f\x6a\x0b\xc9\x04\xd1\x83\x50\x95\x87\xeb\x1f\x3e\xf6\xe1\xb5\x07\xf5\x67\x63\x32\xd7\x64\xa5\xce\x07\x47\xef\x82\x6a\x2e\x5c\x80\xa9\x55\x97\x32\x9f\x69\xf2\x68\xb9\x70\xc3\xbd\x06\x37\x0e\xd3\xe3\xfd\x06\x2c\xda\x63\x24\x75\x59\xd1\x71\x27\x56\x7f\x14\xa4\xa6\xa2\x20\x7e\x96\x86\x23\x2f\x7e\x8a\x7a\x3d\x99\xc1\x80\x0a\xe9\xc6\xdb\x3e\xdd\x1d\xde\x87\x1f\xf8\x63\x32\xf1\x37\x55\x26\x35\xa6\xa1\xfe\x75\x7b\x6a\x85\x09\xfc\xc2\xf4\x45\x70\xb4\xd6\xd8\x97\xc0\x83\xc2\x3e\x70\x2f\x61\x70\x40\xe5\x10\x18\x9f\x73\xfa\x6d\xc4\xad\x72\x2b\xc0\x8e\x4a\x07\x03\x5e\xbd\xda\x23\x3e\xc0\x47\x4c\x2a\xa6\x26\x58\x7c\x40\x4b\x98\x1e\xc0\x8f\x1f\x35\xed\xea\xfa\xc2\x64\x32\x39\x38\x7c\x3c\x18\xd6\x71\xa4\xa8\x90\xb0\xab\xe3\x63\x88\x38\x46\xaa\xac\x0e\xd9\x66\x52\x0b\x25\xff\xc5\xda\xed\x30\xea\xf1\x4c\x20\x8f\x5a\x6b\x24\xfc\xd8\x06\x64\x26\xbc\x1f\xe5\x0e\xdd\xbd\xc2\x38\x47\x5a\x6c\x4a\x1c\x0c\x5b\x94\x0f\x44\xd8\xca\xcf\xac\x59\x0d\x86\xcf\xb4\xdf\x11\x2f\x4c\x23\xac\x79\xb6\x
23\x9f\xf1\x69\xa3\xe2\x09\xdf\xe5\xee\x56\xf1\x93\x70\x83\x61\x8b\xbe\xfd\xa3\x77\xfd\x0e\x07\xb7\x9a\xff\xf0\x34\x0d\x86\x3b\xdd\xf4\xb9\x71\x9e\x61\xda\x26\xbf\x70\x53\x1b\x77\xe7\xa4\xf6\xd2\x65\xd3\xb8\xac\x5c\x31\xe0\xdf\xa6\xc6\x8f\x92\x76\x4b\x3c\x0f\x4d\xd8\x16\x5a\xa1\xfe\x89\x96\x63\x85\x3a\xa7\xa2\x4e\x83\x35\x3e\xc2\x51\xdd\xf5\x56\x73\x76\xbd\x9b\x72\x30\xdc\xe6\x54\x8f\x37\x4c\xf6\x95\x2f\x04\x51\x17\x91\xd5\x7e\x2e\x64\xe3\xab\xa1\xf9\x8e\xdd\x29\x1f\x07\x77\x1c\x63\xad\xb5\x67\x5a\x42\x34\x0d\x83\xdb\xcd\x7e\x06\xbb\xf4\xd2\xc1\xd0\xc3\xd5\x73\xd8\x32\x6e\x42\x68\xa6\x2c\xb8\xf4\x22\xa6\xa6\x77\xdb\x3f\x99\x9f\x4e\x17\xa7\x7d\x9e\x9a\xbd\x92\xb7\xfd\x26\xa0\x66\x70\x82\x9a\xf1\x67\x4f\x51\xf3\xe1\x6a\xbf\x99\xc0\x51\x93\xd9\xce\x85\xa1\x50\xbf\x39\x6a\x2e\xb3\xbd\xf9\xbe\x68\x00\x77\xf7\x5b\x4f\x2f\x28\x76\x98\xc4\xda\xcc\xa6\x38\x86\x66\x94\xf9\x5d\x60\x51\x10\x3a\x7e\x18\x30\x1b\xcc\xf2\x2b\x26\xbc\x5c\x79\xe9\xf2\x3e\xf6\xaa\x90\xa2\x93\x16\x53\xc8\x24\xaa\x14\x0c\xbf\x10\xf9\xe9\xf1\xd5\x19\xed\x01\x1d\x5a\xc9\x88\x7e\x0f\x8f\xc3\x6b\x56\x32\xa8\x96\x09\xd2\x06\x32\x14\x54\x59\xe4\xf5\x5d\x0a\xe7\x60\x85\x42\x4b\x9d\x67\x95\x52\x1b\x30\x36\x45\x06\x0f\xf7\x8a\xf3\x80\x64\x78\xc1\x5b\x07\xeb\xc2\x40\x6a\x74\xbf\x5e\xea\xa5\x45\x7e\xaf\x8d\xe0\x6b\xe5\x88\x5f\x75\xa5\x12\x1b\x90\x34\x8e\x7a\x4d\x52\xed\xfd\xcc\x99\x6f\x47\xc4\x19\xbe\x10\x7f\x5e\xbe\x4d\x9b\xbb\xdb\xd7\x1f\xf3\x5f\x77\xef\xd6\xdd\xee\x6e\xdc\xe7\xe9\xef\xae\xd7\x66\x82\xba\x3b\xb4\x3d\x57\xdd\x45\xe9\x25\xfe\xaf\xbb\x22\x5b\xdc\xf7\x02\xcf\xe0\xad\x81\xff\x0b\x51\xca\x55\x3b\x27\xb9\x0a\xf1\x78\x2e\x6c\xd5\xfd\x5f\x73\xbf\x71\x17\x07\x5c\x9c\x6f\xb8\xe1\x87\x71\xa8\x51\xcd\x41\xe6\x6d\x38\xb8\xfb\x86\x9b\xfb\xfd\x3c\xad\xa7\xa0\xa5\xd7\x30\xb3\xb9\x3f\x83\xe8\x85\xc5\xbd\x0d\x42\x4e\x0e\xdf\x83\xfc\xd0\x36\xa8\xef\xb0\xf7\x20\x5f\xbf\x6e\x5c\xb6\xe5\x77\xf2\xbe\xb9\xc2\xb6\x0b\x6a\x47\x3e\x6c\x07\x54\x6f\xb4\xa0\x12\xf5\x9e\xa2\xa7\xe8\xbf\x00\x00\x00\xff\xff\x2a\xac\x9f\xff\xa9\x0d\x00\x00")
-
-func call_tracer_jsJsBytes() ([]byte, error) {
- return bindataRead(
- _call_tracer_jsJs,
- "call_tracer_js.js",
- )
-}
-
-func call_tracer_jsJs() (*asset, error) {
- bytes, err := call_tracer_jsJsBytes()
- if err != nil {
- return nil, err
- }
-
- info := bindataFileInfo{name: "call_tracer_js.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x42, 0x13, 0x7a, 0x14, 0xbf, 0xa7, 0x49, 0x4f, 0xb4, 0x4f, 0x45, 0x1, 0xbc, 0x9e, 0xd1, 0x8e, 0xc7, 0xee, 0x61, 0xfa, 0x82, 0x52, 0xa4, 0x78, 0xfe, 0xff, 0xb1, 0x68, 0x1d, 0xcc, 0x1d, 0x8e}}
- return a, nil
-}
-
var _call_tracer_legacyJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5a\xdf\x6f\x1b\x37\xf2\x7f\x96\xfe\x8a\x89\x1f\x6a\x09\x51\x24\x39\xe9\xb7\x5f\xc0\xae\x7a\x50\x1d\x25\x35\xe0\xc6\x81\xad\x34\x08\x82\x3c\x50\xbb\xb3\x12\x6b\x8a\xdc\x92\x5c\xc9\xba\xd6\xff\xfb\x61\x86\xdc\xd5\xae\x24\x3b\xbe\x5e\x71\xe8\xbd\x69\x97\x33\xc3\xe1\xcc\x67\x7e\x71\x35\x18\xc0\xb9\xc9\x37\x56\xce\x17\x1e\x5e\x0e\x4f\xfe\x1f\xa6\x0b\x84\xb9\x79\x81\x7e\x81\x16\x8b\x25\x8c\x0b\xbf\x30\xd6\xb5\x07\x03\x98\x2e\xa4\x83\x4c\x2a\x04\xe9\x20\x17\xd6\x83\xc9\xc0\xef\xd0\x2b\x39\xb3\xc2\x6e\xfa\xed\xc1\x20\xf0\x1c\x5c\x26\x09\x99\x45\x04\x67\x32\xbf\x16\x16\x4f\x61\x63\x0a\x48\x84\x06\x8b\xa9\x74\xde\xca\x59\xe1\x11\xa4\x07\xa1\xd3\x81\xb1\xb0\x34\xa9\xcc\x36\x24\x52\x7a\x28\x74\x8a\x96\xb7\xf6\x68\x97\xae\xd4\xe3\xed\xbb\x0f\x70\x89\xce\xa1\x85\xb7\xa8\xd1\x0a\x05\xef\x8b\x99\x92\x09\x5c\xca\x04\xb5\x43\x10\x0e\x72\x7a\xe3\x16\x98\xc2\x8c\xc5\x11\xe3\x1b\x52\xe5\x26\xaa\x02\x6f\x4c\xa1\x53\xe1\xa5\xd1\x3d\x40\x49\x9a\xc3\x0a\xad\x93\x46\xc3\xab\x72\xab\x28\xb0\x07\xc6\x92\x90\x8e\xf0\x74\x00\x0b\x26\x27\xbe\x2e\x08\xbd\x01\x25\xfc\x96\xf5\x09\x06\xd9\x9e\x3b\x05\xa9\x79\x9b\x85\xc9\x11\xfc\x42\x78\x3a\xf5\x5a\x2a\x05\x33\x84\xc2\x61\x56\xa8\x1e\x49\x9b\x15\x1e\x3e\x5e\x4c\x7f\xba\xfa\x30\x85\xf1\xbb\x4f\xf0\x71\x7c\x7d\x3d\x7e\x37\xfd\x74\x06\x6b\xe9\x17\xa6\xf0\x80\x2b\x0c\xa2\xe4\x32\x57\x12\x53\x58\x0b\x6b\x85\xf6\x1b\x30\x19\x49\xf8\x79\x72\x7d\xfe\xd3\xf8\xdd\x74\xfc\xe3\xc5\xe5\xc5\xf4\x13\x18\x0b\x6f\x2e\xa6\xef\x26\x37\x37\xf0\xe6\xea\x1a\xc6\xf0\x7e\x7c\x3d\xbd\x38\xff\x70\x39\xbe\x86\xf7\x1f\xae\xdf\x5f\xdd\x4c\xfa\x70\x83\xa4\x15\x12\xff\xd7\x6d\x9e\xb1\xf7\x2c\x42\x8a\x5e\x48\xe5\x4a\x4b\x7c\x32\x05\xb8\x85\x29\x54\x0a\x0b\xb1\x42\xb0\x98\xa0\x5c\x61\x0a\x02\x12\x93\x6f\x9e\xec\x54\x92\x25\x94\xd1\x73\x3e\xf3\x83\x80\x84\x8b\x0c\xb4\xf1\x3d\x70\x88\xf0\xfd\xc2\xfb\xfc\x74\x30\x58\xaf\xd7\xfd\xb9\x2e\xfa\xc6\xce\x07\x2a\x88\x73\x83\x1f\xfa\x6d\x92\x99\x08\xa5\xa6\x56\x24\x68\xc9\x39\x02\xb2\x82\xcc\xaf\xcc\x5a\x83\xb7\x42\x3b\x91\x90\xab\xe9\x77\xc2\x60\x14\x1e\xf0\x8e\x9e\xbc\x23\xd0\x82\xc5\xdc\x58\xfa\xad\x54\x89\x33\xa9\x3d\x5a\x2d\x14\xcb\x76\xb0\x14\x29\xc2\x6c\x03\xa2\x2e\xb0\x57\x3f\x0c\xc1\x28\xb8\x1b\xa4\xce\x8c\x5d\x32\x2c\xfb\xed\xdf\xdb\xad\xa8\xa1\xf3\x22\xb9\x25\x05\x49\x7e\x52\x58\x8b\xda\x93\x29\x0b\xeb\xe4\x0a\x99\x04\x02\x4d\xb4\xe7\xe4\x97\x9f\x01\xef\x30\x29\x82\xa4\x56\x25\xe4\x14\x3e\xff\x7e\xff\xa5\xd7\x66\xd1\x29\xba\x04\x75\x8a\x29\x9f\xef\xd6\xc1\x7a\xc1\x16\x85\x35\x1e\xaf\x10\x7e\x2d\x9c\xaf\xd1\x64\xd6\x2c\x41\x68\x30\x05\x21\xbe\x6e\x1d\xa9\xbd\x61\x81\x82\x7e\x6b\xb4\xac\x51\xbf\xdd\xaa\x98\x4f\x21\x13\xca\x61\xdc\xd7\x79\xcc\xe9\x34\x52\xaf\xcc\x2d\x49\x36\x96\x20\x6c\x37\x60\xf2\xc4\xa4\x31\x18\xe8\x1c\xd5\x31\xd0\xf5\xdb\x2d\xe2\x3b\x85\xac\xd0\xbc\x6d\x47\x99\x79\x0f\xd2\x59\x17\x7e\x6f\xb7\x48\xec\xb9\xc8\x7d\x61\x91\xed\x89\xd6\x1a\xeb\x40\x2e\x97\x98\x4a\xe1\x51\x6d\xda\xad\xd6\x4a\xd8\xb0\x00\x23\x50\x66\xde\x9f\xa3\x9f\xd0\x63\xa7\x7b\xd6\x6e\xb5\x64\x06\x9d\xb0\xfa\x6c\x34\xe2\xec\x93\x49\x8d\x69\x10\xdf\xf2\x0b\xe9\xfa\x99\x28\x94\xaf\xf6\x25\xa6\x96\x45\x5f\x58\x4d\x3f\xef\x83\x16\x1f\x11\x8c\x56\x1b\x48\x28\xcb\x88\x19\x85\xa7\xdb\x38\x8f\xcb\x78\x38\xd7\x83\x4c\x38\x32\xa1\xcc\x60\x8d\x90\x5b\x7c\x91\x2c\x90\x7c\xa7\x13\x8c\x5a\xba\x8d\x63\xa7\x8e\x80\x76\xeb\x9b\xbc\xef\xcd\xbb\x62\x39\x43\xdb\xe9\xc2\x37\x30\xbc\xcb\x86\x5d\x18\x8d\xf8\x47\xa9\x7b\xe4\x89\xfa\x92\x14\x93\xc7\x83\x32\xff\x8d\xb7\x52\xcf\xc3\x59\xa3\xae\x17\x19\x08\xd0\xb8\x86\xc4\x6
8\x06\x35\x79\x65\x86\x52\xcf\x21\xb1\x28\x3c\xa6\x3d\x10\x69\x0a\xde\x04\xe4\x55\x38\x6b\x6e\x09\xdf\x7c\x03\x1d\xda\x6c\x04\xc7\xe7\xd7\x93\xf1\x74\x72\x0c\x7f\xfc\x01\xe1\xcd\x51\x78\xf3\xf2\xa8\x5b\xd3\x4c\xea\xab\x2c\x8b\xca\xb1\xc0\x7e\x8e\x78\xdb\x39\xe9\xf6\x57\x42\x15\x78\x95\x05\x35\x23\xed\x44\xa7\x30\x8a\x3c\xcf\x77\x79\x5e\x36\x78\x88\x69\x30\x80\xb1\x73\xb8\x9c\x29\xdc\x0f\xc8\x18\xb1\x1c\xbc\xce\x53\xc6\x22\xf4\x25\x66\x99\x2b\x24\x54\x95\xbb\x46\xf3\xb3\xc6\x2d\xbf\xc9\xf1\x14\x00\xc0\xe4\x3d\x7e\x41\xb1\xc0\x2f\xbc\xf9\x09\xef\xd8\x47\xa5\x09\x09\x55\xe3\x34\xb5\xe8\x5c\xa7\xdb\x0d\xe4\x52\xe7\x85\x3f\x6d\x90\x2f\x71\x69\xec\xa6\xef\x28\x21\x75\xf8\x68\xbd\x70\xd2\x92\x67\x2e\xdc\x85\x26\x9e\x88\xd4\xb7\xc2\x75\xb6\x4b\xe7\xc6\xf9\xd3\x72\x89\x1e\xca\x35\xb6\x05\xb1\x1d\x0f\xef\x8e\xf7\xad\x35\xec\x6e\x91\x70\xf2\x5d\x97\x58\xee\xcf\x2a\x7c\x57\x69\xa2\x9f\x17\x6e\xd1\x61\x38\x6d\x57\xb7\xa9\x60\x04\xde\x16\x78\x10\xfe\x0c\xa9\x7d\x38\x39\x54\x19\xe5\x12\x6f\x8b\x84\x61\x35\x17\x9c\x69\x38\xd2\x05\x65\x5e\x57\xcc\xd8\xe6\xde\x98\x7d\x74\x45\x70\xdd\x4c\x2e\xdf\xbc\x9e\xdc\x4c\xaf\x3f\x9c\x4f\x8f\x6b\x70\x52\x98\x79\x52\xaa\x79\x06\x85\x7a\xee\x17\xac\x3f\x89\x6b\xae\x7e\x26\x9e\x17\x27\x5f\xc2\x1b\x18\x1d\x08\xf9\xd6\xe3\x1c\xf0\xf9\x0b\xcb\xbe\xdf\x37\x5f\x93\x34\x18\xf3\xaf\x41\x92\x37\x4c\x5c\x92\x7b\x53\x12\x3c\xee\xe7\xbf\x18\x54\xe9\x8c\x28\x7e\x14\x4a\xe8\x04\x1f\xd1\x79\x1f\x6b\xf5\xa4\x79\x20\x0f\x2d\xd1\x2f\x4c\xca\x85\x21\x11\xa1\xb6\x94\x08\x4a\x8d\xc6\x7f\x3f\x1b\x8d\x2f\x2f\x6b\xb9\x88\x9f\xcf\xaf\x5e\xd7\xf3\xd3\xf1\xeb\xc9\xe5\xe4\xed\x78\x3a\xd9\xa5\xbd\x99\x8e\xa7\x17\xe7\xfc\xb6\x4c\x5d\x83\x01\xdc\xdc\xca\x9c\x2b\x0c\xe7\x6d\xb3\xcc\xb9\x55\xae\xf4\x75\x3d\xf0\x0b\x43\x4d\xa8\x8d\x05\x34\x13\x3a\x29\x0b\x9b\x2b\x01\xeb\x0d\xc1\xf5\x21\xe7\x9d\xec\x38\xaf\x82\xb0\x74\xef\x2d\xc6\x4d\xd3\x8e\x37\xa5\x5e\x5b\x83\x06\x34\x72\xf2\xe7\x04\xdb\x79\xfa\x21\xe1\x1f\x30\x84\x53\x38\x89\x59\xf4\x91\x34\xfd\x12\x9e\x93\xf8\x3f\x91\xac\x5f\x1d\xe0\xfc\x7b\xa6\xec\xbd\x40\xfb\xef\xa7\x72\x53\xf8\xab\x2c\x3b\x85\x5d\x23\x7e\xbb\x67\xc4\x8a\xfe\x12\xf5\x3e\xfd\xff\xed\xd1\x6f\xd3\x3e\xa1\xca\xe4\xf0\x6c\x0f\x22\x21\xe9\x3e\xdb\x89\x83\x68\x5c\x6e\xef\x58\x1a\x8c\x1e\x28\x34\x2f\x9b\x18\x7e\x28\x53\xfe\x47\x85\xe6\x60\x9b\x4a\xcd\x68\xb3\x11\xed\x81\x45\x6f\x25\xae\x68\xd4\x3c\x76\x2c\x92\x1a\x76\xb3\xa6\xf4\xd5\x87\x8f\x18\x24\x6a\x44\x4e\x2e\xb1\xc1\xa7\xfe\x8c\x7b\x5e\x6a\xd2\xe3\xa8\xc6\x10\x13\xdc\x87\x5b\x84\xa5\xd8\xd0\xa8\x96\x15\xfa\x76\x03\x73\xe1\x20\xdd\x68\xb1\x94\x89\x0b\xf2\xb8\xb9\xb7\x38\x17\x96\xc5\x5a\xfc\xad\x40\x47\x73\x1f\x01\x59\x24\xbe\x10\x4a\x6d\x60\x2e\x69\x78\x23\xee\xce\xcb\x57\xc3\x21\x38\x2f\x73\xd4\x69\x0f\xbe\x7b\x35\xf8\xee\x5b\xb0\x85\xc2\x6e\xbf\x5d\x2b\x61\xd5\x51\xa3\x37\x68\x21\xa2\xe7\x35\xe6\x7e\xd1\xe9\xc2\x0f\x0f\xd4\xc2\x07\x0a\xdb\x41\x5a\x78\x01\x27\x5f\xfa\xa4\xd7\xa8\x81\xdb\xe0\x49\x40\xe5\x30\x4a\xa3\x81\xf7\xea\xf5\x55\xe7\x56\x58\xa1\xc4\x0c\xbb\xa7\x3c\x00\xb3\xad\xd6\x22\x4e\x40\xe4\x14\xc8\x95\x90\x1a\x44\x92\x98\x42\x7b\x32\x7c\x39\xcc\xa8\x0d\xe5\xf7\x63\x5f\xca\xe3\x59\x51\x24\x09\x3a\x57\xa6\x7b\xf6\x1a\xa9\x23\x96\xc4\x0d\x52\x3b\x99\x62\xcd\x2b\x94\x1d\x0c\xa7\xe6\x48\x41\xa3\x74\x29\x70\x69\x1c\x6d\x32\x43\x58\x5b\x1a\xbc\x9c\xd4\x09\xdf\x3c\xa4\x48\xd6\x76\x60\x34\x08\x50\x86\xaf\x3b\x38\xc6\x41\xd8\xb9\xeb\x87\x7c\x4f\xdb\x52\xce\xd1\x66\xdd\x6f\x02\xb9\x0e\x55\x1e\x71\x76\x5a\x21\x0d\x78\x27\x9d\xe7\x8e\x9a\xb4\x94\x0e\x02\x92\xa5\x9e\xf7\x20\x37\x39\xe7\xe9\xaf\x95\xb3\x98\xac\xaf\x27\xbf\x4c\xae\xab\xc6\x
e7\xe9\x4e\x2c\x67\x9e\xa3\x6a\x24\x04\x4b\xf3\x96\xc7\xf4\xe8\xc0\x10\x73\x00\x50\xa3\x07\x00\x45\xf2\xb7\xb5\xf1\x7d\xed\x38\x4a\x38\xbf\x75\xcc\x1c\xc3\x3c\x57\x57\xc0\x15\xca\xbb\x9d\xdc\xbd\x9b\x1c\x4c\x5e\x56\x08\x52\x8a\xd3\x0e\x25\xf6\xdd\x49\xa3\xb1\xb0\x1d\x38\xb6\xf8\xbc\xa8\xd9\x78\xcd\xed\x66\x20\xaa\xa5\x06\x5e\x2f\xfb\x56\x11\xaa\x01\xeb\x6e\x0a\x4f\x70\xa0\xfa\xbd\x4d\x7e\x73\xe1\x3e\x38\xf6\x7a\x4c\x7f\x33\x39\xbf\xd0\xbe\x53\x2e\x5e\x68\x78\x01\xe5\x03\x25\x75\x78\xd1\x88\xa2\x03\xd9\xb1\x95\xa2\x42\x8f\xb0\x15\x71\x06\x3b\xaf\x48\x50\x30\x07\x1b\xcd\xa2\xdf\x2f\xce\xc3\x28\x8d\x0c\xf6\xcc\xa2\xef\xe3\x6f\x85\x50\xae\x33\xac\x9a\x85\x70\x02\x6f\xb8\xbc\x8d\xf6\x3a\x49\xe2\x69\xf6\x8e\x67\x35\xb6\x68\x8d\x92\x2d\x74\x82\xe7\x26\xc5\x47\x25\x44\x11\x31\x6d\x54\xbe\x8c\xc0\x3c\xd4\x7b\xb7\xea\x04\x70\x54\x35\x04\x99\x90\xaa\xb0\x78\x74\x06\x07\xd2\x8e\x2b\x6c\x26\x12\xf6\xa5\x43\xe0\x69\xdd\x81\x33\x4b\x5c\x98\x75\x50\xe0\x50\xf2\xda\x07\x47\x85\x83\x9d\xf2\xc1\xd7\x4e\xc2\x41\xe1\xc4\x1c\x6b\xe0\xa8\x0c\x5e\x3a\xea\xe0\x15\xc2\x9f\x86\xce\xf3\xea\xf1\x09\x28\xba\xff\x6b\xe0\xb1\xe3\xe7\xbd\x3e\xa7\x24\xe2\x6e\xa7\xf6\x50\x2a\x1b\x9a\x91\xbf\x97\xe3\x9f\x1c\x61\xbb\xb4\xe1\x68\x4d\xe2\x70\xc0\x6d\x5f\xf3\x75\xf7\x57\xab\x0f\x79\xfe\xa1\x96\x89\x30\xaa\x7f\xc5\xc4\x6f\x71\xca\x5d\x0e\x3d\xe5\x16\x57\xd2\x14\x54\xc0\xf0\x7f\x69\x1c\xae\x5a\xbe\xfb\x76\xeb\x3e\xde\x0b\xb2\xdf\xea\x17\x83\xeb\x45\xbc\xd7\x0e\xdd\x52\xad\x7c\x18\xae\xad\xf1\xba\x30\x0b\x37\xce\x2d\xe6\x7f\xe4\x82\x30\x06\xba\x37\x39\xb5\x03\xb1\x3a\x29\x8b\x22\xdd\x54\x05\xb1\x17\x1a\x11\x58\x08\x9d\xc6\x61\x44\xa4\xa9\x24\x79\x0c\x42\xd2\x50\xcc\x85\xd4\xed\x83\x66\xfc\x6a\x15\x3e\x84\x8c\xbd\xde\xb6\x5e\x48\xe3\x10\x49\x13\x1f\x6b\xdc\x7e\x42\xc1\xdc\x09\xa2\xdd\xbb\xce\x78\x5d\x6a\xb4\x2b\x96\xdc\x09\x83\x58\x09\xa9\x04\x4d\x5f\xdc\x61\xe9\x14\x12\x85\x42\x87\x2f\x1c\x98\x79\xb3\x42\xeb\xda\x4f\x00\xf9\x9f\xc1\xf8\x4e\x56\x2c\x1f\xa3\x39\x9e\x1e\xb3\x4f\x8d\xd8\x70\xfc\x37\x4a\x78\x1f\xe1\x55\x33\x6f\x88\x2c\xe9\xf9\xe3\x17\x6a\xdf\x7e\x5a\x48\x71\xcf\x44\x34\x3f\xc0\xb0\xd6\x97\xff\x5d\x82\x6c\x1f\x62\x97\x55\x7f\x16\x0f\xef\x8d\xe9\x81\x42\xc1\x53\x52\xf9\x69\xaa\xec\x47\x1f\x1b\xda\xca\xe8\x0d\x1d\xdd\x5e\xf8\xf2\x9d\xde\x02\xcb\x1b\x90\xd0\xda\xcf\x10\x35\x48\x8f\x56\xd0\x3c\x44\xe8\x8a\x5f\x53\x48\x4b\xc7\xe2\xd8\x2f\x92\x82\x2e\x0a\x8e\x9f\x36\xa8\x30\x4b\x3d\xef\xb7\x5b\xe1\x7d\x2d\xde\x13\x7f\xb7\x8d\xf7\x50\x01\x99\x33\xde\x09\x54\x57\x02\x89\xbf\xe3\x6e\x91\xc7\xe6\x9d\x7b\x01\x5a\xa3\x57\x61\xa6\xde\xb9\x05\x60\xc6\x78\x13\xb0\x7b\x27\x46\x6b\xfc\xae\x01\x70\x26\x9d\x0b\x17\xc4\xec\x84\x84\xbf\xdb\x8f\x88\x92\x81\x82\xe1\xf4\x30\x03\x2d\x1d\x60\xda\xb9\x99\x20\x62\x7e\x15\x56\x43\x3d\x3f\xad\xaf\x86\x57\xf1\xa0\x72\x59\xb3\x8d\x5c\xb2\x6d\xee\xcf\x0e\x27\xb9\x61\x89\xc7\xc3\xc9\x8c\x6c\x5e\x01\xf6\x01\xd6\xfa\xac\xb1\x4f\xf2\x58\xaa\x64\xe9\x65\x66\x7b\x80\x95\xa5\xd7\x5a\x0e\x7f\xf7\x74\x91\x15\x71\x5d\xc5\x06\x4d\x43\x08\xdf\x36\xee\x2d\x1f\x9a\xb4\x68\x50\x89\x84\x65\x73\x35\x1a\x1d\x0d\xef\xaa\x0f\x23\x31\x57\x35\x68\x4a\x25\x42\x64\x84\xf3\x72\x54\xc8\x7f\x62\xdc\xb6\x1e\x83\xe5\x12\x58\x0c\x1f\x70\xb8\x9b\xa5\x10\x34\x33\x6e\x20\x0a\x47\xa3\xe8\x36\xb6\x52\x74\xd2\x62\x0a\x99\x44\x95\x82\x49\xd1\xf2\xa0\xfb\xab\x33\x3a\x7c\xaa\x43\x2b\x49\x62\xf8\x24\x19\xfe\x1d\xc0\x1f\x4a\xb5\x4c\xd0\x6f\x20\x43\xc1\xdf\xdc\xbc\x81\x5c\x38\x07\x4b\x14\x34\xda\x66\x85\x52\x1b\x30\x36\x45\x12\x5e\xcd\x7a\x14\xd6\x06\x0a\x87\xd6\xc1\x7a\x61\x62\xa9\xe5\x16\x2f\xa7\x6e\x55\xfa\x5e\xbc\xce\x91\x2e\x57\x62\x03\xd2\
x53\x59\x8f\x87\xaa\x47\x7a\xf5\xa1\x8b\xbf\x96\x19\x32\xf0\x7e\x98\x97\x53\x61\x33\xce\xf9\x35\x3d\x35\x23\x3c\x0e\x45\xcd\xd8\xde\x5e\x74\x35\x03\xb9\x2c\x3d\xcd\x68\xad\x17\xb2\x66\x48\xf2\x0a\x3f\x35\x83\xb1\xd6\x6a\xf3\x02\x23\xa8\x62\xe0\xa7\x9d\xf0\x64\x2d\x63\x7c\x86\xcf\xba\x15\x39\x3f\xf5\x22\x60\xc8\x8b\x1d\x32\xce\x2d\x6e\x28\x9b\x07\x1b\xd5\x4a\x53\x78\xf1\xf9\x16\x37\x5f\x0e\x57\xa2\x08\xc7\x1a\x5d\x55\x7a\xca\xb0\x08\x6b\x8f\x24\x83\x4a\x0b\x39\x1a\x9e\x81\xfc\xbe\xce\x50\x56\x4f\x90\xcf\x9f\x97\x7b\xd6\xd7\x3f\xcb\x2f\x65\x84\x57\x88\xdf\x59\xef\x36\x34\x8a\x31\x12\x68\x28\x28\xda\xf7\xed\x7f\x05\x00\x00\xff\xff\xfb\x65\x93\x4f\xfc\x22\x00\x00")
func call_tracer_legacyJsBytes() ([]byte, error) {
@@ -178,22 +157,22 @@ func evmdis_tracerJs() (*asset, error) {
return a, nil
}
-var _noop_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x93\x4f\x6f\xdb\x46\x10\xc5\xcf\xe6\xa7\x78\xc7\x04\x50\xc5\xfe\x39\x14\x70\x8a\x02\xac\x61\x27\x2a\x1c\xdb\x90\xe8\x06\x3e\x0e\xc9\xa1\xb8\xe9\x6a\x87\x9d\x9d\x95\x22\x18\xfe\xee\xc5\x92\x12\x12\x14\x69\x9b\x9b\xb0\xd2\xfb\xbd\x37\xf3\x46\x65\x89\x2b\x19\x8f\xea\xb6\x83\xe1\xc7\xef\x7f\xf8\x19\xf5\xc0\xd8\xca\x77\x6c\x03\x2b\xa7\x1d\xaa\x64\x83\x68\x2c\xca\x12\xf5\xe0\x22\x7a\xe7\x19\x2e\x62\x24\x35\x48\x0f\xfb\xc7\xef\xbd\x6b\x94\xf4\xb8\x2c\xca\x72\xd6\x7c\xf5\xeb\x4c\xe8\x95\x19\x51\x7a\x3b\x90\xf2\x25\x8e\x92\xd0\x52\x80\x72\xe7\xa2\xa9\x6b\x92\x31\x9c\x81\x42\x57\x8a\x62\x27\x9d\xeb\x8f\x19\xe9\x0c\x29\x74\xac\x93\xb5\xb1\xee\xe2\x39\xc7\xdb\xbb\x47\xdc\x72\x8c\xac\x78\xcb\x81\x95\x3c\x1e\x52\xe3\x5d\x8b\x5b\xd7\x72\x88\x0c\x8a\x18\xf3\x4b\x1c\xb8\x43\x33\xe1\xb2\xf0\x26\x47\xd9\x9c\xa2\xe0\x46\x52\xe8\xc8\x9c\x84\x05\xd8\xe5\xe4\xd8\xb3\x46\x27\x01\x3f\x9d\xad\x4e\xc0\x05\x44\x33\xe4\x15\x59\x1e\x40\x21\x63\xd6\xbd\x06\x85\x23\x3c\xd9\x67\xe9\x37\x2c\xe4\xf3\xdc\x1d\x5c\x98\x6c\x06\x19\x19\x36\x90\xe5\xa9\x0f\xce\x7b\x34\x8c\x14\xb9\x4f\x7e\x91\x69\x4d\x32\x7c\x58\xd5\xef\xee\x1f\x6b\x54\x77\x4f\xf8\x50\xad\xd7\xd5\x5d\xfd\xf4\x06\x07\x67\x83\x24\x03\xef\x79\x46\xb9\xdd\xe8\x1d\x77\x38\x90\x2a\x05\x3b\x42\xfa\x4c\x78\x7f\xbd\xbe\x7a\x57\xdd\xd5\xd5\x6f\xab\xdb\x55\xfd\x04\x51\xdc\xac\xea\xbb\xeb\xcd\x06\x37\xf7\x6b\x54\x78\xa8\xd6\xf5\xea\xea\xf1\xb6\x5a\xe3\xe1\x71\xfd\x70\xbf\xb9\x5e\x62\xc3\x39\x15\x67\xfd\xff\xef\xbc\x9f\xda\x53\x46\xc7\x46\xce\xc7\xf3\x26\x9e\x24\x21\x0e\x92\x7c\x87\x81\xf6\x0c\xe5\x96\xdd\x9e\x3b\x10\x5a\x19\x8f\xdf\x5c\x6a\x66\x91\x97\xb0\x9d\x66\xfe\xd7\x83\xc4\xaa\x47\x10\x5b\x20\x32\xe3\x97\xc1\x6c\xbc\x2c\xcb\xc3\xe1\xb0\xdc\x86\xb4\x14\xdd\x96\x7e\xc6\xc5\xf2\xd7\x65\x91\x99\x41\x64\xac\x95\x5a\xd6\x5c\xce\xc7\x14\x6d\x62\x37\xa4\xdc\x48\x60\x34\xe2\x3c\xeb\x98\x5b\x46\x2b\x5d\x1e\xe0\xaf\xe4\x94\x3b\xf4\x2a\x3b\x10\x7e\xa7\x3d\x6d\x5a\x75\xa3\x65\x9c\x34\x1f\xb9\x35\x98\xcc\x15\x52\xe3\xa7\x73\x24\x98\x52\x88\xd4\xe6\xbb\xc9\x9f\x5b\xd6\x65\xf1\x5c\x5c\x94\x25\xa2\xf1\x98\xbd\x5d\xd8\xcb\x9f\x99\x2b\x9a\xfb\xd4\x23\x64\x9c\x1c\xa7\xcb\xc8\xa1\xfe\x78\x0f\xfe\xc4\x6d\x32\x8e\xcb\xe2\x22\xeb\x2e\xd1\xa7\x30\x41\x5f\x79\xd9\x2e\xd0\x35\xaf\xf1\x8c\x97\x45\x31\x91\x7b\x4a\xde\xbe\x44\x1f\x86\xd3\x99\x50\x6b\x89\xfc\x89\x96\x23\x49\x0f\x0a\x67\xc3\x7e\x2e\xf0\x62\xd2\xff\xb7\x85\x72\xfc\x9a\x07\x79\x3f\xf9\xcc\xc0\x38\x57\xdf\x30\x07\x38\x63\xa5\x7c\xfb\xb2\x67\xcd\x7f\x7b\x28\x5b\xd2\x10\x27\x5c\xd6\xf4\x2e\x90\x3f\x83\x4f\xe7\x91\x37\xe6\xc2\x76\x59\x5c\xcc\xef\x5f\x84\x6a\xed\xd3\x39\xd4\x4c\xc2\xf3\xcb\x1b\xbc\x14\x2f\xc5\xdf\x01\x00\x00\xff\xff\x77\x56\xe7\x1a\xf7\x04\x00\x00")
+var _noop_tracer_legacyJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x93\x4f\x6f\xdb\x46\x10\xc5\xcf\xe6\xa7\x78\xc7\x04\x50\xc5\xfe\x39\x14\x70\x8a\x02\xac\x61\x27\x2a\x1c\xdb\x90\xe8\x06\x3e\x0e\xc9\xa1\xb8\xe9\x6a\x87\x9d\x9d\x95\x22\x18\xfe\xee\xc5\x92\x12\x12\x14\x69\x9b\x9b\xb0\xd2\xfb\xbd\x37\xf3\x46\x65\x89\x2b\x19\x8f\xea\xb6\x83\xe1\xc7\xef\x7f\xf8\x19\xf5\xc0\xd8\xca\x77\x6c\x03\x2b\xa7\x1d\xaa\x64\x83\x68\x2c\xca\x12\xf5\xe0\x22\x7a\xe7\x19\x2e\x62\x24\x35\x48\x0f\xfb\xc7\xef\xbd\x6b\x94\xf4\xb8\x2c\xca\x72\xd6\x7c\xf5\xeb\x4c\xe8\x95\x19\x51\x7a\x3b\x90\xf2\x25\x8e\x92\xd0\x52\x80\x72\xe7\xa2\xa9\x6b\x92\x31\x9c\x81\x42\x57\x8a\x62\x27\x9d\xeb\x8f\x19\xe9\x0c\x29\x74\xac\x93\xb5\xb1\xee\xe2\x39\xc7\xdb\xbb\x47\xdc\x72\x8c\xac\x78\xcb\x81\x95\x3c\x1e\x52\xe3\x5d\x8b\x5b\xd7\x72\x88\x0c\x8a\x18\xf3\x4b\x1c\xb8\x43\x33\xe1\xb2\xf0\x26\x47\xd9\x9c\xa2\xe0\x46\x52\xe8\xc8\x9c\x84\x05\xd8\xe5\xe4\xd8\xb3\x46\x27\x01\x3f\x9d\xad\x4e\xc0\x05\x44\x33\xe4\x15\x59\x1e\x40\x21\x63\xd6\xbd\x06\x85\x23\x3c\xd9\x67\xe9\x37\x2c\xe4\xf3\xdc\x1d\x5c\x98\x6c\x06\x19\x19\x36\x90\xe5\xa9\x0f\xce\x7b\x34\x8c\x14\xb9\x4f\x7e\x91\x69\x4d\x32\x7c\x58\xd5\xef\xee\x1f\x6b\x54\x77\x4f\xf8\x50\xad\xd7\xd5\x5d\xfd\xf4\x06\x07\x67\x83\x24\x03\xef\x79\x46\xb9\xdd\xe8\x1d\x77\x38\x90\x2a\x05\x3b\x42\xfa\x4c\x78\x7f\xbd\xbe\x7a\x57\xdd\xd5\xd5\x6f\xab\xdb\x55\xfd\x04\x51\xdc\xac\xea\xbb\xeb\xcd\x06\x37\xf7\x6b\x54\x78\xa8\xd6\xf5\xea\xea\xf1\xb6\x5a\xe3\xe1\x71\xfd\x70\xbf\xb9\x5e\x62\xc3\x39\x15\x67\xfd\xff\xef\xbc\x9f\xda\x53\x46\xc7\x46\xce\xc7\xf3\x26\x9e\x24\x21\x0e\x92\x7c\x87\x81\xf6\x0c\xe5\x96\xdd\x9e\x3b\x10\x5a\x19\x8f\xdf\x5c\x6a\x66\x91\x97\xb0\x9d\x66\xfe\xd7\x83\xc4\xaa\x47\x10\x5b\x20\x32\xe3\x97\xc1\x6c\xbc\x2c\xcb\xc3\xe1\xb0\xdc\x86\xb4\x14\xdd\x96\x7e\xc6\xc5\xf2\xd7\x65\x91\x99\x41\x64\xac\x95\x5a\xd6\x5c\xce\xc7\x14\x6d\x62\x37\xa4\xdc\x48\x60\x34\xe2\x3c\xeb\x98\x5b\x46\x2b\x5d\x1e\xe0\xaf\xe4\x94\x3b\xf4\x2a\x3b\x10\x7e\xa7\x3d\x6d\x5a\x75\xa3\x65\x9c\x34\x1f\xb9\x35\x98\xcc\x15\x52\xe3\xa7\x73\x24\x98\x52\x88\xd4\xe6\xbb\xc9\x9f\x5b\xd6\x65\xf1\x5c\x5c\x94\x25\xa2\xf1\x98\xbd\x5d\xd8\xcb\x9f\x99\x2b\x9a\xfb\xd4\x23\x64\x9c\x1c\xa7\xcb\xc8\xa1\xfe\x78\x0f\xfe\xc4\x6d\x32\x8e\xcb\xe2\x22\xeb\x2e\xd1\xa7\x30\x41\x5f\x79\xd9\x2e\xd0\x35\xaf\xf1\x8c\x97\x45\x31\x91\x7b\x4a\xde\xbe\x44\x1f\x86\xd3\x99\x50\x6b\x89\xfc\x89\x96\x23\x49\x0f\x0a\x67\xc3\x7e\x2e\xf0\x62\xd2\xff\xb7\x85\x72\xfc\x9a\x07\x79\x3f\xf9\xcc\xc0\x38\x57\xdf\x30\x07\x38\x63\xa5\x7c\xfb\xb2\x67\xcd\x7f\x7b\x28\x5b\xd2\x10\x27\x5c\xd6\xf4\x2e\x90\x3f\x83\x4f\xe7\x91\x37\xe6\xc2\x76\x59\x5c\xcc\xef\x5f\x84\x6a\xed\xd3\x39\xd4\x4c\xc2\xf3\xcb\x1b\xbc\x14\x2f\xc5\xdf\x01\x00\x00\xff\xff\x77\x56\xe7\x1a\xf7\x04\x00\x00")
-func noop_tracerJsBytes() ([]byte, error) {
+func noop_tracer_legacyJsBytes() ([]byte, error) {
return bindataRead(
- _noop_tracerJs,
- "noop_tracer.js",
+ _noop_tracer_legacyJs,
+ "noop_tracer_legacy.js",
)
}
-func noop_tracerJs() (*asset, error) {
- bytes, err := noop_tracerJsBytes()
+func noop_tracer_legacyJs() (*asset, error) {
+ bytes, err := noop_tracer_legacyJsBytes()
if err != nil {
return nil, err
}
- info := bindataFileInfo{name: "noop_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+ info := bindataFileInfo{name: "noop_tracer_legacy.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xf, 0x1c, 0x6f, 0x65, 0xaf, 0x90, 0x31, 0xab, 0xf, 0xe0, 0xca, 0x54, 0x7, 0xfd, 0xd3, 0xa1, 0x4a, 0x14, 0x1, 0x2a, 0x9d, 0xdc, 0xb9, 0x64, 0x69, 0x83, 0x30, 0xb1, 0x2a, 0xbd, 0xfb}}
return a, nil
}
@@ -218,23 +197,23 @@ func opcount_tracerJs() (*asset, error) {
return a, nil
}
-var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdd\x6f\xdb\x38\x12\x7f\xb6\xfe\x8a\x41\x5f\x6c\x5d\x5d\xb9\xcd\x02\x7b\x80\x73\x39\x40\x75\xdd\x36\x40\x36\x09\x6c\xe7\x72\xb9\xc5\x3e\x50\xe4\x48\xe6\x9a\x26\x05\x92\xb2\xe3\x2b\xf2\xbf\x1f\x86\xfa\xf0\x47\x93\xa6\x7b\x6f\x16\x39\xfc\xcd\xf7\x6f\xc6\xa3\x11\x4c\x4c\xb9\xb3\xb2\x58\x7a\x38\x7b\xff\xe1\xef\xb0\x58\x22\x14\xe6\x1d\xfa\x25\x5a\xac\xd6\x90\x56\x7e\x69\xac\x8b\x46\x23\x58\x2c\xa5\x83\x5c\x2a\x04\xe9\xa0\x64\xd6\x83\xc9\xc1\x9f\xc8\x2b\x99\x59\x66\x77\x49\x34\x1a\xd5\x6f\x9e\xbd\x26\x84\xdc\x22\x82\x33\xb9\xdf\x32\x8b\x63\xd8\x99\x0a\x38\xd3\x60\x51\x48\xe7\xad\xcc\x2a\x8f\x20\x3d\x30\x2d\x46\xc6\xc2\xda\x08\x99\xef\x08\x52\x7a\xa8\xb4\x40\x1b\x54\x7b\xb4\x6b\xd7\xda\xf1\xe5\xfa\x0e\xae\xd0\x39\xb4\xf0\x05\x35\x5a\xa6\xe0\xb6\xca\x94\xe4\x70\x25\x39\x6a\x87\xc0\x1c\x94\x74\xe2\x96\x28\x20\x0b\x70\xf4\xf0\x33\x99\x32\x6f\x4c\x81\xcf\xa6\xd2\x82\x79\x69\xf4\x10\x50\x92\xe5\xb0\x41\xeb\xa4\xd1\xf0\x4b\xab\xaa\x01\x1c\x82\xb1\x04\x32\x60\x9e\x1c\xb0\x60\x4a\x7a\x17\x03\xd3\x3b\x50\xcc\xef\x9f\xfe\x44\x40\xf6\x7e\x0b\x90\x3a\xa8\x59\x9a\x12\xc1\x2f\x99\x27\xaf\xb7\x52\x29\xc8\x10\x2a\x87\x79\xa5\x86\x84\x96\x55\x1e\xee\x2f\x17\x5f\x6f\xee\x16\x90\x5e\x3f\xc0\x7d\x3a\x9b\xa5\xd7\x8b\x87\x73\xd8\x4a\xbf\x34\x95\x07\xdc\x60\x0d\x25\xd7\xa5\x92\x28\x60\xcb\xac\x65\xda\xef\xc0\xe4\x84\xf0\xdb\x74\x36\xf9\x9a\x5e\x2f\xd2\x8f\x97\x57\x97\x8b\x07\x30\x16\x3e\x5f\x2e\xae\xa7\xf3\x39\x7c\xbe\x99\x41\x0a\xb7\xe9\x6c\x71\x39\xb9\xbb\x4a\x67\x70\x7b\x37\xbb\xbd\x99\x4f\x13\x98\x23\x59\x85\xf4\xfe\xf5\x98\xe7\x21\x7b\x16\x41\xa0\x67\x52\xb9\x36\x12\x0f\xa6\x02\xb7\x34\x95\x12\xb0\x64\x1b\x04\x8b\x1c\xe5\x06\x05\x30\xe0\xa6\xdc\xfd\x74\x52\x09\x8b\x29\xa3\x8b\xe0\xf3\x8b\x05\x09\x97\x39\x68\xe3\x87\xe0\x10\xe1\x1f\x4b\xef\xcb\xf1\x68\xb4\xdd\x6e\x93\x42\x57\x89\xb1\xc5\x48\xd5\x70\x6e\xf4\xcf\x24\x22\xcc\xd2\xa2\xf3\xcc\xe3\xc2\x32\x8e\x16\x4c\xe5\xcb\xca\x3b\x70\x55\x9e\x4b\x2e\x51\x7b\x90\x3a\x37\x76\x1d\x2a\x05\xbc\x01\x6e\x91\x79\x04\x06\xca\x70\xa6\x00\x1f\x91\x57\xe1\xae\x8e\x74\x28\x57\xcb\xb4\x63\x3c\x9c\xe6\xd6\xac\xc9\xd7\xca\x79\xfa\xe1\x1c\xae\x33\x85\x02\x0a\xd4\xe8\xa4\x83\x4c\x19\xbe\x4a\xa2\x6f\x51\xef\xc0\x18\xaa\x93\xe0\x61\x23\x14\x6a\x63\x8b\x7d\x8b\x90\x55\x52\x09\xa9\x8b\x24\xea\xb5\xd2\x63\xd0\x95\x52\xc3\x28\x40\x28\x63\x56\x55\x99\x72\x6e\xaa\x60\xfb\x9f\xc8\x7d\x0d\xe6\x4a\xe4\x32\xa7\xe2\x60\xdd\xad\x37\xe1\xaa\xd3\x6b\x32\x92\x4f\xa2\xde\x11\xcc\x18\xf2\x4a\x07\x77\x06\x4c\x08\x3b\x04\x91\xc5\xdf\xa2\x5e\x6f\xc3\x2c\x61\xc1\x05\x78\xf3\x15\x1f\xc3\x65\x7c\x1e\xf5\x7a\x32\x87\x81\x5f\x4a\x97\xb4\xc0\xbf\x33\xce\xff\x80\x8b\x8b\x8b\xd0\xd4\xb9\xd4\x28\x62\x20\x88\xde\x73\x62\xf5\x4d\x2f\x63\x8a\x69\x8e\x63\xe8\xbf\x7f\xec\xc3\x5b\x10\x59\x52\xa0\xff\x58\x9f\xd6\xca\x12\x6f\xe6\xde\x4a\x5d\x0c\x3e\xfc\x1a\x0f\xc3\x2b\x6d\xc2\x1b\x68\xc4\xaf\x4d\x27\x5c\xdf\x73\x23\xc2\x75\x63\x73\x2d\x35\x31\xa2\x11\x6a\xa4\x9c\x37\x96\x15\x38\x86\x6f\x4f\xf4\xfd\x44\x5e\x3d\x45\xbd\xa7\xa3\x28\xcf\x6b\xa1\x17\xa2\xdc\x40\x00\x6a\x6f\xbb\x3a\x2f\x24\x75\xea\x61\x02\x02\xde\x8f\x92\x30\x6f\x4d\x39\x49\xc2\x0a\x77\xaf\x67\x82\x2e\xa4\x78\xec\x2e\x56\xb8\x8b\xcf\xa3\x17\x53\x94\x34\x46\xff\x2e\xc5\xe3\xcf\xe6\xeb\xe4\xcd\x51\x5c\xe7\x24\xb5\xb7\x37\x8e\x4f\xe2\x68\xd1\x55\xca\x53\xb9\x4b\xbd\x31\x2b\x22\xae\x25\xc5\x47\xa9\x10\x12\x53\x52\xb6\x5c\xcd\x1c\x19\xa2\x06\xe9\xd1\x32\xa2\x4e\xb3\x41\x4b\x53\x03\x2c\xfa\xca\x6a\xd7\x85\x31\x97\x9a\xa9\x16\xb8\x89\xba\xb7\x8c\xd7\x3d\x53\x9f\x1f\xc4\x92\xfb\xc7\x10\xc5\
xe0\xdd\x68\x04\xa9\x07\x72\x11\x4a\x23\xb5\x1f\xc2\x16\x41\x23\x0a\x6a\x7c\x81\xa2\xe2\x3e\xe0\xf5\x37\x4c\x55\xd8\xaf\x9b\x9b\x28\x32\x3c\x35\x15\x4d\x82\x83\xe6\x1f\x06\x03\xd7\x66\x13\x46\x5c\xc6\xf8\x0a\x9a\x86\x33\x56\x16\x52\x47\x4d\x38\x8f\x9a\x8d\x2c\x4a\x08\x38\x98\x15\x72\x45\x49\xa4\x93\x8f\x4c\xc1\x05\x64\xb2\xb8\xd4\xfe\x24\x79\x75\xd0\xdb\xa7\xf1\x1f\x49\xd3\x3c\x89\x23\xc2\x1b\x9c\xc5\x43\xf8\xf0\x6b\x57\x11\xde\x10\x14\xbc\x0e\xe6\xcd\xcb\x50\xd1\x69\x31\x3c\xff\x2c\xa8\xa1\x0e\x7e\x1b\xb4\x26\xae\xca\x28\x1d\xb5\x9f\x21\x8e\xc7\x5d\x7c\xfe\x03\xdc\x63\xdf\x5a\xdc\x26\x34\x09\x13\xe2\x10\x94\x3e\xc3\x77\xc1\xdc\x9d\x43\x01\x6f\x81\xbe\xa4\x26\x55\x4e\xf2\x2f\xcc\xc5\xf0\x37\x68\x24\x6e\xad\xe4\xdf\x59\x52\xe7\xf5\x13\x72\x8b\x6b\x1a\x05\x94\x3a\xce\x94\x42\xdb\x77\x10\x88\x66\xd8\xd4\x60\x48\x32\xae\x4b\xbf\x6b\x07\x84\x67\xb6\x40\xef\x5e\xf7\x26\xe0\xbc\x7b\xd7\xf2\x66\x88\xdf\xae\x44\xb8\xb8\x80\xfe\x64\x36\x4d\x17\xd3\x7e\xd3\x7b\xa3\x11\xdc\x63\x58\x9f\x32\x25\x33\xa1\x76\x20\x50\xa1\xc7\xda\x2e\xa3\x43\x5c\x3b\x1e\x19\xd2\x1e\x44\x1b\x0a\x3e\x4a\xe7\xa5\x2e\xa0\xa6\x97\x2d\x0d\xe3\x06\x2e\x34\x16\x67\x15\x85\xe7\x74\x72\x79\x43\x6b\x88\x45\x22\x23\x1a\x1a\xa1\x47\x99\x92\xdd\xda\x92\x4b\xeb\x3c\x94\x8a\x71\x4c\x08\xaf\x33\xe6\xe5\xa2\x68\xda\x9f\x54\xcf\x42\xdf\x06\xa0\xfd\x54\x64\x8a\xa6\x2a\xa9\x77\x30\x68\x31\xe2\xa8\xd7\xb3\xad\xf4\x01\xf6\xf9\x9e\x47\x9c\xc7\xf2\x90\x45\x68\x1b\xc1\x0d\x12\xef\x06\x0a\xa9\x27\x28\xe9\xfa\xd7\x6f\xcd\xc8\x46\x97\x44\x3d\x7a\x77\x40\x06\xca\x14\xc7\x64\x20\xea\xb0\xf0\xca\x5a\xca\x7f\xc7\xdb\x39\x11\xc3\x9f\x95\xf3\x14\x53\x4b\xe1\x69\x28\xe6\x39\x66\x0d\x3c\x4a\x23\x3a\xfe\x9e\x41\x69\xd8\x85\xe1\x42\xea\x9a\xd1\x56\xaf\x80\xa5\xf1\xa8\xbd\x64\x4a\xed\x28\x0f\x5b\x4b\xbb\x0f\x6d\x3b\x43\x70\x92\xa4\x02\x4d\x05\x51\xa9\xb9\xaa\x44\x5d\x06\xa1\xf8\x1b\x3c\x17\x6c\x3e\x5e\x9a\xd6\xe8\x1c\x2b\x30\xa1\x4a\xca\xe5\x63\xb3\x76\x6a\xe8\xd7\xcc\x38\x88\xfb\x49\x67\xe4\x31\x2f\x29\x53\x24\x6d\x91\x11\xb7\xa7\x42\x58\x74\x6e\x10\x37\x44\xd5\x65\xf6\x7e\x89\x9a\x82\x0f\x1a\xb7\xd0\xed\x33\x8c\x73\xda\xef\xc4\x10\x98\x10\xc4\x87\x27\xbb\x47\xd4\xeb\xb9\xad\xf4\x7c\x09\x41\x93\x29\xf7\xbd\x18\x37\xf5\xcf\x99\x43\x78\x33\xfd\xf7\x62\x72\xf3\x69\x3a\xb9\xb9\x7d\x78\x33\x86\xa3\xb3\xf9\xe5\x7f\xa6\xdd\xd9\xc7\xf4\x2a\xbd\x9e\x4c\xdf\x8c\xc3\x40\x7f\xc6\x21\x6f\x5a\x17\x48\xa1\xf3\x8c\xaf\x92\x12\x71\x35\x78\x7f\xcc\x03\x7b\x07\x7b\xbd\xcc\x22\x5b\x9d\xef\x8d\xa9\x1b\xb4\xd1\xd1\xf2\x34\x5c\xc0\x8b\xc1\x3a\x7f\xd9\x9a\x49\x23\x3f\x68\xd9\x7f\xbf\xbf\x04\xaa\x78\xdd\x8e\xb3\xbf\x6c\x48\xe8\x1d\xc6\x57\x63\x70\x4c\xd1\xda\x2c\xff\x4b\x7f\x77\xf2\xdc\xa1\x1f\x02\x6a\x61\xb6\xc4\x7c\x1d\x6a\x7d\xd3\xe0\x1e\x84\xec\x43\x5c\xd3\xee\x4d\x3e\x88\x3b\x61\x02\xfb\x5e\xf4\xec\x39\x51\xd4\x02\x2e\x5a\xf4\xb7\xe1\xe5\xeb\x81\x3a\x6b\x22\x75\xa2\xe0\x97\x93\xb5\x30\xdc\xaf\x71\x6d\xec\xae\x99\x61\x07\xfe\xfd\x38\xaa\xe9\xd5\x55\x57\x4f\xf4\x41\x45\xd6\x1d\x7c\x9a\x5e\x4d\xbf\xa4\x8b\xe9\x91\xd4\x7c\x91\x2e\x2e\x27\xf5\xd1\x5f\x2e\xbc\x0f\x3f\x5d\x78\xfd\xf9\x7c\x71\x33\x9b\xf6\xc7\xcd\xd7\xd5\x4d\xfa\xa9\xff\x9d\xc2\x66\x75\xfc\x51\xeb\x7a\x73\x6f\xac\xf8\x7f\x3a\xe0\x60\x8d\xcb\xd9\x73\x5b\x5c\xa0\x76\xee\xab\x93\x7f\x49\xc0\x74\xcb\xca\x79\xfd\x4f\xb1\x17\xde\x3f\xcb\xc3\x4f\xd1\x53\xf4\xbf\x00\x00\x00\xff\xff\x3a\xb7\x37\x41\xbf\x10\x00\x00")
+var _prestate_tracer_legacyJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdb\x6e\x1b\x39\x12\x7d\x56\x7f\x45\x21\x2f\x92\x36\x4a\x2b\xf6\x00\xb3\x80\xbc\x5e\xa0\xa3\x28\xb1\x00\x8f\x6d\x48\xf2\x66\xbd\x83\x79\x60\x93\xd5\x2d\x8e\x28\xb2\x41\xb2\x25\x6b\x03\xff\xfb\xa2\xd8\x17\x5d\xe2\x4b\x66\xdf\xd4\x64\xf1\x54\xd5\x61\xd5\x61\x69\x38\x84\xb1\x29\x76\x56\xe6\x4b\x0f\xe7\x1f\xcf\xfe\x0e\x8b\x25\x42\x6e\x3e\xa0\x5f\xa2\xc5\x72\x0d\x49\xe9\x97\xc6\xba\x68\x38\x84\xc5\x52\x3a\xc8\xa4\x42\x90\x0e\x0a\x66\x3d\x98\x0c\xfc\x89\xbd\x92\xa9\x65\x76\x17\x47\xc3\x61\x75\xe6\xd9\x6d\x42\xc8\x2c\x22\x38\x93\xf9\x2d\xb3\x38\x82\x9d\x29\x81\x33\x0d\x16\x85\x74\xde\xca\xb4\xf4\x08\xd2\x03\xd3\x62\x68\x2c\xac\x8d\x90\xd9\x8e\x20\xa5\x87\x52\x0b\xb4\xc1\xb5\x47\xbb\x76\x4d\x1c\x5f\x6f\xee\xe1\x1a\x9d\x43\x0b\x5f\x51\xa3\x65\x0a\xee\xca\x54\x49\x0e\xd7\x92\xa3\x76\x08\xcc\x41\x41\x2b\x6e\x89\x02\xd2\x00\x47\x07\xbf\x50\x28\xf3\x3a\x14\xf8\x62\x4a\x2d\x98\x97\x46\x0f\x00\x25\x45\x0e\x1b\xb4\x4e\x1a\x0d\xbf\x34\xae\x6a\xc0\x01\x18\x4b\x20\x3d\xe6\x29\x01\x0b\xa6\xa0\x73\x7d\x60\x7a\x07\x8a\xf9\xfd\xd1\x9f\x20\x64\x9f\xb7\x00\xa9\x83\x9b\xa5\x29\x10\xfc\x92\x79\xca\x7a\x2b\x95\x82\x14\xa1\x74\x98\x95\x6a\x40\x68\x69\xe9\xe1\xdb\x74\x71\x75\x7b\xbf\x80\xe4\xe6\x01\xbe\x25\xb3\x59\x72\xb3\x78\xb8\x80\xad\xf4\x4b\x53\x7a\xc0\x0d\x56\x50\x72\x5d\x28\x89\x02\xb6\xcc\x5a\xa6\xfd\x0e\x4c\x46\x08\xbf\x4d\x66\xe3\xab\xe4\x66\x91\x7c\x9a\x5e\x4f\x17\x0f\x60\x2c\x7c\x99\x2e\x6e\x26\xf3\x39\x7c\xb9\x9d\x41\x02\x77\xc9\x6c\x31\x1d\xdf\x5f\x27\x33\xb8\xbb\x9f\xdd\xdd\xce\x27\x31\xcc\x91\xa2\x42\x3a\xff\x36\xe7\x59\xb8\x3d\x8b\x20\xd0\x33\xa9\x5c\xc3\xc4\x83\x29\xc1\x2d\x4d\xa9\x04\x2c\xd9\x06\xc1\x22\x47\xb9\x41\x01\x0c\xb8\x29\x76\x3f\x7d\xa9\x84\xc5\x94\xd1\x79\xc8\xf9\xc5\x82\x84\x69\x06\xda\xf8\x01\x38\x44\xf8\xc7\xd2\xfb\x62\x34\x1c\x6e\xb7\xdb\x38\xd7\x65\x6c\x6c\x3e\x54\x15\x9c\x1b\xfe\x33\x8e\x08\xb3\xb0\xe8\x3c\xf3\xb8\xb0\x8c\xa3\x05\x53\xfa\xa2\xf4\x0e\x5c\x99\x65\x92\x4b\xd4\x1e\xa4\xce\x8c\x5d\x87\x4a\x01\x6f\x80\x5b\x64\x1e\x81\x81\x32\x9c\x29\xc0\x47\xe4\x65\xd8\xab\x98\x0e\xe5\x6a\x99\x76\x8c\x87\xd5\xcc\x9a\x35\xe5\x5a\x3a\x4f\x3f\x9c\xc3\x75\xaa\x50\x40\x8e\x1a\x9d\x74\x90\x2a\xc3\x57\x71\xf4\x3d\xea\x1c\x04\x43\x75\x12\x32\xac\x8d\x42\x6d\x6c\xb1\x6b\x11\xd2\x52\x2a\x21\x75\x1e\x47\x9d\xc6\x7a\x04\xba\x54\x6a\x10\x05\x08\x65\xcc\xaa\x2c\x12\xce\x4d\x19\x62\xff\x13\xb9\xaf\xc0\x5c\x81\x5c\x66\x54\x1c\xac\xdd\xf5\x26\x6c\xb5\x7e\x4d\x4a\xf6\x71\xd4\x39\x82\x19\x41\x56\xea\x90\x4e\x8f\x09\x61\x07\x20\xd2\xfe\xf7\xa8\xd3\xd9\x30\x4b\x58\x70\x09\xde\x5c\xe1\x63\xd8\xec\x5f\x44\x9d\x8e\xcc\xa0\xe7\x97\xd2\xc5\x0d\xf0\xef\x8c\xf3\x3f\xe0\xf2\xf2\x32\x34\x75\x26\x35\x8a\x3e\x10\x44\xe7\x39\xb3\x6a\xa7\x93\x32\xc5\x34\xc7\x11\x74\x3f\x3e\x76\xe1\x3d\x88\x34\xce\xd1\x7f\xaa\x56\x2b\x67\xb1\x37\x73\x6f\xa5\xce\x7b\x67\xbf\xf6\x07\xe1\x94\x36\xe1\x0c\xd4\xe6\x37\xa6\x35\xae\xf6\xb9\x11\x61\xbb\x8e\xb9\xb2\x1a\x1b\x51\x1b\xd5\x56\xce\x1b\xcb\x72\x1c\xc1\xf7\x27\xfa\x7e\xa2\xac\x9e\xa2\xce\xd3\x11\xcb\xf3\xca\xe8\x05\x96\x6b\x08\x40\xed\x6d\x5b\xe7\xb9\xa4\x4e\x3d\xbc\x80\x80\xf7\xda\x25\xcc\x9b\x50\x4e\x2e\x61\x85\xbb\xb7\x6f\x82\x36\xa4\x78\x6c\x37\x56\xb8\xeb\x5f\x44\x2f\x5e\x51\x5c\x07\xfd\xbb\x14\x8f\x3f\x7b\x5f\x27\x67\x8e\x78\x9d\x93\xd5\x3e\xde\x7e\xff\x84\x47\x8b\xae\x54\x9e\xca\x5d\xea\x8d\x59\x91\x70\x2d\x89\x1f\xa5\x02\x25\xa6\xa0\xdb\x72\x95\x72\xa4\x88\x1a\xa4\x47\xcb\x48\x3a\xcd\x06\x2d\xbd\x1a\x60\xd1\x97\x56\xbb\x96\xc6\x4c\x6a\xa6\x1a\xe0\x9a\x75\x6f\x19\xaf\x7a\xa6\x5a\x3f\xe0\x92\xfb\x
c7\xc0\x62\xc8\xee\x07\x52\x02\x05\xd4\x5d\xcf\x65\x4f\x85\x1a\x0a\x83\x5c\x4f\x33\xf0\x8f\xa1\x6f\xa9\xf9\x33\xb4\x1f\x8c\x56\xbb\x41\x70\x6f\x91\xcb\x22\x68\x49\x7d\xf1\xf5\x99\x25\x73\xba\xeb\xab\xc4\x0a\x53\x94\xf4\x94\x88\xb8\xf5\x73\xd4\x83\x14\x68\xec\x4d\x88\xb5\x22\x31\x0a\x18\x89\x07\x32\x86\xc2\x48\xed\x07\xb0\x45\xd0\x88\x82\x84\x4a\xa0\x28\xb9\x0f\x01\x74\x37\x4c\x95\xd8\xad\xc4\x88\x24\x3d\x1c\x35\x25\xbd\x5c\x07\x62\x35\x08\x84\xae\xcd\x26\x3c\xc9\x29\xe3\x2b\xa8\x05\xc2\x58\x99\x4b\x1d\xbd\x18\x18\x01\xd7\xa1\xd5\x45\x47\x2b\x9f\x98\x82\x4b\x48\x65\x3e\xd5\xfe\xa4\xd8\xaa\x22\x69\x8e\xf6\xff\x88\xeb\x66\x8f\x1d\x09\x74\xef\xbc\x3f\x80\xb3\x5f\xdb\x0a\xf6\x86\xa0\xe0\x6d\x30\x6f\x5e\x86\x8a\x4e\x8b\xf7\xf9\x63\xc1\x0d\x29\xce\xfb\xe0\x35\x76\x65\x4a\xe5\x53\xe5\x19\x78\x3c\x56\x9d\x8b\x57\x70\x8f\x73\x6b\x70\x6b\x6a\x62\x26\xc4\x21\x28\x7d\x86\xef\x9c\xb9\x7b\x87\x02\xde\x03\x7d\x49\x4d\xae\x9c\xe4\x5f\x99\xeb\xc3\xdf\xa0\xb6\xb8\xb3\x92\xff\x10\x49\x75\xaf\x9f\x91\x5b\x5c\x53\xb9\xd1\xd5\x71\xa6\x14\xda\xae\x83\x20\x8c\x83\xba\x67\xc2\x25\xe3\xba\xf0\xbb\xe6\x41\xf3\xcc\xe6\xe8\xdd\xdb\xd9\x04\x9c\x0f\x1f\x1a\x9d\x0f\xfc\xed\x0a\x6a\x15\xe8\x8e\x67\x93\x64\x31\xe9\xd6\xdd\x32\x1c\xc2\x37\x0c\xe3\x5e\xaa\x64\x2a\xd4\x0e\x04\x2a\xf4\x58\xc5\x65\x74\xe0\xb5\xd5\xbd\x01\xcd\x6d\x34\x51\xe1\xa3\x74\x5e\xea\x1c\xaa\x2e\xdb\xd2\xf0\xd0\x76\xcc\x86\x00\x4b\xa2\xe7\xf4\xa5\xf5\x86\xc6\x26\x8b\x24\x9e\xf4\xc8\x05\x4d\x61\x4a\xb6\x63\x56\x26\xad\xf3\x50\x28\xc6\x31\x34\x59\x1b\xcc\xcb\x45\x51\xcb\x15\xb9\x9e\x05\x9d\x09\x40\xfb\x57\x9c\x29\x9a\x02\xc8\xbd\x83\x5e\x83\xd1\x8f\x3a\x1d\xdb\x58\x1f\x60\x5f\xec\x75\xcf\x79\x2c\x0e\x55\x8f\xa6\x27\xdc\x20\xbd\x13\x41\xf2\xaa\x17\x9f\x7c\xfd\xeb\xb7\x7a\xc4\x40\x17\x47\x1d\x3a\x77\x20\x5e\xca\xe4\x7b\xf1\x22\x31\x10\x15\x2d\xbc\xb4\xf6\x40\x6e\x40\x66\x24\x0c\x7f\x96\xce\x13\xa7\x96\xe8\xa9\x25\xf1\x75\xd1\x7b\x43\xf3\xea\xa7\xb8\x1a\x59\x0b\xe3\x51\x7b\xc9\x94\xda\xd1\x3d\x6c\x2d\xcd\x6a\x34\x9d\x0d\xc0\x49\xb2\x0a\x32\x15\x4c\xa5\xe6\xaa\x14\x55\x19\x84\xe2\xaf\xf1\x5c\x88\xf9\x78\xc8\x5b\xa3\x73\x2c\xc7\x98\x2a\x29\x93\x8f\xf5\x98\xac\xa1\x5b\x29\x79\xaf\xdf\x7d\x49\x30\x95\xc9\xe3\xa6\xc8\xe8\x2d\x4a\x84\xb0\xe8\x5c\xaf\x7f\xa0\xa1\x55\x8d\x2e\x51\x13\xf9\xa0\x71\x0b\xed\xfc\xc5\x38\xa7\x79\x54\x0c\x80\x09\x41\x7a\x78\x32\x2b\x45\x9d\x8e\xdb\x4a\xcf\x97\x10\x3c\x99\x62\xdf\x8b\xfd\xba\xfe\x39\x73\x08\xef\x26\xff\x5e\x8c\x6f\x3f\x4f\xc6\xb7\x77\x0f\xef\x46\x70\xb4\x36\x9f\xfe\x67\x72\xba\x76\x95\xcc\xaf\xda\xb5\x4f\xc9\x75\x72\x33\x9e\xbc\x1b\x85\xa1\xe4\x99\x24\xbd\x69\xd2\xa2\x20\x9c\x67\x7c\x15\x17\x88\xab\xde\xc7\x63\x6d\xd8\x27\xdd\xe9\xa4\x16\xd9\xea\x62\x1f\x60\xd5\xb4\xb5\x8f\x46\xbb\xe1\x12\x5e\x24\xf0\xe2\xe5\x68\xc6\xb5\x7d\xaf\x79\x11\xf6\x33\x58\x90\x8f\xb7\xe3\x38\xff\xcb\x81\x84\x7e\x62\x7c\x35\x02\xc7\x14\x8d\xfe\xf2\xbf\xf4\x97\x2d\xcb\x1c\xfa\x01\xa0\x16\x66\x4b\x6a\xd8\xa2\x56\x3b\x35\xee\x01\x65\x67\xfd\x4a\x8a\x6f\xb3\x5e\xbf\x35\x26\xb0\x1f\x4d\xcf\x9f\x33\x45\x2d\xe0\xb2\x41\x7f\x1f\x4e\xbe\x4d\xd4\x79\xcd\xd4\x89\x83\x5f\x4e\x46\xdb\xb0\xbf\xc6\xb5\xb1\xbb\xfa\x5d\x3b\xc8\xef\x75\x56\x93\xeb\xeb\xb6\x9e\xe8\x83\x8a\xac\x5d\xf8\x3c\xb9\x9e\x7c\x4d\x16\x93\x23\xab\xf9\x22\x59\x4c\xc7\xd5\xd2\x5f\x2e\xbc\xb3\x9f\x2e\xbc\xee\x7c\xbe\xb8\x9d\x4d\xba\xa3\xfa\xeb\xfa\x36\xf9\xdc\xfd\xc1\x61\x3d\xfe\xbe\xd6\xce\xde\x7c\x33\x56\xfc\x3f\x1d\x70\x30\x8a\x66\xec\xb9\x49\x34\xc8\x3d\xf7\xe5\xc9\x3f\x3d\x60\xba\x51\xea\xac\xfa\xb7\xdb\x09\xe7\x9f\xd5\xe6\xa7\xe8\x29\xfa\x5f\x00\x00\x00\xff\xff\xfa\x53\xfa\x80\x83\x11\x00\x00")
-func prestate_tracerJsBytes() ([]byte, error) {
+func prestate_tracer_legacyJsBytes() ([]byte, error) {
return bindataRead(
- _prestate_tracerJs,
- "prestate_tracer.js",
+ _prestate_tracer_legacyJs,
+ "prestate_tracer_legacy.js",
)
}
-func prestate_tracerJs() (*asset, error) {
- bytes, err := prestate_tracerJsBytes()
+func prestate_tracer_legacyJs() (*asset, error) {
+ bytes, err := prestate_tracer_legacyJsBytes()
if err != nil {
return nil, err
}
- info := bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0x9, 0xf9, 0x44, 0x13, 0x31, 0x89, 0xf7, 0x35, 0x9a, 0xc6, 0xf0, 0x86, 0x9d, 0xb2, 0xe3, 0x57, 0xe2, 0xc0, 0xde, 0xc9, 0x3a, 0x4c, 0x4a, 0x94, 0x90, 0xa5, 0x92, 0x2f, 0xbf, 0xc0, 0xb8}}
+ info := bindataFileInfo{name: "prestate_tracer_legacy.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x94, 0xcf, 0x10, 0x37, 0xae, 0x8f, 0xd5, 0xfe, 0xf3, 0x25, 0x15, 0x25, 0x9b, 0x6b, 0x56, 0x7b, 0x3c, 0xa9, 0xda, 0xe8, 0xa2, 0xd3, 0x5, 0x96, 0x9c, 0xfd, 0x23, 0x68, 0xa2, 0x5, 0xca, 0x16}}
return a, nil
}
@@ -369,16 +348,15 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
- "4byte_tracer_legacy.js": _4byte_tracer_legacyJs,
- "bigram_tracer.js": bigram_tracerJs,
- "call_tracer_js.js": call_tracer_jsJs,
- "call_tracer_legacy.js": call_tracer_legacyJs,
- "evmdis_tracer.js": evmdis_tracerJs,
- "noop_tracer.js": noop_tracerJs,
- "opcount_tracer.js": opcount_tracerJs,
- "prestate_tracer.js": prestate_tracerJs,
- "trigram_tracer.js": trigram_tracerJs,
- "unigram_tracer.js": unigram_tracerJs,
+ "4byte_tracer_legacy.js": _4byte_tracer_legacyJs,
+ "bigram_tracer.js": bigram_tracerJs,
+ "call_tracer_legacy.js": call_tracer_legacyJs,
+ "evmdis_tracer.js": evmdis_tracerJs,
+ "noop_tracer_legacy.js": noop_tracer_legacyJs,
+ "opcount_tracer.js": opcount_tracerJs,
+ "prestate_tracer_legacy.js": prestate_tracer_legacyJs,
+ "trigram_tracer.js": trigram_tracerJs,
+ "unigram_tracer.js": unigram_tracerJs,
}
// AssetDebug is true if the assets were built with the debug flag enabled.
@@ -425,16 +403,15 @@ type bintree struct {
}
var _bintree = &bintree{nil, map[string]*bintree{
- "4byte_tracer_legacy.js": {_4byte_tracer_legacyJs, map[string]*bintree{}},
- "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}},
- "call_tracer_js.js": {call_tracer_jsJs, map[string]*bintree{}},
- "call_tracer_legacy.js": {call_tracer_legacyJs, map[string]*bintree{}},
- "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}},
- "noop_tracer.js": {noop_tracerJs, map[string]*bintree{}},
- "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}},
- "prestate_tracer.js": {prestate_tracerJs, map[string]*bintree{}},
- "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}},
- "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}},
+ "4byte_tracer_legacy.js": {_4byte_tracer_legacyJs, map[string]*bintree{}},
+ "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}},
+ "call_tracer_legacy.js": {call_tracer_legacyJs, map[string]*bintree{}},
+ "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}},
+ "noop_tracer_legacy.js": {noop_tracer_legacyJs, map[string]*bintree{}},
+ "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}},
+ "prestate_tracer_legacy.js": {prestate_tracer_legacyJs, map[string]*bintree{}},
+ "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}},
+ "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}},
}}
// RestoreAsset restores an asset under the given directory.
diff --git a/eth/tracers/js/internal/tracers/call_tracer_js.js b/eth/tracers/js/internal/tracers/call_tracer_js.js
deleted file mode 100644
index 7da7bf216..000000000
--- a/eth/tracers/js/internal/tracers/call_tracer_js.js
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-
-// callFrameTracer uses the new call frame tracing methods to report useful information
-// about internal messages of a transaction.
-{
- callstack: [{}],
- fault: function(log, db) {},
- result: function(ctx, db) {
- // Prepare outer message info
- var result = {
- type: ctx.type,
- from: toHex(ctx.from),
- to: toHex(ctx.to),
- value: '0x' + ctx.value.toString(16),
- gas: '0x' + bigInt(ctx.gas).toString(16),
- gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16),
- input: toHex(ctx.input),
- output: toHex(ctx.output),
- }
- if (this.callstack[0].calls !== undefined) {
- result.calls = this.callstack[0].calls
- }
- if (this.callstack[0].error !== undefined) {
- result.error = this.callstack[0].error
- } else if (ctx.error !== undefined) {
- result.error = ctx.error
- }
- if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) {
- delete result.output
- }
-
- return this.finalize(result)
- },
- enter: function(frame) {
- var call = {
- type: frame.getType(),
- from: toHex(frame.getFrom()),
- to: toHex(frame.getTo()),
- input: toHex(frame.getInput()),
- gas: '0x' + bigInt(frame.getGas()).toString('16'),
- }
- if (frame.getValue() !== undefined){
- call.value='0x' + bigInt(frame.getValue()).toString(16)
- }
- this.callstack.push(call)
- },
- exit: function(frameResult) {
- var len = this.callstack.length
- if (len > 1) {
- var call = this.callstack.pop()
- call.gasUsed = '0x' + bigInt(frameResult.getGasUsed()).toString('16')
- var error = frameResult.getError()
- if (error === undefined) {
- call.output = toHex(frameResult.getOutput())
- } else {
- call.error = error
- if (call.type === 'CREATE' || call.type === 'CREATE2') {
- delete call.to
- }
- }
- len -= 1
- if (this.callstack[len-1].calls === undefined) {
- this.callstack[len-1].calls = []
- }
- this.callstack[len-1].calls.push(call)
- }
- },
- // finalize recreates a call object using the final desired field oder for json
- // serialization. This is a nicety feature to pass meaningfully ordered results
- // to users who don't interpret it, just display it.
- finalize: function(call) {
- var sorted = {
- type: call.type,
- from: call.from,
- to: call.to,
- value: call.value,
- gas: call.gas,
- gasUsed: call.gasUsed,
- input: call.input,
- output: call.output,
- error: call.error,
- time: call.time,
- calls: call.calls,
- }
- for (var key in sorted) {
- if (sorted[key] === undefined) {
- delete sorted[key]
- }
- }
- if (sorted.calls !== undefined) {
-		for (var i=0; i<sorted.calls.length; i++) {
-			sorted.calls[i] = this.finalize(sorted.calls[i])
-		}
-	}
-	return sorted
-	}
-}
diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go
new file mode 100644
--- /dev/null
+++ b/eth/tracers/native/prestate.go
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package native
+
+import (
+ "encoding/json"
+ "math/big"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/tracers"
+)
+
+func init() {
+ register("prestateTracer", newPrestateTracer)
+}
+
+type prestate = map[common.Address]*account
+type account struct {
+ Balance string `json:"balance"`
+ Nonce uint64 `json:"nonce"`
+ Code string `json:"code"`
+ Storage map[common.Hash]common.Hash `json:"storage"`
+}
+
+type prestateTracer struct {
+ env *vm.EVM
+ prestate prestate
+ create bool
+ to common.Address
+ interrupt uint32 // Atomic flag to signal execution interruption
+ reason error // Textual reason for the interruption
+}
+
+func newPrestateTracer() tracers.Tracer {
+ // First callframe contains tx context info
+ // and is populated on start and end.
+ return &prestateTracer{prestate: prestate{}}
+}
+
+// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
+func (t *prestateTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+ t.env = env
+ t.create = create
+ t.to = to
+
+ // Compute intrinsic gas
+ isHomestead := env.ChainConfig().IsHomestead(env.Context.BlockNumber)
+ isIstanbul := env.ChainConfig().IsIstanbul(env.Context.BlockNumber)
+ intrinsicGas, err := core.IntrinsicGas(input, nil, create, isHomestead, isIstanbul)
+ if err != nil {
+ return
+ }
+
+ t.lookupAccount(from)
+ t.lookupAccount(to)
+
+ // The recipient balance includes the value transferred.
+ toBal := hexutil.MustDecodeBig(t.prestate[to].Balance)
+ toBal = new(big.Int).Sub(toBal, value)
+ t.prestate[to].Balance = hexutil.EncodeBig(toBal)
+
+ // The sender balance is after reducing: value, gasLimit, intrinsicGas.
+ // We need to re-add them to get the pre-tx balance.
+ fromBal := hexutil.MustDecodeBig(t.prestate[from].Balance)
+ gasPrice := env.TxContext.GasPrice
+ consumedGas := new(big.Int).Mul(
+ gasPrice,
+ new(big.Int).Add(
+ new(big.Int).SetUint64(intrinsicGas),
+ new(big.Int).SetUint64(gas),
+ ),
+ )
+ fromBal.Add(fromBal, new(big.Int).Add(value, consumedGas))
+ t.prestate[from].Balance = hexutil.EncodeBig(fromBal)
+ t.prestate[from].Nonce--
+}
+
+// CaptureEnd is called after the call finishes to finalize the tracing.
+func (t *prestateTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) {
+ if t.create {
+ // Exclude created contract.
+ delete(t.prestate, t.to)
+ }
+}
+
+// CaptureState implements the EVMLogger interface to trace a single step of VM execution.
+func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+ stack := scope.Stack
+ stackData := stack.Data()
+ stackLen := len(stackData)
+ switch {
+ case stackLen >= 1 && (op == vm.SLOAD || op == vm.SSTORE):
+ slot := common.Hash(stackData[stackLen-1].Bytes32())
+ t.lookupStorage(scope.Contract.Address(), slot)
+ case stackLen >= 1 && (op == vm.EXTCODECOPY || op == vm.EXTCODEHASH || op == vm.EXTCODESIZE || op == vm.BALANCE || op == vm.SELFDESTRUCT):
+ addr := common.Address(stackData[stackLen-1].Bytes20())
+ t.lookupAccount(addr)
+ case stackLen >= 5 && (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE):
+ addr := common.Address(stackData[stackLen-2].Bytes20())
+ t.lookupAccount(addr)
+ case op == vm.CREATE:
+ addr := scope.Contract.Address()
+ nonce := t.env.StateDB.GetNonce(addr)
+ t.lookupAccount(crypto.CreateAddress(addr, nonce))
+ case stackLen >= 4 && op == vm.CREATE2:
+ offset := stackData[stackLen-2]
+ size := stackData[stackLen-3]
+ init := scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64()))
+ inithash := crypto.Keccak256(init)
+ salt := stackData[stackLen-4]
+ t.lookupAccount(crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), inithash))
+ }
+}
+
+// CaptureFault implements the EVMLogger interface to trace an execution fault.
+func (t *prestateTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) {
+}
+
+// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct).
+func (t *prestateTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+// CaptureExit is called when EVM exits a scope, even if the scope didn't
+// execute any code.
+func (t *prestateTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
+}
+
+// GetResult returns the json-encoded nested list of call traces, and any
+// error arising from the encoding or forceful termination (via `Stop`).
+func (t *prestateTracer) GetResult() (json.RawMessage, error) {
+ res, err := json.Marshal(t.prestate)
+ if err != nil {
+ return nil, err
+ }
+ return json.RawMessage(res), t.reason
+}
+
+// Stop terminates execution of the tracer at the first opportune moment.
+func (t *prestateTracer) Stop(err error) {
+ t.reason = err
+ atomic.StoreUint32(&t.interrupt, 1)
+}
+
+// lookupAccount fetches details of an account and adds it to the prestate
+// if it doesn't exist there.
+func (t *prestateTracer) lookupAccount(addr common.Address) {
+ if _, ok := t.prestate[addr]; ok {
+ return
+ }
+ t.prestate[addr] = &account{
+ Balance: bigToHex(t.env.StateDB.GetBalance(addr)),
+ Nonce: t.env.StateDB.GetNonce(addr),
+ Code: bytesToHex(t.env.StateDB.GetCode(addr)),
+ Storage: make(map[common.Hash]common.Hash),
+ }
+}
+
+// lookupStorage fetches the requested storage slot and adds
+// it to the prestate of the given contract. It assumes `lookupAccount`
+// has been performed on the contract before.
+func (t *prestateTracer) lookupStorage(addr common.Address, key common.Hash) {
+ if _, ok := t.prestate[addr].Storage[key]; ok {
+ return
+ }
+ t.prestate[addr].Storage[key] = t.env.StateDB.GetState(addr, key)
+}
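
The file above registers the prestate tracer as a native Go implementation under the same name the JS version was invoked by, so callers select it exactly as before; note how CaptureStart reconstructs the sender's pre-transaction balance by re-adding the transferred value plus gasPrice*(gas + intrinsicGas) to the balance it observes. A minimal sketch of driving the tracer over the debug API follows; the endpoint URL and transaction hash are placeholders, and the node is assumed to expose the debug namespace.

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint of a node exposing the debug namespace.
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	// Placeholder transaction hash.
	txHash := "0x0000000000000000000000000000000000000000000000000000000000000000"

	// Select the tracer by the name it registered under in init().
	var result json.RawMessage
	err = client.CallContext(context.Background(), &result, "debug_traceTransaction",
		txHash, map[string]string{"tracer": "prestateTracer"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(result)) // address -> {balance, nonce, code, storage}
}
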
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index e6a93c96f..68389efbf 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -456,6 +456,17 @@ func (ec *Client) CallContract(ctx context.Context, msg ethereum.CallMsg, blockN
return hex, nil
}
+// CallContractAtHash is almost the same as CallContract except that it selects
+// the block by block hash instead of block height.
+func (ec *Client) CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) {
+ var hex hexutil.Bytes
+ err := ec.c.CallContext(ctx, &hex, "eth_call", toCallArg(msg), rpc.BlockNumberOrHashWithHash(blockHash, false))
+ if err != nil {
+ return nil, err
+ }
+ return hex, nil
+}
+
// PendingCallContract executes a message call transaction using the EVM.
// The state seen by the contract call is the pending state.
func (ec *Client) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) {
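
CallContractAtHash pins the call to one specific block rather than to a height that could be reorged under the caller. A minimal usage sketch of the new helper (the endpoint URL and call target are placeholders):

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Fetch the latest header to obtain a concrete block hash to execute against.
	header, err := client.HeaderByNumber(ctx, nil)
	if err != nil {
		panic(err)
	}

	to := common.HexToAddress("0x0000000000000000000000000000000000000000") // placeholder contract
	msg := ethereum.CallMsg{To: &to}

	out, err := client.CallContractAtHash(ctx, msg, header.Hash())
	if err != nil {
		panic(err)
	}
	fmt.Printf("call returned %x\n", out)
}
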
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index d56febc91..4a8727b37 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -285,6 +285,9 @@ func TestEthClient(t *testing.T) {
"CallContract": {
func(t *testing.T) { testCallContract(t, client) },
},
+ "CallContractAtHash": {
+ func(t *testing.T) { testCallContractAtHash(t, client) },
+ },
"AtFunctions": {
func(t *testing.T) { testAtFunctions(t, client) },
},
@@ -507,6 +510,33 @@ func testStatusFunctions(t *testing.T, client *rpc.Client) {
}
}
+func testCallContractAtHash(t *testing.T, client *rpc.Client) {
+ ec := NewClient(client)
+
+ // EstimateGas
+ msg := ethereum.CallMsg{
+ From: testAddr,
+ To: &common.Address{},
+ Gas: 21000,
+ Value: big.NewInt(1),
+ }
+ gas, err := ec.EstimateGas(context.Background(), msg)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if gas != 21000 {
+ t.Fatalf("unexpected gas price: %v", gas)
+ }
+ block, err := ec.HeaderByNumber(context.Background(), big.NewInt(1))
+ if err != nil {
+ t.Fatalf("BlockByNumber error: %v", err)
+ }
+ // CallContract
+ if _, err := ec.CallContractAtHash(context.Background(), msg, block.Hash()); err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
func testCallContract(t *testing.T, client *rpc.Client) {
ec := NewClient(client)
diff --git a/go.mod b/go.mod
index fe9b848ac..62f87601e 100644
--- a/go.mod
+++ b/go.mod
@@ -17,7 +17,7 @@ require (
github.com/cloudflare/cloudflare-go v0.14.0
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f
github.com/davecgh/go-spew v1.1.1
- github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea
+ github.com/deckarep/golang-set v1.8.0
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48
@@ -32,7 +32,7 @@ require (
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
github.com/google/uuid v1.1.5
github.com/gorilla/websocket v1.4.2
- github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29
+ github.com/graph-gophers/graphql-go v1.3.0
github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/holiman/bloomfilter/v2 v2.0.3
@@ -41,10 +41,10 @@ require (
github.com/influxdata/influxdb v1.8.3
github.com/influxdata/influxdb-client-go/v2 v2.4.0
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
- github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
+ github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
github.com/julienschmidt/httprouter v1.2.0
- github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559
+ github.com/karalabe/usb v0.0.2
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.12
diff --git a/go.sum b/go.sum
index 3e36b359e..34682f325 100644
--- a/go.sum
+++ b/go.sum
@@ -111,8 +111,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
-github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
+github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
+github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
@@ -216,8 +216,8 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M=
-github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
+github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
+github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -248,8 +248,8 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
-github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
-github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
@@ -265,8 +265,8 @@ github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
-github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 h1:0VWDXPNE0brOek1Q8bLfzKkvOzwbQE/snjGojlCr8CY=
-github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
+github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
+github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
index 4e0f099e4..a0b797906 100644
--- a/graphql/graphql_test.go
+++ b/graphql/graphql_test.go
@@ -48,6 +48,7 @@ func TestBuildSchema(t *testing.T) {
conf := node.DefaultConfig
conf.DataDir = ddir
stack, err := node.New(&conf)
+ defer stack.Close()
if err != nil {
t.Fatalf("could not create new node: %v", err)
}
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 65e34752b..366d57141 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -287,7 +287,7 @@ func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI {
}
}
-// listAccounts will return a list of addresses for accounts this node manages.
+// ListAccounts will return a list of addresses for accounts this node manages.
func (s *PrivateAccountAPI) ListAccounts() []common.Address {
return s.am.Accounts()
}
@@ -767,8 +767,7 @@ func (s *PublicBlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Ha
return nil, err
}
-// GetUncleByBlockNumberAndIndex returns the uncle block for the given block hash and index. When fullTx is true
-// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned.
+// GetUncleByBlockNumberAndIndex returns the uncle block for the given block hash and index.
func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) {
block, err := s.b.BlockByNumber(ctx, blockNr)
if block != nil {
@@ -783,8 +782,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context,
return nil, err
}
-// GetUncleByBlockHashAndIndex returns the uncle block for the given block hash and index. When fullTx is true
-// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned.
+// GetUncleByBlockHashAndIndex returns the uncle block for the given block hash and index.
func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) (map[string]interface{}, error) {
block, err := s.b.BlockByHash(ctx, blockHash)
if block != nil {
@@ -1432,8 +1430,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
} else {
to = crypto.CreateAddress(args.from(), uint64(*args.Nonce))
}
+ isPostMerge := header.Difficulty.Cmp(common.Big0) == 0
// Retrieve the precompiles since they don't need to be added to the access list
- precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number))
+ precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, isPostMerge))
// Create an initial tracer
prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles)
@@ -1657,7 +1656,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, ha
fields["status"] = hexutil.Uint(receipt.Status)
}
if receipt.Logs == nil {
- fields["logs"] = [][]*types.Log{}
+ fields["logs"] = []*types.Log{}
}
// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
if receipt.ContractAddress != (common.Address{}) {
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index 2d08d3008..9c5950af5 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -55,20 +55,20 @@ type TransactionArgs struct {
}
// from retrieves the transaction sender address.
-func (arg *TransactionArgs) from() common.Address {
- if arg.From == nil {
+func (args *TransactionArgs) from() common.Address {
+ if args.From == nil {
return common.Address{}
}
- return *arg.From
+ return *args.From
}
// data retrieves the transaction calldata. Input field is preferred.
-func (arg *TransactionArgs) data() []byte {
- if arg.Input != nil {
- return *arg.Input
+func (args *TransactionArgs) data() []byte {
+ if args.Input != nil {
+ return *args.Input
}
- if arg.Data != nil {
- return *arg.Data
+ if args.Data != nil {
+ return *args.Data
}
return nil
}
diff --git a/internal/jsre/jsre_test.go b/internal/jsre/jsre_test.go
index bc38f7a44..57acdaed9 100644
--- a/internal/jsre/jsre_test.go
+++ b/internal/jsre/jsre_test.go
@@ -83,20 +83,20 @@ func TestNatto(t *testing.T) {
err := jsre.Exec("test.js")
if err != nil {
- t.Errorf("expected no error, got %v", err)
+ t.Fatalf("expected no error, got %v", err)
}
time.Sleep(100 * time.Millisecond)
val, err := jsre.Run("msg")
if err != nil {
- t.Errorf("expected no error, got %v", err)
+ t.Fatalf("expected no error, got %v", err)
}
if val.ExportType().Kind() != reflect.String {
- t.Errorf("expected string value, got %v", val)
+ t.Fatalf("expected string value, got %v", val)
}
exp := "testMsg"
got := val.ToString().String()
if exp != got {
- t.Errorf("expected '%v', got '%v'", exp, got)
+ t.Fatalf("expected '%v', got '%v'", exp, got)
}
jsre.Stop(false)
}
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index c4bdbaeb8..87bf46415 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -576,6 +576,11 @@ web3._extend({
params: 3,
inputFormatter: [null, web3._extend.formatters.inputBlockNumberFormatter, null]
}),
+ new web3._extend.Method({
+ name: 'getLogs',
+ call: 'eth_getLogs',
+ params: 1,
+ }),
],
properties: [
new web3._extend.Property({
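
The console addition is a thin wrapper around eth_getLogs, so the same query can be issued from the geth JavaScript console or programmatically through the raw RPC client. A minimal Go sketch of the equivalent request (the endpoint, block range and address are placeholders):

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	// Filter object mirroring what the console method forwards to eth_getLogs.
	filter := map[string]interface{}{
		"fromBlock": "0x1",
		"toBlock":   "latest",
		"address":   common.HexToAddress("0x0000000000000000000000000000000000000000"),
	}
	var logs []types.Log
	if err := client.CallContext(context.Background(), &logs, "eth_getLogs", filter); err != nil {
		panic(err)
	}
	fmt.Println("matched logs:", len(logs))
}
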
diff --git a/les/catalyst/api.go b/les/catalyst/api.go
new file mode 100644
index 000000000..5f5193c3b
--- /dev/null
+++ b/les/catalyst/api.go
@@ -0,0 +1,178 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package catalyst implements the temporary eth1/eth2 RPC integration.
+package catalyst
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/beacon"
+ "github.com/ethereum/go-ethereum/les"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+// Register adds catalyst APIs to the light client.
+func Register(stack *node.Node, backend *les.LightEthereum) error {
+ log.Warn("Catalyst mode enabled", "protocol", "les")
+ stack.RegisterAPIs([]rpc.API{
+ {
+ Namespace: "engine",
+ Version: "1.0",
+ Service: NewConsensusAPI(backend),
+ Public: true,
+ },
+ })
+ return nil
+}
+
+type ConsensusAPI struct {
+ les *les.LightEthereum
+}
+
+// NewConsensusAPI creates a new consensus api for the given backend.
+// The underlying blockchain needs to have a valid terminal total difficulty set.
+func NewConsensusAPI(les *les.LightEthereum) *ConsensusAPI {
+ if les.BlockChain().Config().TerminalTotalDifficulty == nil {
+ panic("Catalyst started without valid total difficulty")
+ }
+ return &ConsensusAPI{les: les}
+}
+
+// ForkchoiceUpdatedV1 has several responsibilities:
+// If the method is called with an empty head block:
+// we return success, which can be used to check if the catalyst mode is enabled
+// If the total difficulty was not reached:
+// we return INVALID
+// If the finalizedBlockHash is set:
+// we check if we have the finalizedBlockHash in our db, if not we start a sync
+// We try to set our blockchain to the headBlock
+// If there are payloadAttributes:
+// we return an error since block creation is not supported in les mode
+func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
+ if heads.HeadBlockHash == (common.Hash{}) {
+ return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: nil}, nil
+ }
+ if err := api.checkTerminalTotalDifficulty(heads.HeadBlockHash); err != nil {
+ if header := api.les.BlockChain().GetHeaderByHash(heads.HeadBlockHash); header == nil {
+ // TODO (MariusVanDerWijden) trigger sync
+ return beacon.SYNCING, nil
+ }
+ return beacon.INVALID, err
+ }
+ // If the finalized block is set, check if it is in our blockchain
+ if heads.FinalizedBlockHash != (common.Hash{}) {
+ if header := api.les.BlockChain().GetHeaderByHash(heads.FinalizedBlockHash); header == nil {
+ // TODO (MariusVanDerWijden) trigger sync
+ return beacon.SYNCING, nil
+ }
+ }
+ // SetHead
+ if err := api.setHead(heads.HeadBlockHash); err != nil {
+ return beacon.INVALID, err
+ }
+ if payloadAttributes != nil {
+ return beacon.INVALID, errors.New("not supported")
+ }
+ return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: nil}, nil
+}
+
+// GetPayloadV1 returns a cached payload by id. It's not supported in les mode.
+func (api *ConsensusAPI) GetPayloadV1(payloadID beacon.PayloadID) (*beacon.ExecutableDataV1, error) {
+ return nil, &beacon.GenericServerError
+}
+
+// ExecutePayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
+func (api *ConsensusAPI) ExecutePayloadV1(params beacon.ExecutableDataV1) (beacon.ExecutePayloadResponse, error) {
+ block, err := beacon.ExecutableDataToBlock(params)
+ if err != nil {
+ return api.invalid(), err
+ }
+ if !api.les.BlockChain().HasHeader(block.ParentHash(), block.NumberU64()-1) {
+ /*
+ TODO (MariusVanDerWijden) reenable once sync is merged
+ if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), block.Header()); err != nil {
+ return SYNCING, err
+ }
+ */
+ // TODO (MariusVanDerWijden) we should return nil here not empty hash
+ return beacon.ExecutePayloadResponse{Status: beacon.SYNCING.Status, LatestValidHash: common.Hash{}}, nil
+ }
+ parent := api.les.BlockChain().GetHeaderByHash(params.ParentHash)
+ if parent == nil {
+ return api.invalid(), fmt.Errorf("could not find parent %x", params.ParentHash)
+ }
+ td := api.les.BlockChain().GetTd(parent.Hash(), block.NumberU64()-1)
+ ttd := api.les.BlockChain().Config().TerminalTotalDifficulty
+ if td.Cmp(ttd) < 0 {
+ return api.invalid(), fmt.Errorf("can not execute payload on top of block with low td got: %v threshold %v", td, ttd)
+ }
+ if err = api.les.BlockChain().InsertHeader(block.Header()); err != nil {
+ return api.invalid(), err
+ }
+ if merger := api.les.Merger(); !merger.TDDReached() {
+ merger.ReachTTD()
+ }
+ return beacon.ExecutePayloadResponse{Status: beacon.VALID.Status, LatestValidHash: block.Hash()}, nil
+}
+
+// invalid returns a response "INVALID" with the latest valid hash set to the current head.
+func (api *ConsensusAPI) invalid() beacon.ExecutePayloadResponse {
+ return beacon.ExecutePayloadResponse{Status: beacon.INVALID.Status, LatestValidHash: api.les.BlockChain().CurrentHeader().Hash()}
+}
+
+func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
+ // shortcut if we entered PoS already
+ if api.les.Merger().PoSFinalized() {
+ return nil
+ }
+ // make sure the parent has enough terminal total difficulty
+ header := api.les.BlockChain().GetHeaderByHash(head)
+ if header == nil {
+ return &beacon.GenericServerError
+ }
+ td := api.les.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
+ if td != nil && td.Cmp(api.les.BlockChain().Config().TerminalTotalDifficulty) < 0 {
+ return &beacon.InvalidTB
+ }
+ return nil
+}
+
+// setHead is called to perform a force choice.
+func (api *ConsensusAPI) setHead(newHead common.Hash) error {
+ log.Info("Setting head", "head", newHead)
+
+ headHeader := api.les.BlockChain().CurrentHeader()
+ if headHeader.Hash() == newHead {
+ return nil
+ }
+ newHeadHeader := api.les.BlockChain().GetHeaderByHash(newHead)
+ if newHeadHeader == nil {
+ return &beacon.GenericServerError
+ }
+ if err := api.les.BlockChain().SetChainHead(newHeadHeader); err != nil {
+ return err
+ }
+ // Trigger the transition if it's the first `NewHead` event.
+ if merger := api.les.Merger(); !merger.PoSFinalized() {
+ merger.FinalizePoS()
+ }
+ return nil
+}
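
The new package mirrors the existing catalyst API of full nodes for the light client: Register exposes the engine namespace, ForkchoiceUpdatedV1 only moves the head (block building returns an error), and ExecutePayloadV1 inserts headers. A rough wiring sketch follows, assuming a genesis whose chain config sets TerminalTotalDifficulty (NewConsensusAPI panics without it); the config values are illustrative only.

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/les/catalyst"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	stack, err := node.New(&node.Config{})
	if err != nil {
		panic(err)
	}
	defer stack.Close()

	// The chain config must carry a terminal total difficulty, otherwise
	// NewConsensusAPI (called from Register) panics. Zero is illustrative only.
	chainCfg := *params.AllEthashProtocolChanges
	chainCfg.TerminalTotalDifficulty = big.NewInt(0)

	ethCfg := ethconfig.Defaults
	ethCfg.SyncMode = downloader.LightSync
	ethCfg.Genesis = &core.Genesis{
		Config:  &chainCfg,
		BaseFee: big.NewInt(params.InitialBaseFee),
	}

	backend, err := les.New(stack, &ethCfg)
	if err != nil {
		panic(err)
	}
	if err := catalyst.Register(stack, backend); err != nil {
		panic(err)
	}
	if err := stack.Start(); err != nil {
		panic(err)
	}
}
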
diff --git a/les/catalyst/api_test.go b/les/catalyst/api_test.go
new file mode 100644
index 000000000..c1cbf645c
--- /dev/null
+++ b/les/catalyst/api_test.go
@@ -0,0 +1,244 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package catalyst
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/beacon"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/eth/ethconfig"
+ "github.com/ethereum/go-ethereum/les"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+var (
+ // testKey is a private key to use for funding a tester account.
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+
+ // testAddr is the Ethereum address of the tester account.
+ testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
+
+ testBalance = big.NewInt(2e18)
+)
+
+func generatePreMergeChain(n int) (*core.Genesis, []*types.Header, []*types.Block) {
+ db := rawdb.NewMemoryDatabase()
+ config := params.AllEthashProtocolChanges
+ genesis := &core.Genesis{
+ Config: config,
+ Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}},
+ ExtraData: []byte("test genesis"),
+ Timestamp: 9000,
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ gblock := genesis.ToBlock(db)
+ engine := ethash.NewFaker()
+ blocks, _ := core.GenerateChain(config, gblock, engine, db, n, nil)
+ totalDifficulty := big.NewInt(0)
+
+ var headers []*types.Header
+ for _, b := range blocks {
+ totalDifficulty.Add(totalDifficulty, b.Difficulty())
+ headers = append(headers, b.Header())
+ }
+ config.TerminalTotalDifficulty = totalDifficulty
+
+ return genesis, headers, blocks
+}
+
+func TestSetHeadBeforeTotalDifficulty(t *testing.T) {
+ genesis, headers, blocks := generatePreMergeChain(10)
+ n, lesService := startLesService(t, genesis, headers)
+ defer n.Close()
+
+ api := NewConsensusAPI(lesService)
+ fcState := beacon.ForkchoiceStateV1{
+ HeadBlockHash: blocks[5].Hash(),
+ SafeBlockHash: common.Hash{},
+ FinalizedBlockHash: common.Hash{},
+ }
+ if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err == nil {
+ t.Errorf("fork choice updated before total terminal difficulty should fail")
+ }
+}
+
+func TestExecutePayloadV1(t *testing.T) {
+ genesis, headers, blocks := generatePreMergeChain(10)
+ n, lesService := startLesService(t, genesis, headers[:9])
+ lesService.Merger().ReachTTD()
+ defer n.Close()
+
+ api := NewConsensusAPI(lesService)
+ fcState := beacon.ForkchoiceStateV1{
+ HeadBlockHash: blocks[8].Hash(),
+ SafeBlockHash: common.Hash{},
+ FinalizedBlockHash: common.Hash{},
+ }
+ if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
+ t.Errorf("Failed to update head %v", err)
+ }
+ block := blocks[9]
+
+ fakeBlock := types.NewBlock(&types.Header{
+ ParentHash: block.ParentHash(),
+ UncleHash: crypto.Keccak256Hash(nil),
+ Coinbase: block.Coinbase(),
+ Root: block.Root(),
+ TxHash: crypto.Keccak256Hash(nil),
+ ReceiptHash: crypto.Keccak256Hash(nil),
+ Bloom: block.Bloom(),
+ Difficulty: big.NewInt(0),
+ Number: block.Number(),
+ GasLimit: block.GasLimit(),
+ GasUsed: block.GasUsed(),
+ Time: block.Time(),
+ Extra: block.Extra(),
+ MixDigest: block.MixDigest(),
+ Nonce: types.BlockNonce{},
+ BaseFee: block.BaseFee(),
+ }, nil, nil, nil, trie.NewStackTrie(nil))
+
+ _, err := api.ExecutePayloadV1(beacon.ExecutableDataV1{
+ ParentHash: fakeBlock.ParentHash(),
+ FeeRecipient: fakeBlock.Coinbase(),
+ StateRoot: fakeBlock.Root(),
+ ReceiptsRoot: fakeBlock.ReceiptHash(),
+ LogsBloom: fakeBlock.Bloom().Bytes(),
+ Random: fakeBlock.MixDigest(),
+ Number: fakeBlock.NumberU64(),
+ GasLimit: fakeBlock.GasLimit(),
+ GasUsed: fakeBlock.GasUsed(),
+ Timestamp: fakeBlock.Time(),
+ ExtraData: fakeBlock.Extra(),
+ BaseFeePerGas: fakeBlock.BaseFee(),
+ BlockHash: fakeBlock.Hash(),
+ Transactions: encodeTransactions(fakeBlock.Transactions()),
+ })
+ if err != nil {
+ t.Errorf("Failed to execute payload %v", err)
+ }
+ headHeader := api.les.BlockChain().CurrentHeader()
+ if headHeader.Number.Uint64() != fakeBlock.NumberU64()-1 {
+ t.Fatal("Unexpected chain head update")
+ }
+ fcState = beacon.ForkchoiceStateV1{
+ HeadBlockHash: fakeBlock.Hash(),
+ SafeBlockHash: common.Hash{},
+ FinalizedBlockHash: common.Hash{},
+ }
+ if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
+ t.Fatal("Failed to update head")
+ }
+ headHeader = api.les.BlockChain().CurrentHeader()
+ if headHeader.Number.Uint64() != fakeBlock.NumberU64() {
+ t.Fatal("Failed to update chain head")
+ }
+}
+
+func TestEth2DeepReorg(t *testing.T) {
+ // TODO (MariusVanDerWijden) TestEth2DeepReorg is currently broken, because it tries to reorg
+ // before the terminal total difficulty threshold
+ /*
+ genesis, preMergeBlocks := generatePreMergeChain(core.TriesInMemory * 2)
+ n, ethservice := startEthService(t, genesis, preMergeBlocks)
+ defer n.Close()
+
+ var (
+ api = NewConsensusAPI(ethservice, nil)
+ parent = preMergeBlocks[len(preMergeBlocks)-core.TriesInMemory-1]
+ head = ethservice.BlockChain().CurrentBlock().NumberU64()
+ )
+ if ethservice.BlockChain().HasBlockAndState(parent.Hash(), parent.NumberU64()) {
+ t.Errorf("Block %d not pruned", parent.NumberU64())
+ }
+ for i := 0; i < 10; i++ {
+ execData, err := api.assembleBlock(AssembleBlockParams{
+ ParentHash: parent.Hash(),
+ Timestamp: parent.Time() + 5,
+ })
+ if err != nil {
+ t.Fatalf("Failed to create the executable data %v", err)
+ }
+ block, err := ExecutableDataToBlock(ethservice.BlockChain().Config(), parent.Header(), *execData)
+ if err != nil {
+ t.Fatalf("Failed to convert executable data to block %v", err)
+ }
+ newResp, err := api.ExecutePayload(*execData)
+ if err != nil || newResp.Status != "VALID" {
+ t.Fatalf("Failed to insert block: %v", err)
+ }
+ if ethservice.BlockChain().CurrentBlock().NumberU64() != head {
+ t.Fatalf("Chain head shouldn't be updated")
+ }
+ if err := api.setHead(block.Hash()); err != nil {
+ t.Fatalf("Failed to set head: %v", err)
+ }
+ if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() {
+ t.Fatalf("Chain head should be updated")
+ }
+ parent, head = block, block.NumberU64()
+ }
+ */
+}
+
+// startLesService creates a light client node instance for testing.
+func startLesService(t *testing.T, genesis *core.Genesis, headers []*types.Header) (*node.Node, *les.LightEthereum) {
+ t.Helper()
+
+ n, err := node.New(&node.Config{})
+ if err != nil {
+ t.Fatal("can't create node:", err)
+ }
+ ethcfg := &ethconfig.Config{
+ Genesis: genesis,
+ Ethash: ethash.Config{PowMode: ethash.ModeFake},
+ SyncMode: downloader.LightSync,
+ TrieDirtyCache: 256,
+ TrieCleanCache: 256,
+ LightPeers: 10,
+ }
+ lesService, err := les.New(n, ethcfg)
+ if err != nil {
+ t.Fatal("can't create eth service:", err)
+ }
+ if err := n.Start(); err != nil {
+ t.Fatal("can't start node:", err)
+ }
+ if _, err := lesService.BlockChain().InsertHeaderChain(headers, 0); err != nil {
+ n.Close()
+ t.Fatal("can't import test headers:", err)
+ }
+ return n, lesService
+}
+
+func encodeTransactions(txs []*types.Transaction) [][]byte {
+ var enc = make([][]byte, len(txs))
+ for i, tx := range txs {
+ enc[i], _ = tx.MarshalBinary()
+ }
+ return enc
+}
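encodeTransactions above marshals each transaction to its binary (typed-transaction) encoding for the engine payload; the reverse direction is the symmetric loop sketched below. This is only a hedged illustration of the inverse helper, not code from the patch:

package sketch // illustrative only

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// decodeTransactions unmarshals binary-encoded transactions back into
// types.Transaction values, failing on the first invalid entry.
func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
	txs := make([]*types.Transaction, len(enc))
	for i, data := range enc {
		tx := new(types.Transaction)
		if err := tx.UnmarshalBinary(data); err != nil {
			return nil, fmt.Errorf("transaction %d is invalid: %v", i, err)
		}
		txs[i] = tx
	}
	return txs, nil
}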
diff --git a/miner/miner.go b/miner/miner.go
index c8aaa5b92..20e12c240 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -35,10 +35,12 @@ import (
"github.com/ethereum/go-ethereum/params"
)
-// Backend wraps all methods required for mining.
+// Backend wraps all methods required for mining. Only a full node is capable
+// of offering all the functions here.
type Backend interface {
BlockChain() *core.BlockChain
TxPool() *core.TxPool
+ StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error)
}
// Config is the configuration parameters of mining.
@@ -68,7 +70,7 @@ type Miner struct {
wg sync.WaitGroup
}
-func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(header *types.Header) bool, merger *consensus.Merger) *Miner {
+func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(header *types.Header) bool) *Miner {
miner := &Miner{
eth: eth,
mux: mux,
@@ -76,7 +78,7 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even
exitCh: make(chan struct{}),
startCh: make(chan common.Address),
stopCh: make(chan struct{}),
- worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true, merger),
+ worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true),
}
miner.wg.Add(1)
go miner.update()
@@ -233,6 +235,12 @@ func (miner *Miner) DisablePreseal() {
miner.worker.disablePreseal()
}
+// GetSealingBlock retrieves a sealing block based on the given parameters.
+// The returned block is not sealed but all other fields should be filled.
+func (miner *Miner) GetSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash) (*types.Block, error) {
+ return miner.worker.getSealingBlock(parent, timestamp, coinbase, random)
+}
+
// SubscribePendingLogs starts delivering logs from pending transactions
// to the given channel.
func (miner *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription {
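The new GetSealingBlock entry point is what an engine-API style caller would use when it needs a payload after a forkchoice update carrying payload attributes. A hedged usage sketch (package, function and parameter names here are hypothetical, not part of the patch):

package sketch // illustrative only

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/miner"
)

// buildPayload asks the miner for an assembled but unsealed block on top of the
// given parent. The caller is responsible for converting the result into an
// executable payload for the consensus client.
func buildPayload(m *miner.Miner, parent common.Hash, timestamp uint64,
	feeRecipient common.Address, random common.Hash) (*types.Block, error) {
	return m.GetSealingBlock(parent, timestamp, feeRecipient, random)
}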
diff --git a/miner/miner_test.go b/miner/miner_test.go
index de7ca73e2..cf619845d 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -18,11 +18,11 @@
package miner
import (
+ "errors"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
@@ -55,6 +55,10 @@ func (m *mockBackend) TxPool() *core.TxPool {
return m.txPool
}
+func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
+ return nil, errors.New("not supported")
+}
+
type testBlockChain struct {
statedb *state.StateDB
gasLimit uint64
@@ -80,7 +84,8 @@ func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent)
}
func TestMiner(t *testing.T) {
- miner, mux := createMiner(t)
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
// Start the downloader
@@ -107,7 +112,8 @@ func TestMiner(t *testing.T) {
// An initial FailedEvent should allow mining to stop on a subsequent
// downloader StartEvent.
func TestMinerDownloaderFirstFails(t *testing.T) {
- miner, mux := createMiner(t)
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
// Start the downloader
@@ -138,8 +144,8 @@ func TestMinerDownloaderFirstFails(t *testing.T) {
}
func TestMinerStartStopAfterDownloaderEvents(t *testing.T) {
- miner, mux := createMiner(t)
-
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
// Start the downloader
@@ -161,7 +167,8 @@ func TestMinerStartStopAfterDownloaderEvents(t *testing.T) {
}
func TestStartWhileDownload(t *testing.T) {
- miner, mux := createMiner(t)
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
waitForMiningState(t, miner, false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
@@ -174,16 +181,19 @@ func TestStartWhileDownload(t *testing.T) {
}
func TestStartStopMiner(t *testing.T) {
- miner, _ := createMiner(t)
+ miner, _, cleanup := createMiner(t)
+ defer cleanup(false)
waitForMiningState(t, miner, false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
miner.Stop()
waitForMiningState(t, miner, false)
+
}
func TestCloseMiner(t *testing.T) {
- miner, _ := createMiner(t)
+ miner, _, cleanup := createMiner(t)
+ defer cleanup(true)
waitForMiningState(t, miner, false)
miner.Start(common.HexToAddress("0x12345"))
waitForMiningState(t, miner, true)
@@ -195,7 +205,8 @@ func TestCloseMiner(t *testing.T) {
// TestMinerSetEtherbase checks that etherbase becomes set even if mining isn't
// possible at the moment
func TestMinerSetEtherbase(t *testing.T) {
- miner, mux := createMiner(t)
+ miner, mux, cleanup := createMiner(t)
+ defer cleanup(false)
// Start with a 'bad' mining address
miner.Start(common.HexToAddress("0xdead"))
waitForMiningState(t, miner, true)
@@ -230,7 +241,7 @@ func waitForMiningState(t *testing.T, m *Miner, mining bool) {
t.Fatalf("Mining() == %t, want %t", state, mining)
}
-func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
+func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
// Create Ethash config
config := Config{
Etherbase: common.HexToAddress("123456789"),
@@ -246,7 +257,6 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
// Create consensus engine
engine := clique.New(chainConfig.Clique, chainDB)
// Create Ethereum backend
- merger := consensus.NewMerger(rawdb.NewMemoryDatabase())
bc, err := core.NewBlockChain(chainDB, nil, chainConfig, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("can't create new chain %v", err)
@@ -259,5 +269,14 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
// Create event Mux
mux := new(event.TypeMux)
// Create Miner
- return New(backend, &config, chainConfig, mux, engine, nil, merger), mux
+ miner := New(backend, &config, chainConfig, mux, engine, nil)
+ cleanup := func(skipMiner bool) {
+ bc.Stop()
+ engine.Close()
+ pool.Stop()
+ if !skipMiner {
+ miner.Close()
+ }
+ }
+ return miner, mux, cleanup
}
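Every test now has to remember its own `defer cleanup(...)`; a small wrapper around createMiner using t.Cleanup would remove that repetition. Sketch only, under the assumption that it lives in the same package as createMiner (the helper name is hypothetical):

package miner // illustrative only

import (
	"testing"

	"github.com/ethereum/go-ethereum/event"
)

// createMinerAutoCleanup wraps createMiner and registers the returned cleanup
// closure with the testing framework, so callers can drop the explicit defer.
func createMinerAutoCleanup(t *testing.T, skipMiner bool) (*Miner, *event.TypeMux) {
	miner, mux, cleanup := createMiner(t)
	t.Cleanup(func() { cleanup(skipMiner) })
	return miner, mux
}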
diff --git a/miner/stress/beacon/main.go b/miner/stress/beacon/main.go
index 70005e20d..9fa63281c 100644
--- a/miner/stress/beacon/main.go
+++ b/miner/stress/beacon/main.go
@@ -32,13 +32,15 @@ import (
"github.com/ethereum/go-ethereum/common/fdlimit"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/beacon"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/catalyst"
+ ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/les"
+ lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
@@ -88,24 +90,26 @@ var (
type ethNode struct {
typ nodetype
- api *catalyst.ConsensusAPI
- ethBackend *eth.Ethereum
- lesBackend *les.LightEthereum
stack *node.Node
enode *enode.Node
+ api *ethcatalyst.ConsensusAPI
+ ethBackend *eth.Ethereum
+ lapi *lescatalyst.ConsensusAPI
+ lesBackend *les.LightEthereum
}
func newNode(typ nodetype, genesis *core.Genesis, enodes []*enode.Node) *ethNode {
var (
err error
- api *catalyst.ConsensusAPI
+ api *ethcatalyst.ConsensusAPI
+ lapi *lescatalyst.ConsensusAPI
stack *node.Node
ethBackend *eth.Ethereum
lesBackend *les.LightEthereum
)
// Start the node and wait until it's up
if typ == eth2LightClient {
- stack, lesBackend, api, err = makeLightNode(genesis)
+ stack, lesBackend, lapi, err = makeLightNode(genesis)
} else {
stack, ethBackend, api, err = makeFullNode(genesis)
}
@@ -131,20 +135,27 @@ func newNode(typ nodetype, genesis *core.Genesis, enodes []*enode.Node) *ethNode
typ: typ,
api: api,
ethBackend: ethBackend,
+ lapi: lapi,
lesBackend: lesBackend,
stack: stack,
enode: enode,
}
}
-func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64) (*catalyst.ExecutableDataV1, error) {
+func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64) (*beacon.ExecutableDataV1, error) {
if n.typ != eth2MiningNode {
return nil, errors.New("invalid node type")
}
- payloadAttribute := catalyst.PayloadAttributesV1{
- Timestamp: uint64(time.Now().Unix()),
+ timestamp := uint64(time.Now().Unix())
+ if timestamp <= parentTimestamp {
+ timestamp = parentTimestamp + 1
}
- fcState := catalyst.ForkchoiceStateV1{
+ payloadAttribute := beacon.PayloadAttributesV1{
+ Timestamp: timestamp,
+ Random: common.Hash{},
+ SuggestedFeeRecipient: common.HexToAddress("0xdeadbeef"),
+ }
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: parentHash,
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
@@ -156,39 +167,62 @@ func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64)
return n.api.GetPayloadV1(*payload.PayloadID)
}
-func (n *ethNode) insertBlock(eb catalyst.ExecutableDataV1) error {
+func (n *ethNode) insertBlock(eb beacon.ExecutableDataV1) error {
if !eth2types(n.typ) {
return errors.New("invalid node type")
}
- newResp, err := n.api.ExecutePayloadV1(eb)
- if err != nil {
- return err
- } else if newResp.Status != "VALID" {
- return errors.New("failed to insert block")
+ switch n.typ {
+ case eth2NormalNode, eth2MiningNode:
+ newResp, err := n.api.ExecutePayloadV1(eb)
+ if err != nil {
+ return err
+ } else if newResp.Status != "VALID" {
+ return errors.New("failed to insert block")
+ }
+ return nil
+ case eth2LightClient:
+ newResp, err := n.lapi.ExecutePayloadV1(eb)
+ if err != nil {
+ return err
+ } else if newResp.Status != "VALID" {
+ return errors.New("failed to insert block")
+ }
+ return nil
+ default:
+ return errors.New("undefined node")
}
- return nil
}
-func (n *ethNode) insertBlockAndSetHead(parent *types.Header, ed catalyst.ExecutableDataV1) error {
+func (n *ethNode) insertBlockAndSetHead(parent *types.Header, ed beacon.ExecutableDataV1) error {
if !eth2types(n.typ) {
return errors.New("invalid node type")
}
if err := n.insertBlock(ed); err != nil {
return err
}
- block, err := catalyst.ExecutableDataToBlock(ed)
+ block, err := beacon.ExecutableDataToBlock(ed)
if err != nil {
return err
}
- fcState := catalyst.ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: block.ParentHash(),
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
}
- if _, err := n.api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
- return err
+ switch n.typ {
+ case eth2NormalNode, eth2MiningNode:
+ if _, err := n.api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
+ return err
+ }
+ return nil
+ case eth2LightClient:
+ if _, err := n.lapi.ForkchoiceUpdatedV1(fcState, nil); err != nil {
+ return err
+ }
+ return nil
+ default:
+ return errors.New("undefined node")
}
- return nil
}
type nodeManager struct {
@@ -284,12 +318,15 @@ func (mgr *nodeManager) run() {
nodes = append(nodes, mgr.getNodes(eth2NormalNode)...)
nodes = append(nodes, mgr.getNodes(eth2LightClient)...)
for _, node := range append(nodes) {
- fcState := catalyst.ForkchoiceStateV1{
+ fcState := beacon.ForkchoiceStateV1{
HeadBlockHash: oldest.Hash(),
SafeBlockHash: common.Hash{},
- FinalizedBlockHash: common.Hash{},
+ FinalizedBlockHash: oldest.Hash(),
}
- node.api.ForkchoiceUpdatedV1(fcState, nil)
+ // TODO(rjl493456442) finalization doesn't work properly, FIX IT
+ _ = fcState
+ _ = node
+ //node.api.ForkchoiceUpdatedV1(fcState, nil)
}
log.Info("Finalised eth2 block", "number", oldest.NumberU64(), "hash", oldest.Hash())
waitFinalise = waitFinalise[1:]
@@ -327,12 +364,11 @@ func (mgr *nodeManager) run() {
log.Error("Failed to assemble the block", "err", err)
continue
}
- block, _ := catalyst.ExecutableDataToBlock(*ed)
+ block, _ := beacon.ExecutableDataToBlock(*ed)
nodes := mgr.getNodes(eth2MiningNode)
nodes = append(nodes, mgr.getNodes(eth2NormalNode)...)
nodes = append(nodes, mgr.getNodes(eth2LightClient)...)
-
for _, node := range nodes {
if err := node.insertBlockAndSetHead(parentBlock.Header(), *ed); err != nil {
log.Error("Failed to insert block", "type", node.typ, "err", err)
@@ -410,9 +446,8 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
genesis.Difficulty = params.MinimumDifficulty
genesis.GasLimit = 25000000
- genesis.Config.ChainID = big.NewInt(18)
- genesis.Config.EIP150Hash = common.Hash{}
genesis.BaseFee = big.NewInt(params.InitialBaseFee)
+ genesis.Config = params.AllEthashProtocolChanges
genesis.Config.TerminalTotalDifficulty = transitionDifficulty
genesis.Alloc = core.GenesisAlloc{}
@@ -424,7 +459,7 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
return genesis
}
-func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *catalyst.ConsensusAPI, error) {
+func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *ethcatalyst.ConsensusAPI, error) {
// Define the basic configurations for the Ethereum node
datadir, _ := ioutil.TempDir("", "")
@@ -472,10 +507,10 @@ func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *catalyst.C
log.Crit("Failed to create the LES server", "err", err)
}
err = stack.Start()
- return stack, ethBackend, catalyst.NewConsensusAPI(ethBackend, nil), err
+ return stack, ethBackend, ethcatalyst.NewConsensusAPI(ethBackend), err
}
-func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *catalyst.ConsensusAPI, error) {
+func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *lescatalyst.ConsensusAPI, error) {
// Define the basic configurations for the Ethereum node
datadir, _ := ioutil.TempDir("", "")
@@ -510,7 +545,7 @@ func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *cata
return nil, nil, nil, err
}
err = stack.Start()
- return stack, lesBackend, catalyst.NewConsensusAPI(nil, lesBackend), err
+ return stack, lesBackend, lescatalyst.NewConsensusAPI(lesBackend), err
}
func eth2types(typ nodetype) bool {
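The timestamp fix in assembleBlock guarantees that a child block's timestamp is strictly greater than its parent's, falling back to parent+1 when the wall clock has not advanced. The same rule, isolated as a standalone sketch (helper name hypothetical, not part of the patch):

package sketch // illustrative only

import "time"

// nextTimestamp picks the current wall-clock time, but never a value that is
// less than or equal to the parent block's timestamp.
func nextTimestamp(parentTime uint64) uint64 {
	now := uint64(time.Now().Unix())
	if now <= parentTime {
		return parentTime + 1
	}
	return now
}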
diff --git a/miner/worker.go b/miner/worker.go
index 2c576ad08..c6927a1ca 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -17,8 +17,8 @@
package miner
import (
- "bytes"
"errors"
+ "fmt"
"math/big"
"sync"
"sync/atomic"
@@ -54,14 +54,14 @@ const (
// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
resubmitAdjustChanSize = 10
- // miningLogAtDepth is the number of confirmations before logging successful mining.
- miningLogAtDepth = 7
+ // sealingLogAtDepth is the number of confirmations before logging successful sealing.
+ sealingLogAtDepth = 7
- // minRecommitInterval is the minimal time interval to recreate the mining block with
+ // minRecommitInterval is the minimal time interval to recreate the sealing block with
// any newly arrived transactions.
minRecommitInterval = 1 * time.Second
- // maxRecommitInterval is the maximum time interval to recreate the mining block with
+ // maxRecommitInterval is the maximum time interval to recreate the sealing block with
// any newly arrived transactions.
maxRecommitInterval = 15 * time.Second
@@ -77,20 +77,68 @@ const (
staleThreshold = 7
)
-// environment is the worker's current environment and holds all of the current state information.
+// environment is the worker's current environment and holds all of the
+// information needed for generating the sealing block.
type environment struct {
signer types.Signer
state *state.StateDB // apply state changes here
ancestors mapset.Set // ancestor set (used for checking uncle parent validity)
family mapset.Set // family set (used for checking uncle invalidity)
- uncles mapset.Set // uncle set
tcount int // tx count in cycle
gasPool *core.GasPool // available gas used to pack transactions
+ coinbase common.Address
header *types.Header
txs []*types.Transaction
receipts []*types.Receipt
+ uncles map[common.Hash]*types.Header
+}
+
+// copy creates a deep copy of environment.
+func (env *environment) copy() *environment {
+ cpy := &environment{
+ signer: env.signer,
+ state: env.state.Copy(),
+ ancestors: env.ancestors.Clone(),
+ family: env.family.Clone(),
+ tcount: env.tcount,
+ coinbase: env.coinbase,
+ header: types.CopyHeader(env.header),
+ receipts: copyReceipts(env.receipts),
+ }
+ if env.gasPool != nil {
+ gasPool := *env.gasPool
+ cpy.gasPool = &gasPool
+ }
+ // The contents of txs and uncles are immutable, so there is no need
+ // to do an expensive deep copy of them.
+ cpy.txs = make([]*types.Transaction, len(env.txs))
+ copy(cpy.txs, env.txs)
+ cpy.uncles = make(map[common.Hash]*types.Header)
+ for hash, uncle := range env.uncles {
+ cpy.uncles[hash] = uncle
+ }
+ return cpy
+}
+
+// unclelist returns the contained uncles as a list.
+func (env *environment) unclelist() []*types.Header {
+ var uncles []*types.Header
+ for _, uncle := range env.uncles {
+ uncles = append(uncles, uncle)
+ }
+ return uncles
+}
+
+// discard terminates the background prefetcher goroutine. It should
+// always be called for every created environment instance, otherwise
+// a goroutine leak can occur.
+func (env *environment) discard() {
+ if env.state == nil {
+ return
+ }
+ env.state.StopPrefetcher()
}
// task contains all information for consensus engine sealing and result submitting.
@@ -114,6 +162,13 @@ type newWorkReq struct {
timestamp int64
}
+// getWorkReq represents a request for getting a new sealing work with provided parameters.
+type getWorkReq struct {
+ params *generateParams
+ err error
+ result chan *types.Block
+}
+
// intervalAdjust represents a resubmitting interval adjustment.
type intervalAdjust struct {
ratio float64
@@ -128,7 +183,6 @@ type worker struct {
engine consensus.Engine
eth Backend
chain *core.BlockChain
- merger *consensus.Merger
// Feeds
pendingLogsFeed event.Feed
@@ -144,6 +198,7 @@ type worker struct {
// Channels
newWorkCh chan *newWorkReq
+ getWorkCh chan *getWorkReq
taskCh chan *task
resultCh chan *types.Block
startCh chan struct{}
@@ -191,7 +246,7 @@ type worker struct {
resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
}
-func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool, merger *consensus.Merger) *worker {
+func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker {
worker := &worker{
config: config,
chainConfig: chainConfig,
@@ -199,16 +254,16 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
eth: eth,
mux: mux,
chain: eth.BlockChain(),
- merger: merger,
isLocalBlock: isLocalBlock,
localUncles: make(map[common.Hash]*types.Block),
remoteUncles: make(map[common.Hash]*types.Block),
- unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
+ unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth),
pendingTasks: make(map[common.Hash]*task),
txsCh: make(chan core.NewTxsEvent, txChanSize),
chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize),
newWorkCh: make(chan *newWorkReq),
+ getWorkCh: make(chan *getWorkReq),
taskCh: make(chan *task),
resultCh: make(chan *types.Block, resultQueueSize),
exitCh: make(chan struct{}),
@@ -264,15 +319,18 @@ func (w *worker) setExtra(extra []byte) {
// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
- w.resubmitIntervalCh <- interval
+ select {
+ case w.resubmitIntervalCh <- interval:
+ case <-w.exitCh:
+ }
}
-// disablePreseal disables pre-sealing mining feature
+// disablePreseal disables the pre-sealing feature
func (w *worker) disablePreseal() {
atomic.StoreUint32(&w.noempty, 1)
}
-// enablePreseal enables pre-sealing mining feature
+// enablePreseal enables the pre-sealing feature
func (w *worker) enablePreseal() {
atomic.StoreUint32(&w.noempty, 0)
}
@@ -350,13 +408,13 @@ func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) t
return time.Duration(int64(next))
}
-// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
+// newWorkLoop is a standalone goroutine to submit new sealing work upon received events.
func (w *worker) newWorkLoop(recommit time.Duration) {
defer w.wg.Done()
var (
interrupt *int32
minRecommit = recommit // minimal resubmit interval specified by user.
- timestamp int64 // timestamp for each round of mining.
+ timestamp int64 // timestamp for each round of sealing.
)
timer := time.NewTimer(0)
@@ -401,7 +459,7 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
commit(false, commitInterruptNewHead)
case <-timer.C:
- // If mining is running resubmit a new work cycle periodically to pull in
+ // If sealing is running resubmit a new work cycle periodically to pull in
// higher priced transactions. Disable this overhead for pending blocks.
if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
// Short circuit if no new transaction arrives.
@@ -448,22 +506,36 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
}
}
-// mainLoop is a standalone goroutine to regenerate the sealing task based on the received event.
+// mainLoop is responsible for generating and submitting sealing work based on
+// the received events. It supports two modes: automatically generating and submitting
+// a task, or returning a task according to the given parameters for various purposes.
func (w *worker) mainLoop() {
defer w.wg.Done()
defer w.txsSub.Unsubscribe()
defer w.chainHeadSub.Unsubscribe()
defer w.chainSideSub.Unsubscribe()
defer func() {
- if w.current != nil && w.current.state != nil {
- w.current.state.StopPrefetcher()
+ if w.current != nil {
+ w.current.discard()
}
}()
+ cleanTicker := time.NewTicker(time.Second * 10)
+ defer cleanTicker.Stop()
+
for {
select {
case req := <-w.newWorkCh:
- w.commitNewWork(req.interrupt, req.noempty, req.timestamp)
+ w.commitWork(req.interrupt, req.noempty, req.timestamp)
+
+ case req := <-w.getWorkCh:
+ block, err := w.generateWork(req.params)
+ if err != nil {
+ req.err = err
+ req.result <- nil
+ } else {
+ req.result <- block
+ }
case ev := <-w.chainSideCh:
// Short circuit for duplicate side blocks
@@ -479,46 +551,40 @@ func (w *worker) mainLoop() {
} else {
w.remoteUncles[ev.Block.Hash()] = ev.Block
}
- // If our mining block contains less than 2 uncle blocks,
- // add the new uncle block if valid and regenerate a mining block.
- if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
+ // If our sealing block contains less than 2 uncle blocks,
+ // add the new uncle block if valid and regenerate a new
+ // sealing block for higher profit.
+ if w.isRunning() && w.current != nil && len(w.current.uncles) < 2 {
start := time.Now()
if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
- var uncles []*types.Header
- w.current.uncles.Each(func(item interface{}) bool {
- hash, ok := item.(common.Hash)
- if !ok {
- return false
- }
- uncle, exist := w.localUncles[hash]
- if !exist {
- uncle, exist = w.remoteUncles[hash]
- }
- if !exist {
- return false
- }
- uncles = append(uncles, uncle.Header())
- return false
- })
- w.commit(uncles, nil, true, start)
+ w.commit(w.current.copy(), nil, true, start)
+ }
+ }
+
+ case <-cleanTicker.C:
+ chainHead := w.chain.CurrentBlock()
+ for hash, uncle := range w.localUncles {
+ if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
+ delete(w.localUncles, hash)
+ }
+ }
+ for hash, uncle := range w.remoteUncles {
+ if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
+ delete(w.remoteUncles, hash)
}
}
case ev := <-w.txsCh:
- // Apply transactions to the pending state if we're not mining.
+ // Apply transactions to the pending state if we're not sealing
//
// Note all transactions received may not be continuous with transactions
- // already included in the current mining block. These transactions will
+ // already included in the current sealing block. These transactions will
// be automatically eliminated.
if !w.isRunning() && w.current != nil {
// If block is already full, abort
if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
continue
}
- w.mu.RLock()
- coinbase := w.coinbase
- w.mu.RUnlock()
-
txs := make(map[common.Address]types.Transactions)
for _, tx := range ev.Txs {
acc, _ := types.Sender(w.current.signer, tx)
@@ -526,18 +592,19 @@ func (w *worker) mainLoop() {
}
txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
tcount := w.current.tcount
- w.commitTransactions(txset, coinbase, nil)
- // Only update the snapshot if any new transactons were added
+ w.commitTransactions(w.current, txset, nil)
+
+ // Only update the snapshot if any new transactions were added
// to the pending block
if tcount != w.current.tcount {
- w.updateSnapshot()
+ w.updateSnapshot(w.current)
}
} else {
// Special case, if the consensus engine is 0 period clique(dev mode),
- // submit mining work here since all empty submission will be rejected
+ // submit sealing work here since all empty submission will be rejected
// by clique. Of course the advance sealing(empty submission) is disabled.
if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
- w.commitNewWork(nil, true, time.Now().Unix())
+ w.commitWork(nil, true, time.Now().Unix())
}
}
atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))
@@ -679,23 +746,35 @@ func (w *worker) resultLoop() {
}
}
-// makeCurrent creates a new environment for the current cycle.
-func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
+// makeEnv creates a new environment for the sealing block.
+func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase common.Address) (*environment, error) {
// Retrieve the parent state to execute on top and start a prefetcher for
- // the miner to speed block sealing up a bit
+ // the miner to speed block sealing up a bit.
state, err := w.chain.StateAt(parent.Root())
if err != nil {
- return err
+ // Note, the sealing block can be created upon an arbitrary parent block,
+ // but the state of that parent block may already have been pruned, so the
+ // necessary state recovery is attempted here.
+ //
+ // The maximum acceptable reorg depth can be limited by the finalised block
+ // somehow. TODO(rjl493456442) fix the hard-coded number here later.
+ state, err = w.eth.StateAtBlock(parent, 1024, nil, false, false)
+ log.Warn("Recovered mining state", "root", parent.Root(), "err", err)
+ }
+ if err != nil {
+ return nil, err
}
state.StartPrefetcher("miner")
+ // Note the passed coinbase may be different from header.Coinbase.
env := &environment{
signer: types.MakeSigner(w.chainConfig, header.Number),
state: state,
+ coinbase: coinbase,
ancestors: mapset.NewSet(),
family: mapset.NewSet(),
- uncles: mapset.NewSet(),
header: header,
+ uncles: make(map[common.Hash]*types.Header),
}
// when 08 is processed ancestors contain 07 (quick block)
for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
@@ -707,20 +786,16 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
}
// Keep track of transactions which return errors so they can be removed
env.tcount = 0
-
- // Swap out the old work with the new one, terminating any leftover prefetcher
- // processes in the mean time and starting a new one.
- if w.current != nil && w.current.state != nil {
- w.current.state.StopPrefetcher()
- }
- w.current = env
- return nil
+ return env, nil
}
// commitUncle adds the given block to uncle block set, returns error if failed to add.
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
+ if w.isTTDReached(env.header) {
+ return errors.New("ignore uncle for beacon block")
+ }
hash := uncle.Hash()
- if env.uncles.Contains(hash) {
+ if _, exist := env.uncles[hash]; exist {
return errors.New("uncle not unique")
}
if env.header.ParentHash == uncle.ParentHash {
@@ -732,82 +807,58 @@ func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
if env.family.Contains(hash) {
return errors.New("uncle already included")
}
- env.uncles.Add(uncle.Hash())
+ env.uncles[hash] = uncle
return nil
}
-// updateSnapshot updates pending snapshot block and state.
-// Note this function assumes the current variable is thread safe.
-func (w *worker) updateSnapshot() {
+// updateSnapshot updates pending snapshot block, receipts and state.
+func (w *worker) updateSnapshot(env *environment) {
w.snapshotMu.Lock()
defer w.snapshotMu.Unlock()
- var uncles []*types.Header
- w.current.uncles.Each(func(item interface{}) bool {
- hash, ok := item.(common.Hash)
- if !ok {
- return false
- }
- uncle, exist := w.localUncles[hash]
- if !exist {
- uncle, exist = w.remoteUncles[hash]
- }
- if !exist {
- return false
- }
- uncles = append(uncles, uncle.Header())
- return false
- })
-
w.snapshotBlock = types.NewBlock(
- w.current.header,
- w.current.txs,
- uncles,
- w.current.receipts,
+ env.header,
+ env.txs,
+ env.unclelist(),
+ env.receipts,
trie.NewStackTrie(nil),
)
- w.snapshotReceipts = copyReceipts(w.current.receipts)
- w.snapshotState = w.current.state.Copy()
+ w.snapshotReceipts = copyReceipts(env.receipts)
+ w.snapshotState = env.state.Copy()
}
-func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
- snap := w.current.state.Snapshot()
+func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
+ snap := env.state.Snapshot()
- receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
+ receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig())
if err != nil {
- w.current.state.RevertToSnapshot(snap)
+ env.state.RevertToSnapshot(snap)
return nil, err
}
- w.current.txs = append(w.current.txs, tx)
- w.current.receipts = append(w.current.receipts, receipt)
+ env.txs = append(env.txs, tx)
+ env.receipts = append(env.receipts, receipt)
return receipt.Logs, nil
}
-func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool {
- // Short circuit if current is nil
- if w.current == nil {
- return true
+func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32) bool {
+ gasLimit := env.header.GasLimit
+ if env.gasPool == nil {
+ env.gasPool = new(core.GasPool).AddGas(gasLimit)
}
-
- gasLimit := w.current.header.GasLimit
- if w.current.gasPool == nil {
- w.current.gasPool = new(core.GasPool).AddGas(gasLimit)
- }
-
var coalescedLogs []*types.Log
for {
// In the following three cases, we will interrupt the execution of the transaction.
// (1) new head block event arrival, the interrupt signal is 1
// (2) worker start or restart, the interrupt signal is 1
- // (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2.
+ // (3) worker recreate the sealing block with any newly arrived transactions, the interrupt signal is 2.
// For the first two cases, the semi-finished work will be discarded.
// For the third case, the semi-finished work will be submitted to the consensus engine.
if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
// Notify resubmit loop to increase resubmitting interval due to too frequent commits.
if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
- ratio := float64(gasLimit-w.current.gasPool.Gas()) / float64(gasLimit)
+ ratio := float64(gasLimit-env.gasPool.Gas()) / float64(gasLimit)
if ratio < 0.1 {
ratio = 0.1
}
@@ -819,8 +870,8 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
return atomic.LoadInt32(interrupt) == commitInterruptNewHead
}
// If we don't have enough gas for any further transactions then we're done
- if w.current.gasPool.Gas() < params.TxGas {
- log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas)
+ if env.gasPool.Gas() < params.TxGas {
+ log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
break
}
// Retrieve the next transaction and abort if all done
@@ -832,19 +883,19 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
// during transaction acceptance is the transaction pool.
//
// We use the eip155 signer regardless of the current hf.
- from, _ := types.Sender(w.current.signer, tx)
+ from, _ := types.Sender(env.signer, tx)
// Check whether the tx is replay protected. If we're not in the EIP155 hf
// phase, start ignoring the sender until we do.
- if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) {
+ if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
txs.Pop()
continue
}
// Start executing the transaction
- w.current.state.Prepare(tx.Hash(), w.current.tcount)
+ env.state.Prepare(tx.Hash(), env.tcount)
- logs, err := w.commitTransaction(tx, coinbase)
+ logs, err := w.commitTransaction(env, tx)
switch {
case errors.Is(err, core.ErrGasLimitReached):
// Pop the current out-of-gas transaction without shifting in the next from the account
@@ -864,7 +915,7 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
case errors.Is(err, nil):
// Everything ok, collect the logs and shift in the next transaction from the same account
coalescedLogs = append(coalescedLogs, logs...)
- w.current.tcount++
+ env.tcount++
txs.Shift()
case errors.Is(err, core.ErrTxTypeNotSupported):
@@ -881,8 +932,8 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
}
if !w.isRunning() && len(coalescedLogs) > 0 {
- // We don't push the pendingLogsEvent while we are mining. The reason is that
- // when we are mining, the worker will regenerate a mining block every 3 seconds.
+ // We don't push the pendingLogsEvent while we are sealing. The reason is that
+ // when we are sealing, the worker will regenerate a sealing block every 3 seconds.
// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.
// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
@@ -903,24 +954,56 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
return false
}
-// commitNewWork generates several new sealing tasks based on the parent block.
-func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
+// generateParams wraps various of settings for generating sealing task.
+type generateParams struct {
+ timestamp uint64 // The timestamp for the sealing task
+ forceTime bool // Flag whether the given timestamp is immutable or not
+ parentHash common.Hash // Parent block hash, empty means the latest chain head
+ coinbase common.Address // The fee recipient address for the included transactions
+ random common.Hash // The randomness generated by the beacon chain, empty before the merge
+ noUncle bool // Flag whether to disallow uncle block inclusion
+ noExtra bool // Flag whether to disallow the extra field assignment
+}
+
+// prepareWork constructs the sealing task according to the given parameters,
+// either based on the last chain head or a specified parent. In this function
+// the pending transactions are not filled yet; only the empty task is returned.
+func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
w.mu.RLock()
defer w.mu.RUnlock()
- tstart := time.Now()
+ // Find the parent block for the sealing task
parent := w.chain.CurrentBlock()
-
- if parent.Time() >= uint64(timestamp) {
- timestamp = int64(parent.Time() + 1)
+ if genParams.parentHash != (common.Hash{}) {
+ parent = w.chain.GetBlockByHash(genParams.parentHash)
}
+ if parent == nil {
+ return nil, fmt.Errorf("missing parent")
+ }
+ // Sanity check the timestamp correctness, clamping the timestamp
+ // to parent+1 if mutation is allowed.
+ timestamp := genParams.timestamp
+ if parent.Time() >= timestamp {
+ if genParams.forceTime {
+ return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time(), timestamp)
+ }
+ timestamp = parent.Time() + 1
+ }
+ // Construct the sealing block header, set the extra field if it's allowed
num := parent.Number()
header := &types.Header{
ParentHash: parent.Hash(),
Number: num.Add(num, common.Big1),
GasLimit: core.CalcGasLimit(parent.GasLimit(), w.config.GasCeil),
- Extra: w.extra,
- Time: uint64(timestamp),
+ Time: timestamp,
+ Coinbase: genParams.coinbase,
+ }
+ if !genParams.noExtra && len(w.extra) != 0 {
+ header.Extra = w.extra
+ }
+ // Set the randomness field from the beacon chain if it's available.
+ if genParams.random != (common.Hash{}) {
+ header.MixDigest = genParams.random
}
// Set baseFee and GasLimit if we are on an EIP-1559 chain
if w.chainConfig.IsLondon(header.Number) {
@@ -930,83 +1013,47 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
}
}
- // Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
- if w.isRunning() {
- if w.coinbase == (common.Address{}) {
- log.Error("Refusing to mine without etherbase")
- return
- }
- header.Coinbase = w.coinbase
- }
+ // Run the consensus preparation with the default or customized consensus engine.
if err := w.engine.Prepare(w.chain, header); err != nil {
- log.Error("Failed to prepare header for mining", "err", err)
- return
- }
- // If we are care about TheDAO hard-fork check whether to override the extra-data or not
- if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil {
- // Check whether the block is among the fork extra-override range
- limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
- if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
- // Depending whether we support or oppose the fork, override differently
- if w.chainConfig.DAOForkSupport {
- header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
- } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
- header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
- }
- }
+ log.Error("Failed to prepare header for sealing", "err", err)
+ return nil, err
}
// Could potentially happen if starting to mine in an odd state.
- err := w.makeCurrent(parent, header)
+ // Note genParams.coinbase can be different from header.Coinbase
+ // since the clique algorithm can modify the coinbase field in the header.
+ env, err := w.makeEnv(parent, header, genParams.coinbase)
if err != nil {
- log.Error("Failed to create mining context", "err", err)
- return
+ log.Error("Failed to create sealing context", "err", err)
+ return nil, err
}
- // Create the current work task and check any fork transitions needed
- env := w.current
- if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 {
- misc.ApplyDAOHardFork(env.state)
- }
- // Accumulate the uncles for the current block
- uncles := make([]*types.Header, 0, 2)
- commitUncles := func(blocks map[common.Hash]*types.Block) {
- // Clean up stale uncle blocks first
- for hash, uncle := range blocks {
- if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
- delete(blocks, hash)
- }
- }
- for hash, uncle := range blocks {
- if len(uncles) == 2 {
- break
- }
- if err := w.commitUncle(env, uncle.Header()); err != nil {
- log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
- } else {
- log.Debug("Committing new uncle to block", "hash", hash)
- uncles = append(uncles, uncle.Header())
+ // Accumulate the uncles for the sealing work only if it's allowed.
+ if !genParams.noUncle {
+ commitUncles := func(blocks map[common.Hash]*types.Block) {
+ for hash, uncle := range blocks {
+ if len(env.uncles) == 2 {
+ break
+ }
+ if err := w.commitUncle(env, uncle.Header()); err != nil {
+ log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
+ } else {
+ log.Debug("Committing new uncle to block", "hash", hash)
+ }
}
}
+ // Prefer locally generated uncles
+ commitUncles(w.localUncles)
+ commitUncles(w.remoteUncles)
}
- // Prefer to locally generated uncle
- commitUncles(w.localUncles)
- commitUncles(w.remoteUncles)
-
- // Create an empty block based on temporary copied state for
- // sealing in advance without waiting block execution finished.
- if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
- w.commit(uncles, nil, false, tstart)
- }
+ return env, nil
+}
+// fillTransactions retrieves the pending transactions from the txpool and fills them
+// into the given sealing block. The transaction selection and ordering strategy can
+// be customized with plugins in the future.
+func (w *worker) fillTransactions(interrupt *int32, env *environment) {
+ // Split the pending transactions into locals and remotes
// Fill the block with all available pending transactions.
pending := w.eth.TxPool().Pending(true)
- // Short circuit if there is no available pending transactions.
- // But if we disable empty precommit already, ignore it. Since
- // empty block is necessary to keep the liveness of the network.
- if len(pending) == 0 && atomic.LoadUint32(&w.noempty) == 0 {
- w.updateSnapshot()
- return
- }
- // Split the pending transactions into locals and remotes
localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
for _, account := range w.eth.TxPool().Locals() {
if txs := remoteTxs[account]; len(txs) > 0 {
@@ -1015,57 +1062,139 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
}
}
if len(localTxs) > 0 {
- txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs, header.BaseFee)
- if w.commitTransactions(txs, w.coinbase, interrupt) {
+ txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
+ if w.commitTransactions(env, txs, interrupt) {
return
}
}
if len(remoteTxs) > 0 {
- txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs, header.BaseFee)
- if w.commitTransactions(txs, w.coinbase, interrupt) {
+ txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
+ if w.commitTransactions(env, txs, interrupt) {
return
}
}
- w.commit(uncles, w.fullTaskHook, true, tstart)
+}
+
+// generateWork generates a sealing block based on the given parameters.
+func (w *worker) generateWork(params *generateParams) (*types.Block, error) {
+ work, err := w.prepareWork(params)
+ if err != nil {
+ return nil, err
+ }
+ defer work.discard()
+
+ w.fillTransactions(nil, work)
+ return w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, work.unclelist(), work.receipts)
+}
+
+// commitWork generates several new sealing tasks based on the parent block
+// and submits them to the sealer.
+func (w *worker) commitWork(interrupt *int32, noempty bool, timestamp int64) {
+ start := time.Now()
+
+ // Set the coinbase if the worker is running or it's required
+ var coinbase common.Address
+ if w.isRunning() {
+ if w.coinbase == (common.Address{}) {
+ log.Error("Refusing to mine without etherbase")
+ return
+ }
+ coinbase = w.coinbase // Use the preset address as the fee recipient
+ }
+ work, err := w.prepareWork(&generateParams{
+ timestamp: uint64(timestamp),
+ coinbase: coinbase,
+ })
+ if err != nil {
+ return
+ }
+ // Create an empty block based on temporarily copied state for
+ // sealing in advance without waiting for block execution to finish.
+ if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
+ w.commit(work.copy(), nil, false, start)
+ }
+ // Fill pending transactions from the txpool
+ w.fillTransactions(interrupt, work)
+ w.commit(work.copy(), w.fullTaskHook, true, start)
+
+ // Swap out the old work with the new one, terminating any leftover
+ // prefetcher processes in the meantime and starting a new one.
+ if w.current != nil {
+ w.current.discard()
+ }
+ w.current = work
}
// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
-func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
- // Deep copy receipts here to avoid interaction between different tasks.
- receipts := copyReceipts(w.current.receipts)
- s := w.current.state.Copy()
- block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, receipts)
- if err != nil {
- return err
- }
+// Note, this function assumes it is allowed to mutate the passed env; callers
+// should pass in a deep copy if the original needs to stay intact.
+func (w *worker) commit(env *environment, interval func(), update bool, start time.Time) error {
if w.isRunning() {
if interval != nil {
interval()
}
- // If we're post merge, just ignore
- td, ttd := w.chain.GetTd(block.ParentHash(), block.NumberU64()-1), w.chain.Config().TerminalTotalDifficulty
- if td != nil && ttd != nil && td.Cmp(ttd) >= 0 {
- return nil
+ // Create a local environment copy to avoid a data race with the snapshot state.
+ // https://github.com/ethereum/go-ethereum/issues/24299
+ env := env.copy()
+ block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.state, env.txs, env.unclelist(), env.receipts)
+ if err != nil {
+ return err
}
- select {
- case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}:
- w.unconfirmed.Shift(block.NumberU64() - 1)
- log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
- "uncles", len(uncles), "txs", w.current.tcount,
- "gas", block.GasUsed(), "fees", totalFees(block, receipts),
- "elapsed", common.PrettyDuration(time.Since(start)))
+ // If we're post merge, just ignore
+ if !w.isTTDReached(block.Header()) {
+ select {
+ case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}:
+ w.unconfirmed.Shift(block.NumberU64() - 1)
+ log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
+ "uncles", len(env.uncles), "txs", env.tcount,
+ "gas", block.GasUsed(), "fees", totalFees(block, env.receipts),
+ "elapsed", common.PrettyDuration(time.Since(start)))
- case <-w.exitCh:
- log.Info("Worker has exited")
+ case <-w.exitCh:
+ log.Info("Worker has exited")
+ }
}
}
if update {
- w.updateSnapshot()
+ w.updateSnapshot(env)
}
return nil
}
+// getSealingBlock generates the sealing block based on the given parameters.
+func (w *worker) getSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash) (*types.Block, error) {
+ req := &getWorkReq{
+ params: &generateParams{
+ timestamp: timestamp,
+ forceTime: true,
+ parentHash: parent,
+ coinbase: coinbase,
+ random: random,
+ noUncle: true,
+ noExtra: true,
+ },
+ result: make(chan *types.Block, 1),
+ }
+ select {
+ case w.getWorkCh <- req:
+ block := <-req.result
+ if block == nil {
+ return nil, req.err
+ }
+ return block, nil
+ case <-w.exitCh:
+ return nil, errors.New("miner closed")
+ }
+}
+
+// isTTDReached reports whether the given block has reached the terminal total
+// difficulty for The Merge transition.
+func (w *worker) isTTDReached(header *types.Header) bool {
+ td, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty
+ return td != nil && ttd != nil && td.Cmp(ttd) >= 0
+}
+
// copyReceipts makes a deep copy of the given receipts.
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
result := make([]*types.Receipt, len(receipts))
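getSealingBlock and mainLoop communicate through getWorkCh using a simple request/response-over-channels idiom: the request carries its own buffered result channel, and shutdown is handled by also selecting on the exit channel. A reduced, generic sketch of the same idiom (package, types and values here are placeholders, not the worker's real ones):

package sketch // illustrative only

import "errors"

// getReq mirrors the shape of getWorkReq: parameters in, a buffered result
// channel out, and an error field the serving loop may fill in on failure.
type getReq struct {
	param  uint64
	result chan uint64
	err    error
}

// serve stands in for the mainLoop side: it answers requests until told to exit.
func serve(getCh chan *getReq, exitCh chan struct{}) {
	for {
		select {
		case req := <-getCh:
			req.result <- req.param + 1 // placeholder for the real generateWork
		case <-exitCh:
			return
		}
	}
}

// get stands in for getSealingBlock: submit a request, then wait for either the
// result or shutdown of the serving goroutine.
func get(getCh chan *getReq, exitCh chan struct{}, param uint64) (uint64, error) {
	req := &getReq{param: param, result: make(chan uint64, 1)}
	select {
	case getCh <- req:
		return <-req.result, req.err
	case <-exitCh:
		return 0, errors.New("serving loop closed")
	}
}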
diff --git a/miner/worker_test.go b/miner/worker_test.go
index c8ddd2c32..dd029433b 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -17,6 +17,7 @@
package miner
import (
+ "errors"
"math/big"
"math/rand"
"sync/atomic"
@@ -30,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
@@ -166,6 +168,9 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain }
func (b *testWorkerBackend) TxPool() *core.TxPool { return b.txPool }
+func (b *testWorkerBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
+ return nil, errors.New("not supported")
+}
func (b *testWorkerBackend) newRandomUncle() *types.Block {
var parent *types.Block
@@ -197,7 +202,7 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction {
func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) {
backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks)
backend.txPool.AddLocals(pendingTxs)
- w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, consensus.NewMerger(rawdb.NewMemoryDatabase()))
+ w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false)
w.setEtherbase(testBankAddress)
return w, backend
}
@@ -382,7 +387,7 @@ func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, en
w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0)
defer w.close()
- var taskCh = make(chan struct{})
+ var taskCh = make(chan struct{}, 3)
taskIndex := 0
w.newTaskHook = func(task *task) {
@@ -521,3 +526,144 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
t.Error("interval reset timeout")
}
}
+
+func TestGetSealingWorkEthash(t *testing.T) {
+ testGetSealingWork(t, ethashChainConfig, ethash.NewFaker(), false)
+}
+
+func TestGetSealingWorkClique(t *testing.T) {
+ testGetSealingWork(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase()), false)
+}
+
+func TestGetSealingWorkPostMerge(t *testing.T) {
+ local := new(params.ChainConfig)
+ *local = *ethashChainConfig
+ local.TerminalTotalDifficulty = big.NewInt(0)
+ testGetSealingWork(t, local, ethash.NewFaker(), true)
+}
+
+func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, postMerge bool) {
+ defer engine.Close()
+
+ w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0)
+ defer w.close()
+
+ w.setExtra([]byte{0x01, 0x02})
+ w.postSideBlock(core.ChainSideEvent{Block: b.uncleBlock})
+
+ w.skipSealHook = func(task *task) bool {
+ return true
+ }
+ w.fullTaskHook = func() {
+ time.Sleep(100 * time.Millisecond)
+ }
+ timestamp := uint64(time.Now().Unix())
+ assertBlock := func(block *types.Block, number uint64, coinbase common.Address, random common.Hash) {
+ if block.Time() != timestamp {
+ // Sometimes the timestamp will be mutated if it is smaller than
+ // the parent block's. That's OK.
+ t.Logf("Invalid timestamp, want %d, got %d", timestamp, block.Time())
+ }
+ if len(block.Uncles()) != 0 {
+ t.Error("Unexpected uncle block")
+ }
+ _, isClique := engine.(*clique.Clique)
+ if !isClique {
+ if len(block.Extra()) != 0 {
+ t.Error("Unexpected extra field")
+ }
+ if block.Coinbase() != coinbase {
+ t.Errorf("Unexpected coinbase got %x want %x", block.Coinbase(), coinbase)
+ }
+ } else {
+ if block.Coinbase() != (common.Address{}) {
+ t.Error("Unexpected coinbase")
+ }
+ }
+ if !isClique {
+ if block.MixDigest() != random {
+ t.Error("Unexpected mix digest")
+ }
+ }
+ if block.Nonce() != 0 {
+ t.Error("Unexpected block nonce")
+ }
+ if block.NumberU64() != number {
+ t.Errorf("Mismatched block number, want %d got %d", number, block.NumberU64())
+ }
+ }
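+ // The cases cover sealing on top of the genesis block and the current head,
+ // with and without an explicit coinbase/random value, plus an unknown parent
+ // hash which must fail.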
+ var cases = []struct {
+ parent common.Hash
+ coinbase common.Address
+ random common.Hash
+ expectNumber uint64
+ expectErr bool
+ }{
+ {
+ b.chain.Genesis().Hash(),
+ common.HexToAddress("0xdeadbeef"),
+ common.HexToHash("0xcafebabe"),
+ uint64(1),
+ false,
+ },
+ {
+ b.chain.CurrentBlock().Hash(),
+ common.HexToAddress("0xdeadbeef"),
+ common.HexToHash("0xcafebabe"),
+ b.chain.CurrentBlock().NumberU64() + 1,
+ false,
+ },
+ {
+ b.chain.CurrentBlock().Hash(),
+ common.Address{},
+ common.HexToHash("0xcafebabe"),
+ b.chain.CurrentBlock().NumberU64() + 1,
+ false,
+ },
+ {
+ b.chain.CurrentBlock().Hash(),
+ common.Address{},
+ common.Hash{},
+ b.chain.CurrentBlock().NumberU64() + 1,
+ false,
+ },
+ {
+ common.HexToHash("0xdeadbeef"),
+ common.HexToAddress("0xdeadbeef"),
+ common.HexToHash("0xcafebabe"),
+ 0,
+ true,
+ },
+ }
+
+ // This API should work even when the automatic sealing is not enabled
+ for _, c := range cases {
+ block, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random)
+ if c.expectErr {
+ if err == nil {
+ t.Error("Expect error but get nil")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ assertBlock(block, c.expectNumber, c.coinbase, c.random)
+ }
+ }
+
+ // This API should work even when the automatic sealing is enabled
+ w.start()
+ for _, c := range cases {
+ block, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random)
+ if c.expectErr {
+ if err == nil {
+ t.Error("Expect error but get nil")
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+ assertBlock(block, c.expectNumber, c.coinbase, c.random)
+ }
+ }
+}
diff --git a/node/node_test.go b/node/node_test.go
index e10463060..25cfa9d38 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -393,7 +393,7 @@ func TestLifecycleTerminationGuarantee(t *testing.T) {
// on the given prefix
func TestRegisterHandler_Successful(t *testing.T) {
node := createNode(t, 7878, 7979)
-
+ defer node.Close()
// create and mount handler
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("success"))
diff --git a/p2p/server.go b/p2p/server.go
index bcfc1bd10..138975e54 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -943,9 +943,8 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
}
// If dialing, figure out the remote public key.
- var dialPubkey *ecdsa.PublicKey
if dialDest != nil {
- dialPubkey = new(ecdsa.PublicKey)
+ dialPubkey := new(ecdsa.PublicKey)
if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil {
err = errors.New("dial destination doesn't have a secp256k1 public key")
srv.log.Trace("Setting up connection failed", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err)
diff --git a/params/config.go b/params/config.go
index 36482f238..7f52472ec 100644
--- a/params/config.go
+++ b/params/config.go
@@ -267,7 +267,7 @@ var (
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
- TestRules = TestChainConfig.Rules(new(big.Int))
+ TestRules = TestChainConfig.Rules(new(big.Int), false)
)
// TrustedCheckpoint represents a set of post-processed trie roots (CHT and
@@ -668,10 +668,11 @@ type Rules struct {
IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
IsBerlin, IsLondon bool
+ IsMerge bool
}
// Rules ensures c's ChainID is not nil.
-func (c *ChainConfig) Rules(num *big.Int) Rules {
+func (c *ChainConfig) Rules(num *big.Int, isMerge bool) Rules {
chainID := c.ChainID
if chainID == nil {
chainID = new(big.Int)
@@ -688,5 +689,6 @@ func (c *ChainConfig) Rules(num *big.Int) Rules {
IsIstanbul: c.IsIstanbul(num),
IsBerlin: c.IsBerlin(num),
IsLondon: c.IsLondon(num),
+ IsMerge: isMerge,
}
}
diff --git a/params/version.go b/params/version.go
index 9c463da27..743d5f74f 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 10 // Minor version component of the current release
- VersionPatch = 15 // Patch version component of the current release
+ VersionPatch = 16 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)
diff --git a/rpc/client.go b/rpc/client.go
index e43760c22..d55af7554 100644
--- a/rpc/client.go
+++ b/rpc/client.go
@@ -58,12 +58,6 @@ const (
maxClientSubscriptionBuffer = 20000
)
-const (
- httpScheme = "http"
- wsScheme = "ws"
- ipcScheme = "ipc"
-)
-
// BatchElem is an element in a batch request.
type BatchElem struct {
Method string
@@ -80,7 +74,7 @@ type BatchElem struct {
// Client represents a connection to an RPC server.
type Client struct {
idgen func() ID // for subscriptions
- scheme string // connection type: http, ws or ipc
+ isHTTP bool // connection type: http, ws or ipc
services *serviceRegistry
idCounter uint32
@@ -115,11 +109,9 @@ type clientConn struct {
}
func (c *Client) newClientConn(conn ServerCodec) *clientConn {
- ctx := context.WithValue(context.Background(), clientContextKey{}, c)
- // Http connections have already set the scheme
- if !c.isHTTP() && c.scheme != "" {
- ctx = context.WithValue(ctx, "scheme", c.scheme)
- }
+ ctx := context.Background()
+ ctx = context.WithValue(ctx, clientContextKey{}, c)
+ ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo())
handler := newHandler(ctx, conn, c.idgen, c.services)
return &clientConn{conn, handler}
}
@@ -145,7 +137,7 @@ func (op *requestOp) wait(ctx context.Context, c *Client) (*jsonrpcMessage, erro
select {
case <-ctx.Done():
// Send the timeout to dispatch so it can remove the request IDs.
- if !c.isHTTP() {
+ if !c.isHTTP {
select {
case c.reqTimeout <- op:
case <-c.closing:
@@ -212,18 +204,10 @@ func newClient(initctx context.Context, connect reconnectFunc) (*Client, error)
}
func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry) *Client {
- scheme := ""
- switch conn.(type) {
- case *httpConn:
- scheme = httpScheme
- case *websocketCodec:
- scheme = wsScheme
- case *jsonCodec:
- scheme = ipcScheme
- }
+ _, isHTTP := conn.(*httpConn)
c := &Client{
+ isHTTP: isHTTP,
idgen: idgen,
- scheme: scheme,
services: services,
writeConn: conn,
close: make(chan struct{}),
@@ -236,7 +220,7 @@ func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry) *C
reqSent: make(chan error, 1),
reqTimeout: make(chan *requestOp),
}
- if !c.isHTTP() {
+ if !isHTTP {
go c.dispatch(conn)
}
return c
@@ -267,7 +251,7 @@ func (c *Client) SupportedModules() (map[string]string, error) {
// Close closes the client, aborting any in-flight requests.
func (c *Client) Close() {
- if c.isHTTP() {
+ if c.isHTTP {
return
}
select {
@@ -281,7 +265,7 @@ func (c *Client) Close() {
// This method only works for clients using HTTP, it doesn't have
// any effect for clients using another transport.
func (c *Client) SetHeader(key, value string) {
- if !c.isHTTP() {
+ if !c.isHTTP {
return
}
conn := c.writeConn.(*httpConn)
@@ -315,7 +299,7 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str
}
op := &requestOp{ids: []json.RawMessage{msg.ID}, resp: make(chan *jsonrpcMessage, 1)}
- if c.isHTTP() {
+ if c.isHTTP {
err = c.sendHTTP(ctx, op, msg)
} else {
err = c.send(ctx, op, msg)
@@ -378,7 +362,7 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error {
}
var err error
- if c.isHTTP() {
+ if c.isHTTP {
err = c.sendBatchHTTP(ctx, op, msgs)
} else {
err = c.send(ctx, op, msgs)
@@ -417,7 +401,7 @@ func (c *Client) Notify(ctx context.Context, method string, args ...interface{})
}
msg.ID = nil
- if c.isHTTP() {
+ if c.isHTTP {
return c.sendHTTP(ctx, op, msg)
}
return c.send(ctx, op, msg)
@@ -450,12 +434,12 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf
// Check type of channel first.
chanVal := reflect.ValueOf(channel)
if chanVal.Kind() != reflect.Chan || chanVal.Type().ChanDir()&reflect.SendDir == 0 {
- panic("first argument to Subscribe must be a writable channel")
+ panic(fmt.Sprintf("channel argument of Subscribe has type %T, need writable channel", channel))
}
if chanVal.IsNil() {
panic("channel given to Subscribe must not be nil")
}
- if c.isHTTP() {
+ if c.isHTTP {
return nil, ErrNotificationsUnsupported
}
@@ -509,8 +493,8 @@ func (c *Client) send(ctx context.Context, op *requestOp, msg interface{}) error
}
func (c *Client) write(ctx context.Context, msg interface{}, retry bool) error {
- // The previous write failed. Try to establish a new connection.
if c.writeConn == nil {
+ // The previous write failed. Try to establish a new connection.
if err := c.reconnect(ctx); err != nil {
return err
}
@@ -657,7 +641,3 @@ func (c *Client) read(codec ServerCodec) {
c.readOp <- readOp{msgs, batch}
}
}
-
-func (c *Client) isHTTP() bool {
- return c.scheme == httpScheme
-}
diff --git a/rpc/client_test.go b/rpc/client_test.go
index 224eb0c5c..fa6010bb1 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -615,6 +615,7 @@ func TestClientReconnect(t *testing.T) {
// Start a server and corresponding client.
s1, l1 := startServer("127.0.0.1:0")
client, err := DialContext(ctx, "ws://"+l1.Addr().String())
if err != nil {
t.Fatal("can't dial", err)
}
+ defer client.Close()
diff --git a/rpc/http.go b/rpc/http.go
index 32f4e7d90..18404c060 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -48,11 +48,18 @@ type httpConn struct {
headers http.Header
}
-// httpConn is treated specially by Client.
+// httpConn implements ServerCodec, but it is treated specially by Client
+// and some methods don't work. The panic() stubs here exist to ensure
+// this special treatment is correct.
+
func (hc *httpConn) writeJSON(context.Context, interface{}) error {
panic("writeJSON called on httpConn")
}
+func (hc *httpConn) peerInfo() PeerInfo {
+ panic("peerInfo called on httpConn")
+}
+
func (hc *httpConn) remoteAddr() string {
return hc.url
}
@@ -174,6 +181,7 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos
return nil, err
}
req.ContentLength = int64(len(body))
+ req.GetBody = func() (io.ReadCloser, error) { return ioutil.NopCloser(bytes.NewReader(body)), nil }
// set headers
hc.mu.Lock()
@@ -236,20 +244,19 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), code)
return
}
+
+ // Create request-scoped context.
+ connInfo := PeerInfo{Transport: "http", RemoteAddr: r.RemoteAddr}
+ connInfo.HTTP.Version = r.Proto
+ connInfo.HTTP.Host = r.Host
+ connInfo.HTTP.Origin = r.Header.Get("Origin")
+ connInfo.HTTP.UserAgent = r.Header.Get("User-Agent")
+ ctx := r.Context()
+ ctx = context.WithValue(ctx, peerInfoContextKey{}, connInfo)
+
// All checks passed, create a codec that reads directly from the request body
// until EOF, writes the response to w, and orders the server to process a
// single request.
- ctx := r.Context()
- ctx = context.WithValue(ctx, "remote", r.RemoteAddr)
- ctx = context.WithValue(ctx, "scheme", r.Proto)
- ctx = context.WithValue(ctx, "local", r.Host)
- if ua := r.Header.Get("User-Agent"); ua != "" {
- ctx = context.WithValue(ctx, "User-Agent", ua)
- }
- if origin := r.Header.Get("Origin"); origin != "" {
- ctx = context.WithValue(ctx, "Origin", origin)
- }
-
w.Header().Set("content-type", contentType)
codec := newHTTPServerConn(r, w)
defer codec.close()
diff --git a/rpc/http_test.go b/rpc/http_test.go
index 97f8d44c3..c84d7705f 100644
--- a/rpc/http_test.go
+++ b/rpc/http_test.go
@@ -162,3 +162,39 @@ func TestHTTPErrorResponse(t *testing.T) {
t.Error("unexpected error message", errMsg)
}
}
+
+func TestHTTPPeerInfo(t *testing.T) {
+ s := newTestServer()
+ defer s.Stop()
+ ts := httptest.NewServer(s)
+ defer ts.Close()
+
+ c, err := Dial(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.SetHeader("user-agent", "ua-testing")
+ c.SetHeader("origin", "origin.example.com")
+
+ // Request peer information.
+ var info PeerInfo
+ if err := c.Call(&info, "test_peerInfo"); err != nil {
+ t.Fatal(err)
+ }
+
+ if info.RemoteAddr == "" {
+ t.Error("RemoteAddr not set")
+ }
+ if info.Transport != "http" {
+ t.Errorf("wrong Transport %q", info.Transport)
+ }
+ if info.HTTP.Version != "HTTP/1.1" {
+ t.Errorf("wrong HTTP.Version %q", info.HTTP.Version)
+ }
+ if info.HTTP.UserAgent != "ua-testing" {
+ t.Errorf("wrong HTTP.UserAgent %q", info.HTTP.UserAgent)
+ }
+ if info.HTTP.Origin != "origin.example.com" {
+ t.Errorf("wrong HTTP.Origin %q", info.HTTP.UserAgent)
+ }
+}
diff --git a/rpc/json.go b/rpc/json.go
index 1daee3db8..6024f1e7d 100644
--- a/rpc/json.go
+++ b/rpc/json.go
@@ -198,6 +198,11 @@ func NewCodec(conn Conn) ServerCodec {
return NewFuncCodec(conn, enc.Encode, dec.Decode)
}
+func (c *jsonCodec) peerInfo() PeerInfo {
+ // This returns "ipc" because all other built-in transports have a separate codec type.
+ return PeerInfo{Transport: "ipc", RemoteAddr: c.remote}
+}
+
func (c *jsonCodec) remoteAddr() string {
return c.remote
}
diff --git a/rpc/server.go b/rpc/server.go
index 64e078a7f..e2d5c0383 100644
--- a/rpc/server.go
+++ b/rpc/server.go
@@ -145,3 +145,38 @@ func (s *RPCService) Modules() map[string]string {
}
return modules
}
+
+// PeerInfo contains information about the remote end of the network connection.
+//
+// This is available within RPC method handlers through the context. Call
+// PeerInfoFromContext to get information about the client connection related to
+// the current method call.
+type PeerInfo struct {
+ // Transport is the name of the protocol used by the client.
+ // This can be "http", "ws" or "ipc".
+ Transport string
+
+ // Address of client. This will usually contain the IP address and port.
+ RemoteAddr string
+
+ // Additional information for HTTP and WebSocket connections.
+ HTTP struct {
+ // Protocol version, e.g. "HTTP/1.1". This is not set for WebSocket.
+ Version string
+ // Header values sent by the client.
+ UserAgent string
+ Origin string
+ Host string
+ }
+}
+
+type peerInfoContextKey struct{}
+
+// PeerInfoFromContext returns information about the client's network connection.
+// Use this with the context passed to RPC method handler functions.
+//
+// The zero value is returned if no connection info is present in ctx.
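+//
+// A minimal sketch of an RPC handler method reading it (the service type here
+// is hypothetical, for illustration only):
+//
+//	func (s *MyService) ClientTransport(ctx context.Context) string {
+//		return rpc.PeerInfoFromContext(ctx).Transport
+//	}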
+func PeerInfoFromContext(ctx context.Context) PeerInfo {
+ info, _ := ctx.Value(peerInfoContextKey{}).(PeerInfo)
+ return info
+}
diff --git a/rpc/server_test.go b/rpc/server_test.go
index 6a2b09e44..c692a071c 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -45,7 +45,7 @@ func TestServerRegisterName(t *testing.T) {
t.Fatalf("Expected service calc to be registered")
}
- wantCallbacks := 9
+ wantCallbacks := 10
if len(svc.callbacks) != wantCallbacks {
t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks))
}
diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go
index 62afc1df4..253e26328 100644
--- a/rpc/testservice_test.go
+++ b/rpc/testservice_test.go
@@ -80,6 +80,10 @@ func (s *testService) EchoWithCtx(ctx context.Context, str string, i int, args *
return echoResult{str, i, args}
}
+func (s *testService) PeerInfo(ctx context.Context) PeerInfo {
+ return PeerInfoFromContext(ctx)
+}
+
func (s *testService) Sleep(ctx context.Context, duration time.Duration) {
time.Sleep(duration)
}
diff --git a/rpc/types.go b/rpc/types.go
index ca52d474d..959e38372 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -40,8 +40,10 @@ type API struct {
// a RPC session. Implementations must be go-routine safe since the codec can be called in
// multiple go-routines concurrently.
type ServerCodec interface {
+ peerInfo() PeerInfo
readBatch() (msgs []*jsonrpcMessage, isBatch bool, err error)
close()
+
jsonWriter
}
diff --git a/rpc/websocket.go b/rpc/websocket.go
index 5571324af..28380d8aa 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -60,7 +60,7 @@ func (s *Server) WebsocketHandler(allowedOrigins []string) http.Handler {
log.Debug("WebSocket upgrade failed", "err", err)
return
}
- codec := newWebsocketCodec(conn)
+ codec := newWebsocketCodec(conn, r.Host, r.Header)
s.ServeCodec(codec, 0)
})
}
@@ -197,7 +197,7 @@ func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, diale
}
return nil, hErr
}
- return newWebsocketCodec(conn), nil
+ return newWebsocketCodec(conn, endpoint, header), nil
})
}
@@ -235,12 +235,13 @@ func wsClientHeaders(endpoint, origin string) (string, http.Header, error) {
type websocketCodec struct {
*jsonCodec
conn *websocket.Conn
+ info PeerInfo
wg sync.WaitGroup
pingReset chan struct{}
}
-func newWebsocketCodec(conn *websocket.Conn) ServerCodec {
+func newWebsocketCodec(conn *websocket.Conn, host string, req http.Header) ServerCodec {
conn.SetReadLimit(wsMessageSizeLimit)
conn.SetPongHandler(func(appData string) error {
conn.SetReadDeadline(time.Time{})
@@ -250,7 +251,16 @@ func newWebsocketCodec(conn *websocket.Conn) ServerCodec {
jsonCodec: NewFuncCodec(conn, conn.WriteJSON, conn.ReadJSON).(*jsonCodec),
conn: conn,
pingReset: make(chan struct{}, 1),
+ info: PeerInfo{
+ Transport: "ws",
+ RemoteAddr: conn.RemoteAddr().String(),
+ },
}
+ // Fill in connection details.
+ wc.info.HTTP.Host = host
+ wc.info.HTTP.Origin = req.Get("Origin")
+ wc.info.HTTP.UserAgent = req.Get("User-Agent")
+ // Start pinger.
wc.wg.Add(1)
go wc.pingLoop()
return wc
@@ -261,6 +271,10 @@ func (wc *websocketCodec) close() {
wc.wg.Wait()
}
+func (wc *websocketCodec) peerInfo() PeerInfo {
+ return wc.info
+}
+
func (wc *websocketCodec) writeJSON(ctx context.Context, v interface{}) error {
err := wc.jsonCodec.writeJSON(ctx, v)
if err == nil {
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index cf83b621f..8659f798e 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -117,6 +117,41 @@ func TestWebsocketLargeCall(t *testing.T) {
}
}
+func TestWebsocketPeerInfo(t *testing.T) {
+ var (
+ s = newTestServer()
+ ts = httptest.NewServer(s.WebsocketHandler([]string{"origin.example.com"}))
+ tsurl = "ws:" + strings.TrimPrefix(ts.URL, "http:")
+ )
+ defer s.Stop()
+ defer ts.Close()
+
+ ctx := context.Background()
+ c, err := DialWebsocket(ctx, tsurl, "origin.example.com")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Request peer information.
+ var connInfo PeerInfo
+ if err := c.Call(&connInfo, "test_peerInfo"); err != nil {
+ t.Fatal(err)
+ }
+
+ if connInfo.RemoteAddr == "" {
+ t.Error("RemoteAddr not set")
+ }
+ if connInfo.Transport != "ws" {
+ t.Errorf("wrong Transport %q", connInfo.Transport)
+ }
+ if connInfo.HTTP.UserAgent != "Go-http-client/1.1" {
+ t.Errorf("wrong HTTP.UserAgent %q", connInfo.HTTP.UserAgent)
+ }
+ if connInfo.HTTP.Origin != "origin.example.com" {
+ t.Errorf("wrong HTTP.Origin %q", connInfo.HTTP.UserAgent)
+ }
+}
+
// This test checks that client handles WebSocket ping frames correctly.
func TestClientWebsocketPing(t *testing.T) {
t.Parallel()
diff --git a/signer/core/api.go b/signer/core/api.go
index 48b54b8f4..f06fbeb76 100644
--- a/signer/core/api.go
+++ b/signer/core/api.go
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/signer/core/apitypes"
"github.com/ethereum/go-ethereum/signer/storage"
)
@@ -188,23 +189,24 @@ func StartClefAccountManager(ksLocation string, nousb, lightKDF bool, scpath str
// MetadataFromContext extracts Metadata from a given context.Context
func MetadataFromContext(ctx context.Context) Metadata {
+ info := rpc.PeerInfoFromContext(ctx)
+
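+ // Map the connection's PeerInfo onto the Metadata fields that were previously
+ // populated from raw context values.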
m := Metadata{"NA", "NA", "NA", "", ""} // batman
- if v := ctx.Value("remote"); v != nil {
- m.Remote = v.(string)
+ if info.Transport != "" {
+ if info.Transport == "http" {
+ m.Scheme = info.HTTP.Version
+ } else {
+ m.Scheme = info.Transport
+ }
}
- if v := ctx.Value("scheme"); v != nil {
- m.Scheme = v.(string)
+ if info.RemoteAddr != "" {
+ m.Remote = info.RemoteAddr
}
- if v := ctx.Value("local"); v != nil {
- m.Local = v.(string)
- }
- if v := ctx.Value("Origin"); v != nil {
- m.Origin = v.(string)
- }
- if v := ctx.Value("User-Agent"); v != nil {
- m.UserAgent = v.(string)
+ if info.HTTP.Host != "" {
+ m.Local = info.HTTP.Host
}
+ m.Origin = info.HTTP.Origin
+ m.UserAgent = info.HTTP.UserAgent
return m
}
diff --git a/signer/core/api_test.go b/signer/core/api_test.go
index 36f12f71a..9f44ca319 100644
--- a/signer/core/api_test.go
+++ b/signer/core/api_test.go
@@ -256,6 +256,9 @@ func TestSignTx(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ if len(list) == 0 {
+ t.Fatal("Unexpected empty list")
+ }
a := common.NewMixedcaseAddress(list[0])
methodSig := "test(uint)"
diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go
index 15ab15341..f5c2fe2f3 100644
--- a/signer/core/apitypes/types.go
+++ b/signer/core/apitypes/types.go
@@ -262,6 +262,7 @@ func (typedData *TypedData) HashStruct(primaryType string, data TypedDataMessage
// Dependencies returns an array of custom types ordered by their hierarchical reference tree
func (typedData *TypedData) Dependencies(primaryType string, found []string) []string {
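+	// Array types (e.g. "Cell[]") share the dependencies of their element type,
+	// so strip the array suffix first.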
+ primaryType = strings.TrimSuffix(primaryType, "[]")
includes := func(arr []string, str string) bool {
for _, obj := range arr {
if obj == str {
@@ -364,7 +365,7 @@ func (typedData *TypedData) EncodeData(primaryType string, data map[string]inter
if err != nil {
return nil, err
}
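+				// Per EIP-712, each struct element of an array is represented by
+				// the keccak256 hash of its encoding.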
- arrayBuffer.Write(encodedData)
+ arrayBuffer.Write(crypto.Keccak256(encodedData))
} else {
bytesValue, err := typedData.EncodePrimitiveValue(parsedType, item, depth)
if err != nil {
diff --git a/signer/core/gnosis_safe.go b/signer/core/gnosis_safe.go
index 016b1fff3..1b88db1af 100644
--- a/signer/core/gnosis_safe.go
+++ b/signer/core/gnosis_safe.go
@@ -31,6 +31,7 @@ type GnosisSafeTx struct {
SafeTxGas big.Int `json:"safeTxGas"`
Nonce big.Int `json:"nonce"`
InputExpHash common.Hash `json:"safeTxHash"`
+ ChainId *math.HexOrDecimal256 `json:"chainId,omitempty"`
}
// ToTypedData converts the tx to a EIP-712 Typed Data structure for signing
@@ -39,9 +40,14 @@ func (tx *GnosisSafeTx) ToTypedData() apitypes.TypedData {
if tx.Data != nil {
data = *tx.Data
}
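+	// Only include chainId in the EIP-712 domain when it is supplied, so requests
+	// without a chainId keep their previous domain (and hash).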
+ var domainType = []apitypes.Type{{Name: "verifyingContract", Type: "address"}}
+ if tx.ChainId != nil {
+ domainType = append([]apitypes.Type{{Name: "chainId", Type: "uint256"}}, domainType...)
+ }
+
gnosisTypedData := apitypes.TypedData{
Types: apitypes.Types{
- "EIP712Domain": []apitypes.Type{{Name: "verifyingContract", Type: "address"}},
+ "EIP712Domain": domainType,
"SafeTx": []apitypes.Type{
{Name: "to", Type: "address"},
{Name: "value", Type: "uint256"},
@@ -57,6 +63,7 @@ func (tx *GnosisSafeTx) ToTypedData() apitypes.TypedData {
},
Domain: apitypes.TypedDataDomain{
VerifyingContract: tx.Safe.Address().Hex(),
+ ChainId: tx.ChainId,
},
PrimaryType: "SafeTx",
Message: apitypes.TypedDataMessage{
@@ -88,6 +95,7 @@ func (tx *GnosisSafeTx) ArgsForValidation() *apitypes.SendTxArgs {
Nonce: hexutil.Uint64(tx.Nonce.Uint64()),
Data: tx.Data,
Input: nil,
+ ChainID: (*hexutil.Big)(tx.ChainId),
}
return args
}
diff --git a/signer/core/signed_data_test.go b/signer/core/signed_data_test.go
index 1d972d296..fbc2903d9 100644
--- a/signer/core/signed_data_test.go
+++ b/signer/core/signed_data_test.go
@@ -532,3 +532,283 @@ func TestGnosisCustomData(t *testing.T) {
t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash)
}
}
+
+var gnosisTypedDataWithChainId = `
+{
+ "types": {
+ "EIP712Domain": [
+ { "type": "uint256", "name": "chainId" },
+ { "type": "address", "name": "verifyingContract" }
+ ],
+ "SafeTx": [
+ { "type": "address", "name": "to" },
+ { "type": "uint256", "name": "value" },
+ { "type": "bytes", "name": "data" },
+ { "type": "uint8", "name": "operation" },
+ { "type": "uint256", "name": "safeTxGas" },
+ { "type": "uint256", "name": "baseGas" },
+ { "type": "uint256", "name": "gasPrice" },
+ { "type": "address", "name": "gasToken" },
+ { "type": "address", "name": "refundReceiver" },
+ { "type": "uint256", "name": "nonce" }
+ ]
+ },
+ "domain": {
+ "verifyingContract": "0x111dAE35D176A9607053e0c46e91F36AFbC1dc57",
+ "chainId": "4"
+ },
+ "primaryType": "SafeTx",
+ "message": {
+ "to": "0x5592EC0cfb4dbc12D3aB100b257153436a1f0FEa",
+ "value": "0",
+ "data": "0xa9059cbb00000000000000000000000099d580d3a7fe7bd183b2464517b2cd7ce5a8f15a0000000000000000000000000000000000000000000000000de0b6b3a7640000",
+ "operation": 0,
+ "safeTxGas": 0,
+ "baseGas": 0,
+ "gasPrice": "0",
+ "gasToken": "0x0000000000000000000000000000000000000000",
+ "refundReceiver": "0x0000000000000000000000000000000000000000",
+ "nonce": 15
+ }
+}`
+
+var gnosisTxWithChainId = `
+{
+ "safe": "0x111dAE35D176A9607053e0c46e91F36AFbC1dc57",
+ "to": "0x5592EC0cfb4dbc12D3aB100b257153436a1f0FEa",
+ "value": "0",
+ "data": "0xa9059cbb00000000000000000000000099d580d3a7fe7bd183b2464517b2cd7ce5a8f15a0000000000000000000000000000000000000000000000000de0b6b3a7640000",
+ "operation": 0,
+ "gasToken": "0x0000000000000000000000000000000000000000",
+ "safeTxGas": 0,
+ "baseGas": 0,
+ "gasPrice": "0",
+ "refundReceiver": "0x0000000000000000000000000000000000000000",
+ "nonce": 15,
+ "executionDate": "2022-01-10T20:00:12Z",
+ "submissionDate": "2022-01-10T19:59:59.689989Z",
+ "modified": "2022-01-10T20:00:31.903635Z",
+ "blockNumber": 9968802,
+ "transactionHash": "0xc9fef30499ee8984974ab9dddd9d15c2a97c1a4393935dceed5efc3af9fc41a4",
+ "safeTxHash": "0x6619dab5401503f2735256e12b898e69eb701d6a7e0d07abf1be4bb8aebfba29",
+ "executor": "0xbc2BB26a6d821e69A38016f3858561a1D80d4182",
+ "isExecuted": true,
+ "isSuccessful": true,
+ "ethGasPrice": "2500000009",
+ "gasUsed": 82902,
+ "fee": "207255000746118",
+ "chainId": "4",
+ "origin": null,
+ "dataDecoded": {
+ "method": "transfer",
+ "parameters": [
+ {
+ "name": "to",
+ "type": "address",
+ "value": "0x99D580d3a7FE7BD183b2464517B2cD7ce5A8F15A"
+ },
+ {
+ "name": "value",
+ "type": "uint256",
+ "value": "1000000000000000000"
+ }
+ ]
+ },
+ "confirmationsRequired": 1,
+ "confirmations": [
+ {
+ "owner": "0xbc2BB26a6d821e69A38016f3858561a1D80d4182",
+ "submissionDate": "2022-01-10T19:59:59.722500Z",
+ "transactionHash": null,
+ "signature": "0x5ca34641bcdee06e7b99143bfe34778195ca41022bd35837b96c204c7786be9d6dfa6dba43b53cd92da45ac728899e1561b232d28f38ba82df45f164caba38be1b",
+ "signatureType": "EOA"
+ }
+ ],
+ "signatures": "0x5ca34641bcdee06e7b99143bfe34778195ca41022bd35837b96c204c7786be9d6dfa6dba43b53cd92da45ac728899e1561b232d28f38ba82df45f164caba38be1b"
+}
+`
+
+func TestGnosisTypedDataWithChainId(t *testing.T) {
+ var td apitypes.TypedData
+ err := json.Unmarshal([]byte(gnosisTypedDataWithChainId), &td)
+ if err != nil {
+ t.Fatalf("unmarshalling failed '%v'", err)
+ }
+ _, sighash, err := sign(td)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expSigHash := common.FromHex("0x6619dab5401503f2735256e12b898e69eb701d6a7e0d07abf1be4bb8aebfba29")
+ if !bytes.Equal(expSigHash, sighash) {
+ t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash)
+ }
+}
+
+// TestGnosisCustomDataWithChainId tests the scenario where a user submits only the
+// gnosis-safe specific data, and we fill the TypedData struct on our side.
+func TestGnosisCustomDataWithChainId(t *testing.T) {
+ var tx core.GnosisSafeTx
+ err := json.Unmarshal([]byte(gnosisTxWithChainId), &tx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var td = tx.ToTypedData()
+ _, sighash, err := sign(td)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expSigHash := common.FromHex("0x6619dab5401503f2735256e12b898e69eb701d6a7e0d07abf1be4bb8aebfba29")
+ if !bytes.Equal(expSigHash, sighash) {
+ t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash)
+ }
+}
+
+var complexTypedData = `
+{
+ "types": {
+ "EIP712Domain": [
+ {
+ "name": "chainId",
+ "type": "uint256"
+ },
+ {
+ "name": "name",
+ "type": "string"
+ },
+ {
+ "name": "verifyingContract",
+ "type": "address"
+ },
+ {
+ "name": "version",
+ "type": "string"
+ }
+ ],
+ "Action": [
+ {
+ "name": "action",
+ "type": "string"
+ },
+ {
+ "name": "params",
+ "type": "string"
+ }
+ ],
+ "Cell": [
+ {
+ "name": "capacity",
+ "type": "string"
+ },
+ {
+ "name": "lock",
+ "type": "string"
+ },
+ {
+ "name": "type",
+ "type": "string"
+ },
+ {
+ "name": "data",
+ "type": "string"
+ },
+ {
+ "name": "extraData",
+ "type": "string"
+ }
+ ],
+ "Transaction": [
+ {
+ "name": "DAS_MESSAGE",
+ "type": "string"
+ },
+ {
+ "name": "inputsCapacity",
+ "type": "string"
+ },
+ {
+ "name": "outputsCapacity",
+ "type": "string"
+ },
+ {
+ "name": "fee",
+ "type": "string"
+ },
+ {
+ "name": "action",
+ "type": "Action"
+ },
+ {
+ "name": "inputs",
+ "type": "Cell[]"
+ },
+ {
+ "name": "outputs",
+ "type": "Cell[]"
+ },
+ {
+ "name": "digest",
+ "type": "bytes32"
+ }
+ ]
+ },
+ "primaryType": "Transaction",
+ "domain": {
+ "chainId": "56",
+ "name": "da.systems",
+ "verifyingContract": "0x0000000000000000000000000000000020210722",
+ "version": "1"
+ },
+ "message": {
+ "DAS_MESSAGE": "SELL mobcion.bit FOR 100000 CKB",
+ "inputsCapacity": "1216.9999 CKB",
+ "outputsCapacity": "1216.9998 CKB",
+ "fee": "0.0001 CKB",
+ "digest": "0x53a6c0f19ec281604607f5d6817e442082ad1882bef0df64d84d3810dae561eb",
+ "action": {
+ "action": "start_account_sale",
+ "params": "0x00"
+ },
+ "inputs": [
+ {
+ "capacity": "218 CKB",
+ "lock": "das-lock,0x01,0x051c152f77f8efa9c7c6d181cc97ee67c165c506...",
+ "type": "account-cell-type,0x01,0x",
+ "data": "{ account: mobcion.bit, expired_at: 1670913958 }",
+ "extraData": "{ status: 0, records_hash: 0x55478d76900611eb079b22088081124ed6c8bae21a05dd1a0d197efcc7c114ce }"
+ }
+ ],
+ "outputs": [
+ {
+ "capacity": "218 CKB",
+ "lock": "das-lock,0x01,0x051c152f77f8efa9c7c6d181cc97ee67c165c506...",
+ "type": "account-cell-type,0x01,0x",
+ "data": "{ account: mobcion.bit, expired_at: 1670913958 }",
+ "extraData": "{ status: 1, records_hash: 0x55478d76900611eb079b22088081124ed6c8bae21a05dd1a0d197efcc7c114ce }"
+ },
+ {
+ "capacity": "201 CKB",
+ "lock": "das-lock,0x01,0x051c152f77f8efa9c7c6d181cc97ee67c165c506...",
+ "type": "account-sale-cell-type,0x01,0x",
+ "data": "0x1209460ef3cb5f1c68ed2c43a3e020eec2d9de6e...",
+ "extraData": ""
+ }
+ ]
+ }
+}
+`
+
+func TestComplexTypedData(t *testing.T) {
+ var td apitypes.TypedData
+ err := json.Unmarshal([]byte(complexTypedData), &td)
+ if err != nil {
+ t.Fatalf("unmarshalling failed '%v'", err)
+ }
+ _, sighash, err := sign(td)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expSigHash := common.FromHex("0x42b1aca82bb6900ff75e90a136de550a58f1a220a071704088eabd5e6ce20446")
+ if !bytes.Equal(expSigHash, sighash) {
+ t.Fatalf("Error, got %x, wanted %x", sighash, expSigHash)
+ }
+}
diff --git a/tests/evm-benchmarks b/tests/evm-benchmarks
new file mode 160000
index 000000000..849b3e239
--- /dev/null
+++ b/tests/evm-benchmarks
@@ -0,0 +1 @@
+Subproject commit 849b3e239a28f236dc99574b2e10e0c720895105
diff --git a/tests/gen_stenv.go b/tests/gen_stenv.go
index ecf7af850..29fbce121 100644
--- a/tests/gen_stenv.go
+++ b/tests/gen_stenv.go
@@ -17,7 +17,8 @@ var _ = (*stEnvMarshaling)(nil)
func (s stEnv) MarshalJSON() ([]byte, error) {
type stEnv struct {
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"optional"`
+ Random *math.HexOrDecimal256 `json:"currentRandom" gencodec:"optional"`
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
@@ -26,6 +27,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
+ enc.Random = (*math.HexOrDecimal256)(s.Random)
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
enc.Number = math.HexOrDecimal64(s.Number)
enc.Timestamp = math.HexOrDecimal64(s.Timestamp)
@@ -37,7 +39,8 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
func (s *stEnv) UnmarshalJSON(input []byte) error {
type stEnv struct {
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"optional"`
+ Random *math.HexOrDecimal256 `json:"currentRandom" gencodec:"optional"`
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"`
@@ -51,10 +54,12 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'currentCoinbase' for stEnv")
}
s.Coinbase = common.Address(*dec.Coinbase)
- if dec.Difficulty == nil {
- return errors.New("missing required field 'currentDifficulty' for stEnv")
+ if dec.Difficulty != nil {
+ s.Difficulty = (*big.Int)(dec.Difficulty)
+ }
+ if dec.Random != nil {
+ s.Random = (*big.Int)(dec.Random)
}
- s.Difficulty = (*big.Int)(dec.Difficulty)
if dec.GasLimit == nil {
return errors.New("missing required field 'currentGasLimit' for stEnv")
}
diff --git a/tests/init_test.go b/tests/init_test.go
index 312ad8869..7e2f3ff7f 100644
--- a/tests/init_test.go
+++ b/tests/init_test.go
@@ -41,6 +41,7 @@ var (
transactionTestDir = filepath.Join(baseDir, "TransactionTests")
rlpTestDir = filepath.Join(baseDir, "RLPTests")
difficultyTestDir = filepath.Join(baseDir, "BasicTests")
+ benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
)
func readJSON(reader io.Reader, value interface{}) error {
diff --git a/tests/state_test.go b/tests/state_test.go
index 78ecda040..d2c92b211 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -20,9 +20,16 @@ import (
"bufio"
"bytes"
"fmt"
+ "math/big"
+ "os"
+ "path/filepath"
"reflect"
+ "strings"
"testing"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
)
@@ -61,6 +68,7 @@ func TestState(t *testing.T) {
for _, dir := range []string{
stateTestDir,
legacyStateTestDir,
+ benchmarksDir,
} {
st.walk(t, dir, func(t *testing.T, name string, test *StateTest) {
for _, subtest := range test.Subtests() {
@@ -131,3 +139,116 @@ func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) {
// t.Logf("EVM output: 0x%x", tracer.Output())
// t.Logf("EVM error: %v", tracer.Error())
}
+
+func BenchmarkEVM(b *testing.B) {
+ // Walk the directory.
+ dir := benchmarksDir
+ dirinfo, err := os.Stat(dir)
+ if os.IsNotExist(err) || !dirinfo.IsDir() {
+ fmt.Fprintf(os.Stderr, "can't find test files in %s, did you clone the evm-benchmarks submodule?\n", dir)
+ b.Skip("missing test files")
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if info.IsDir() {
+ return nil
+ }
+ if ext := filepath.Ext(path); ext == ".json" {
+ name := filepath.ToSlash(strings.TrimPrefix(strings.TrimSuffix(path, ext), dir+string(filepath.Separator)))
+ b.Run(name, func(b *testing.B) { runBenchmarkFile(b, path) })
+ }
+ return nil
+ })
+ if err != nil {
+ b.Fatal(err)
+ }
+}
+
+func runBenchmarkFile(b *testing.B, path string) {
+ m := make(map[string]StateTest)
+ if err := readJSONFile(path, &m); err != nil {
+ b.Fatal(err)
+ return
+ }
+ if len(m) != 1 {
+ b.Fatal("expected single benchmark in a file")
+ return
+ }
+ for _, t := range m {
+ runBenchmark(b, &t)
+ }
+}
+
+func runBenchmark(b *testing.B, t *StateTest) {
+ for _, subtest := range t.Subtests() {
+ subtest := subtest
+ key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
+
+ b.Run(key, func(b *testing.B) {
+ vmconfig := vm.Config{}
+
+ config, eips, err := GetChainConfig(subtest.Fork)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ vmconfig.ExtraEips = eips
+ block := t.genesis(config).ToBlock(nil)
+ _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false)
+
+ var baseFee *big.Int
+ if config.IsLondon(new(big.Int)) {
+ baseFee = t.json.Env.BaseFee
+ if baseFee == nil {
+ // Retesteth uses `0x10` for genesis baseFee. Therefore, it defaults to
+ // parent - 2 : 0xa as the basefee for 'this' context.
+ baseFee = big.NewInt(0x0a)
+ }
+ }
+ post := t.json.Post[subtest.Fork][subtest.Index]
+ msg, err := t.json.Tx.toMessage(post, baseFee)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+
+ // Try to recover tx with current signer
+ if len(post.TxBytes) != 0 {
+ var ttx types.Transaction
+ err := ttx.UnmarshalBinary(post.TxBytes)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+
+ if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil {
+ b.Error(err)
+ return
+ }
+ }
+
+ // Prepare the EVM.
+ txContext := core.NewEVMTxContext(msg)
+ context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase)
+ context.GetHash = vmTestBlockHash
+ context.BaseFee = baseFee
+ evm := vm.NewEVM(context, txContext, statedb, config, vmconfig)
+
+ // Create "contract" for sender to cache code analysis.
+ sender := vm.NewContract(vm.AccountRef(msg.From()), vm.AccountRef(msg.From()),
+ nil, 0)
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ // Execute the message.
+ snapshot := statedb.Snapshot()
+ _, _, err = evm.Call(sender, *msg.To(), msg.Data(), msg.Gas(), msg.Value())
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ statedb.RevertToSnapshot(snapshot)
+ }
+
+ })
+ }
+}
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index f7fb08bfb..4fd3cf76b 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -80,16 +80,18 @@ type stPostState struct {
type stEnv struct {
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
- Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"`
+ Difficulty *big.Int `json:"currentDifficulty" gencodec:"optional"`
+ Random *big.Int `json:"currentRandom" gencodec:"optional"`
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
Number uint64 `json:"currentNumber" gencodec:"required"`
Timestamp uint64 `json:"currentTimestamp" gencodec:"required"`
- BaseFee *big.Int `json:"currentBaseFee" gencodec:"optional"`
+ BaseFee *big.Int `json:"currentBaseFee" gencodec:"optional"`
}
type stEnvMarshaling struct {
Coinbase common.UnprefixedAddress
Difficulty *math.HexOrDecimal256
+ Random *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
Number math.HexOrDecimal64
Timestamp math.HexOrDecimal64
@@ -218,8 +220,12 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase)
context.GetHash = vmTestBlockHash
context.BaseFee = baseFee
+ if t.json.Env.Random != nil {
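+		// currentRandom marks a post-merge test: expose the value via the block
+		// context's Random field and force the difficulty to zero.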
+ rnd := common.BigToHash(t.json.Env.Random)
+ context.Random = &rnd
+ context.Difficulty = big.NewInt(0)
+ }
evm := vm.NewEVM(context, txContext, statedb, config, vmconfig)
-
// Execute the message.
snapshot := statedb.Snapshot()
gaspool := new(core.GasPool)
@@ -268,7 +274,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
}
func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
- return &core.Genesis{
+ genesis := &core.Genesis{
Config: config,
Coinbase: t.json.Env.Coinbase,
Difficulty: t.json.Env.Difficulty,
@@ -277,6 +283,12 @@ func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
Timestamp: t.json.Env.Timestamp,
Alloc: t.json.Pre,
}
+ if t.json.Env.Random != nil {
+ // Post-Merge
+ genesis.Mixhash = common.BigToHash(t.json.Env.Random)
+ genesis.Difficulty = big.NewInt(0)
+ }
+ return genesis
}
func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (core.Message, error) {
diff --git a/trie/iterator.go b/trie/iterator.go
index 654772aa1..9f6dc3af7 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -115,7 +115,7 @@ type NodeIterator interface {
// Before adding a similar mechanism to any other place in Geth, consider
// making trie.Database an interface and wrapping at that level. It's a huge
// refactor, but it could be worth it if another occurrence arises.
- AddResolver(ethdb.KeyValueStore)
+ AddResolver(ethdb.KeyValueReader)
}
// nodeIteratorState represents the iteration state at one particular node of the
@@ -134,7 +134,7 @@ type nodeIterator struct {
path []byte // Path to the current node
err error // Failure set in case of an internal error in the iterator
- resolver ethdb.KeyValueStore // Optional intermediate resolver above the disk layer
+ resolver ethdb.KeyValueReader // Optional intermediate resolver above the disk layer
}
// errIteratorEnd is stored in nodeIterator.err when iteration is done.
@@ -159,7 +159,7 @@ func newNodeIterator(trie *Trie, start []byte) NodeIterator {
return it
}
-func (it *nodeIterator) AddResolver(resolver ethdb.KeyValueStore) {
+func (it *nodeIterator) AddResolver(resolver ethdb.KeyValueReader) {
it.resolver = resolver
}
@@ -549,7 +549,7 @@ func (it *differenceIterator) Path() []byte {
return it.b.Path()
}
-func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueStore) {
+func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueReader) {
panic("not implemented")
}
@@ -660,7 +660,7 @@ func (it *unionIterator) Path() []byte {
return (*it.items)[0].Path()
}
-func (it *unionIterator) AddResolver(resolver ethdb.KeyValueStore) {
+func (it *unionIterator) AddResolver(resolver ethdb.KeyValueReader) {
panic("not implemented")
}
diff --git a/trie/proof.go b/trie/proof.go
index 51ecea0c3..9be3b6221 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -406,7 +406,7 @@ func unset(parent node, child node, key []byte, pos int, removeLeft bool) error
}
// hasRightElement returns the indicator whether there exists more elements
-// in the right side of the given path. The given path can point to an existent
+// on the right side of the given path. The given path can point to an existent
// key or a non-existent one. This function has the assumption that the whole
// path should already be resolved.
func hasRightElement(node node, key []byte) bool {
@@ -505,7 +505,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
if val != nil || hasRightElement(root, firstKey) {
return false, errors.New("more entries available")
}
- return hasRightElement(root, firstKey), nil
+ return false, nil
}
// Special case, there is only one element and two edge keys are same.
// In this case, we can't construct two edge paths. So handle it here.
@@ -563,7 +563,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
if tr.Hash() != rootHash {
return false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash())
}
- return hasRightElement(root, keys[len(keys)-1]), nil
+ return hasRightElement(tr.root, keys[len(keys)-1]), nil
}
// get returns the child of the given node. Return nil if the
diff --git a/trie/proof_test.go b/trie/proof_test.go
index 95ad6169c..29866714c 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -1067,3 +1067,36 @@ func nonRandomTrie(n int) (*Trie, map[string]*kv) {
}
return trie, vals
}
+
+func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
+ keys := [][]byte{
+ common.Hex2Bytes("aa10000000000000000000000000000000000000000000000000000000000000"),
+ common.Hex2Bytes("aa20000000000000000000000000000000000000000000000000000000000000"),
+ }
+ vals := [][]byte{
+ common.Hex2Bytes("02"),
+ common.Hex2Bytes("03"),
+ }
+ trie := new(Trie)
+ for i, key := range keys {
+ trie.Update(key, vals[i])
+ }
+ root := trie.Hash()
+ proof := memorydb.New()
+ start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")
+ end := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ if err := trie.Prove(start, 0, proof); err != nil {
+ t.Fatalf("failed to prove start: %v", err)
+ }
+ if err := trie.Prove(end, 0, proof); err != nil {
+ t.Fatalf("failed to prove end: %v", err)
+ }
+
+ more, err := VerifyRangeProof(root, start, end, keys, vals, proof)
+ if err != nil {
+ t.Fatalf("failed to verify range proof: %v", err)
+ }
+ if more != false {
+ t.Error("expected more to be false")
+ }
+}
diff --git a/trie/sync.go b/trie/sync.go
index 81d38ee3a..7eaa35244 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -155,8 +155,7 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, cal
}
// If database says this is a duplicate, then at least the trie node is
// present, and we hold the assumption that it's NOT legacy contract code.
- blob := rawdb.ReadTrieNode(s.database, root)
- if len(blob) > 0 {
+ if rawdb.HasTrieNode(s.database, root) {
return
}
// Assemble the new sub-trie sync request
@@ -193,7 +192,7 @@ func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
// sync is expected to run with a fresh new node. Even there
// exists the code with legacy format, fetch and store with
// new scheme anyway.
- if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
+ if rawdb.HasCodeWithPrefix(s.database, hash) {
return
}
// Assemble the new sub-trie sync request
@@ -401,7 +400,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
}
// If database says duplicate, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
- if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
+ if rawdb.HasTrieNode(s.database, hash) {
continue
}
// Locally unknown node, schedule for retrieval