forked from cerc-io/plugeth
Merge tag 'v1.10.16' into re-merge/v1.10.16
commit b404517691
.gitmodules (vendored): 4 changes
@@ -2,3 +2,7 @@
 	path = tests/testdata
 	url = https://github.com/ethereum/tests
 	shallow = true
+[submodule "evm-benchmarks"]
+	path = tests/evm-benchmarks
+	url = https://github.com/ipsilon/evm-benchmarks
+	shallow = true
@@ -1,7 +1,7 @@
 # This file configures github.com/golangci/golangci-lint.

 run:
-  timeout: 5m
+  timeout: 20m
   tests: true
   # default is true. Enables skipping of directories:
   #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
@@ -165,7 +165,7 @@ saving your blockchain as well as map the default ports. There is also an `alpin
 available for a slim version of the image.

 Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
-and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not
+and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not
 accessible from the outside.

 ### Programmatically interfacing `geth` nodes
@@ -19,7 +19,7 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go

 **Please do not file a public ticket** mentioning the vulnerability.

-To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities.
+To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.

 Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
@@ -81,13 +81,7 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
 		if len(arguments) != 0 {
 			return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
 		}
-		// Nothing to unmarshal, return default variables
-		nonIndexedArgs := arguments.NonIndexed()
-		defaultVars := make([]interface{}, len(nonIndexedArgs))
-		for index, arg := range nonIndexedArgs {
-			defaultVars[index] = reflect.New(arg.Type.GetType())
-		}
-		return defaultVars, nil
+		return make([]interface{}, 0), nil
 	}
 	return arguments.UnpackValues(data)
 }
@@ -230,6 +230,9 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common
 	defer b.mu.Unlock()

 	receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
+	if receipt == nil {
+		return nil, ethereum.NotFound
+	}
 	return receipt, nil
 }
@@ -639,7 +642,6 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
 }

 // SendTransaction updates the pending block to include the given transaction.
-// It panics if the transaction is invalid.
 func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -647,17 +649,17 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
 	// Get the last block
 	block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
 	if err != nil {
-		panic("could not fetch parent")
+		return fmt.Errorf("could not fetch parent")
 	}
 	// Check transaction validity
 	signer := types.MakeSigner(b.blockchain.Config(), block.Number())
 	sender, err := types.Sender(signer, tx)
 	if err != nil {
-		panic(fmt.Errorf("invalid transaction: %v", err))
+		return fmt.Errorf("invalid transaction: %v", err)
 	}
 	nonce := b.pendingState.GetNonce(sender)
 	if tx.Nonce() != nonce {
-		panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
+		return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
 	}
 	// Include tx in chain
 	blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
@@ -21,6 +21,7 @@ import (
 	"errors"
 	"time"

+	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
@@ -35,14 +36,16 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
 	logger := log.New("hash", tx.Hash())
 	for {
 		receipt, err := b.TransactionReceipt(ctx, tx.Hash())
-		if receipt != nil {
+		if err == nil {
 			return receipt, nil
 		}
-		if err != nil {
-			logger.Trace("Receipt retrieval failed", "err", err)
-		} else {
+
+		if errors.Is(err, ethereum.NotFound) {
 			logger.Trace("Transaction not yet mined")
+		} else {
+			logger.Trace("Receipt retrieval failed", "err", err)
 		}
+
 		// Wait for the next round.
 		select {
 		case <-ctx.Done():
@@ -46,7 +46,7 @@ const (
 // accounts (derived from the same seed).
 type Wallet interface {
 	// URL retrieves the canonical path under which this wallet is reachable. It is
-	// user by upper layers to define a sorting order over all wallets from multiple
+	// used by upper layers to define a sorting order over all wallets from multiple
 	// backends.
 	URL() URL
@@ -89,7 +89,7 @@ type Wallet interface {
 	// accounts.
 	//
 	// Note, self derivation will increment the last component of the specified path
-	// opposed to decending into a child path to allow discovering accounts starting
+	// opposed to descending into a child path to allow discovering accounts starting
 	// from non zero components.
 	//
 	// Some hardware wallets switched derivation paths through their evolution, so
@@ -105,7 +105,7 @@ type Wallet interface {
 	// or optionally with the aid of any location metadata from the embedded URL field.
 	//
 	// If the wallet requires additional authentication to sign the request (e.g.
-	// a password to decrypt the account, or a PIN code o verify the transaction),
+	// a password to decrypt the account, or a PIN code to verify the transaction),
 	// an AuthNeededError instance will be returned, containing infos for the user
 	// about which fields or actions are needed. The user may retry by providing
 	// the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
@@ -124,13 +124,13 @@ type Wallet interface {
 	// or optionally with the aid of any location metadata from the embedded URL field.
 	//
 	// If the wallet requires additional authentication to sign the request (e.g.
-	// a password to decrypt the account, or a PIN code o verify the transaction),
+	// a password to decrypt the account, or a PIN code to verify the transaction),
 	// an AuthNeededError instance will be returned, containing infos for the user
 	// about which fields or actions are needed. The user may retry by providing
 	// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
 	// the account in a keystore).
 	//
-	// This method should return the signature in 'canonical' format, with v 0 or 1
+	// This method should return the signature in 'canonical' format, with v 0 or 1.
 	SignText(account Account, text []byte) ([]byte, error)

 	// SignTextWithPassphrase is identical to Signtext, but also takes a password
@@ -42,7 +42,7 @@ var ErrInvalidPassphrase = errors.New("invalid password")
 var ErrWalletAlreadyOpen = errors.New("wallet already open")

 // ErrWalletClosed is returned if a wallet is attempted to be opened the
-// secodn time.
+// second time.
 var ErrWalletClosed = errors.New("wallet closed")

 // AuthNeededError is returned by backends for signing requests where the user
@@ -638,7 +638,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
 // accounts.
 //
 // Note, self derivation will increment the last component of the specified path
-// opposed to decending into a child path to allow discovering accounts starting
+// opposed to descending into a child path to allow discovering accounts starting
 // from non zero components.
 //
 // Some hardware wallets switched derivation paths through their evolution, so
@@ -496,7 +496,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
 // accounts.
 //
 // Note, self derivation will increment the last component of the specified path
-// opposed to decending into a child path to allow discovering accounts starting
+// opposed to descending into a child path to allow discovering accounts starting
 // from non zero components.
 //
 // Some hardware wallets switched derivation paths through their evolution, so
@@ -334,7 +334,11 @@ func downloadLinter(cachedir string) string {
 	const version = "1.42.0"

 	csdb := build.MustLoadChecksums("build/checksums.txt")
-	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
+	arch := runtime.GOARCH
+	if arch == "arm" {
+		arch += "v" + os.Getenv("GOARM")
+	}
+	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
 	url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
 	archivePath := filepath.Join(cachedir, base+".tar.gz")
 	if err := csdb.DownloadFile(url, archivePath); err != nil {
@@ -26,6 +26,7 @@ import (
 	"os"
 	"strings"

+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/forkid"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -67,6 +68,13 @@ func (c *Chain) TotalDifficultyAt(height int) *big.Int {
 	return sum
 }

+func (c *Chain) RootAt(height int) common.Hash {
+	if height < c.Len() {
+		return c.blocks[height].Root()
+	}
+	return common.Hash{}
+}
+
 // ForkID gets the fork id of the chain.
 func (c *Chain) ForkID() forkid.ID {
 	return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
@@ -96,6 +96,19 @@ func (s *Suite) dial66() (*Conn, error) {
 	return conn, nil
 }

+// dial66 attempts to dial the given node and perform a handshake,
+// returning the created Conn with additional snap/1 capabilities if
+// successful.
+func (s *Suite) dialSnap() (*Conn, error) {
+	conn, err := s.dial66()
+	if err != nil {
+		return nil, fmt.Errorf("dial failed: %v", err)
+	}
+	conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1})
+	conn.ourHighestSnapProtoVersion = 1
+	return conn, nil
+}
+
 // peer performs both the protocol handshake and the status message
 // exchange with the node in order to peer with it.
 func (c *Conn) peer(chain *Chain, status *Status) error {
@@ -131,7 +144,11 @@ func (c *Conn) handshake() error {
 		}
 		c.negotiateEthProtocol(msg.Caps)
 		if c.negotiatedProtoVersion == 0 {
-			return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
+			return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
 		}
+		// If we require snap, verify that it was negotiated
+		if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion {
+			return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion)
+		}
 		return nil
 	default:
@@ -143,15 +160,21 @@ func (c *Conn) handshake() error {
 // advertised capability from peer.
 func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
 	var highestEthVersion uint
+	var highestSnapVersion uint
 	for _, capability := range caps {
-		if capability.Name != "eth" {
-			continue
-		}
+		switch capability.Name {
+		case "eth":
 			if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
 				highestEthVersion = capability.Version
 			}
+		case "snap":
+			if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion {
+				highestSnapVersion = capability.Version
+			}
+		}
 	}
 	c.negotiatedProtoVersion = highestEthVersion
+	c.negotiatedSnapProtoVersion = highestSnapVersion
 }

 // statusExchange performs a `Status` message exchange with the given node.
@@ -325,6 +348,15 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bo
 	}
 }

+func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
+	defer c.SetReadDeadline(time.Time{})
+	c.SetReadDeadline(time.Now().Add(5 * time.Second))
+	if err := c.Write(msg); err != nil {
+		return nil, fmt.Errorf("could not write to connection: %v", err)
+	}
+	return c.ReadSnap(id)
+}
+
 // getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
 func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
 	// write request
cmd/devp2p/internal/ethtest/snap.go (new file): 675 lines
@@ -0,0 +1,675 @@
// Copyright 2014 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethtest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||
"github.com/ethereum/go-ethereum/light"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"golang.org/x/crypto/sha3"
|
||||
)
|
||||
|
||||
func (s *Suite) TestSnapStatus(t *utesting.T) {
|
||||
conn, err := s.dialSnap()
|
||||
if err != nil {
|
||||
t.Fatalf("dial failed: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
if err := conn.peer(s.chain, nil); err != nil {
|
||||
t.Fatalf("peering failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
type accRangeTest struct {
|
||||
nBytes uint64
|
||||
root common.Hash
|
||||
origin common.Hash
|
||||
limit common.Hash
|
||||
|
||||
expAccounts int
|
||||
expFirst common.Hash
|
||||
expLast common.Hash
|
||||
}
|
||||
|
||||
// TestSnapGetAccountRange various forms of GetAccountRange requests.
|
||||
func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
|
||||
var (
|
||||
root = s.chain.RootAt(999)
|
||||
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||
zero = common.Hash{}
|
||||
firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
|
||||
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
|
||||
firstKeyPlus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b")
|
||||
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
|
||||
storageRoot = common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790")
|
||||
)
|
||||
for i, tc := range []accRangeTest{
|
||||
// Tests decreasing the number of bytes
|
||||
{4000, root, zero, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
|
||||
{3000, root, zero, ffHash, 57, firstKey, common.HexToHash("0x9b63fa753ece5cb90657d02ecb15df4dc1508d8c1d187af1bf7f1a05e747d3c7")},
|
||||
{2000, root, zero, ffHash, 38, firstKey, common.HexToHash("0x5e6140ecae4354a9e8f47559a8c6209c1e0e69cb077b067b528556c11698b91f")},
|
||||
{1, root, zero, ffHash, 1, firstKey, firstKey},
|
||||
|
||||
// Tests variations of the range
|
||||
//
|
||||
// [00b to firstkey]: should return [firstkey, secondkey], where secondkey is out of bounds
|
||||
{4000, root, common.HexToHash("0x00bf000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b"), 2, firstKey, secondKey},
|
||||
// [00b0 to 0bf0]: where both are before firstkey. Should return firstKey (even though it's out of bounds)
|
||||
{4000, root, common.HexToHash("0x00b0000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000"), 1, firstKey, firstKey},
|
||||
{4000, root, zero, zero, 1, firstKey, firstKey},
|
||||
{4000, root, firstKey, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
|
||||
{4000, root, firstKeyPlus1, ffHash, 76, secondKey, common.HexToHash("0xd28f55d3b994f16389f36944ad685b48e0fc3f8fbe86c3ca92ebecadf16a783f")},
|
||||
|
||||
// Test different root hashes
|
||||
//
|
||||
// A stateroot that does not exist
|
||||
{4000, common.Hash{0x13, 37}, zero, ffHash, 0, zero, zero},
|
||||
// The genesis stateroot (we expect it to not be served)
|
||||
{4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero},
|
||||
// A 127 block old stateroot, expected to be served
|
||||
{4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")},
|
||||
// A root which is not actually an account root, but a storage orot
|
||||
{4000, storageRoot, zero, ffHash, 0, zero, zero},
|
||||
|
||||
// And some non-sensical requests
|
||||
//
|
||||
// range from [0xFF to 0x00], wrong order. Expect not to be serviced
|
||||
{4000, root, ffHash, zero, 0, zero, zero},
|
||||
// range from [firstkey, firstkey-1], wrong order. Expect to get first key.
|
||||
{4000, root, firstKey, firstKeyMinus1, 1, firstKey, firstKey},
|
||||
// range from [firstkey, 0], wrong order. Expect to get first key.
|
||||
{4000, root, firstKey, zero, 1, firstKey, firstKey},
|
||||
// Max bytes: 0. Expect to deliver one account.
|
||||
{0, root, zero, ffHash, 1, firstKey, firstKey},
|
||||
} {
|
||||
if err := s.snapGetAccountRange(t, &tc); err != nil {
|
||||
t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type stRangesTest struct {
|
||||
root common.Hash
|
||||
accounts []common.Hash
|
||||
origin []byte
|
||||
limit []byte
|
||||
nBytes uint64
|
||||
|
||||
expSlots int
|
||||
}
|
||||
|
||||
// TestSnapGetStorageRange various forms of GetStorageRanges requests.
|
||||
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
|
||||
var (
|
||||
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||
zero = common.Hash{}
|
||||
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
|
||||
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
|
||||
)
|
||||
for i, tc := range []stRangesTest{
|
||||
{
|
||||
root: s.chain.RootAt(999),
|
||||
accounts: []common.Hash{secondKey, firstKey},
|
||||
origin: zero[:],
|
||||
limit: ffHash[:],
|
||||
nBytes: 500,
|
||||
expSlots: 0,
|
||||
},
|
||||
|
||||
/*
|
||||
Some tests against this account:
|
||||
{
|
||||
"balance": "0",
|
||||
"nonce": 1,
|
||||
"root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
|
||||
"storage": {
|
||||
"0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02",
|
||||
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01",
|
||||
"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03"
|
||||
},
|
||||
"key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"
|
||||
}
|
||||
*/
|
||||
{ // [:] -> [slot1, slot2, slot3]
|
||||
root: s.chain.RootAt(999),
|
||||
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||
origin: zero[:],
|
||||
limit: ffHash[:],
|
||||
nBytes: 500,
|
||||
expSlots: 3,
|
||||
},
|
||||
{ // [slot1:] -> [slot1, slot2, slot3]
|
||||
root: s.chain.RootAt(999),
|
||||
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||
origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
|
||||
limit: ffHash[:],
|
||||
nBytes: 500,
|
||||
expSlots: 3,
|
||||
},
|
||||
{ // [slot1+ :] -> [slot2, slot3]
|
||||
root: s.chain.RootAt(999),
|
||||
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||
origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5acf"),
|
||||
limit: ffHash[:],
|
||||
nBytes: 500,
|
||||
expSlots: 2,
|
||||
},
|
||||
{ // [slot1:slot2] -> [slot1, slot2]
|
||||
root: s.chain.RootAt(999),
|
||||
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||
origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
|
||||
limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
|
||||
nBytes: 500,
|
||||
expSlots: 2,
|
||||
},
|
||||
{ // [slot1+:slot2+] -> [slot2, slot3]
|
||||
root: s.chain.RootAt(999),
|
||||
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||
origin: common.FromHex("0x4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
|
||||
limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7"),
|
||||
nBytes: 500,
|
||||
expSlots: 2,
|
||||
},
|
||||
} {
|
||||
if err := s.snapGetStorageRanges(t, &tc); err != nil {
|
||||
t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v",
|
||||
i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type byteCodesTest struct {
|
||||
nBytes uint64
|
||||
hashes []common.Hash
|
||||
|
||||
expHashes int
|
||||
}
|
||||
|
||||
var (
|
||||
// emptyRoot is the known root hash of an empty trie.
|
||||
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||
// emptyCode is the known hash of the empty EVM bytecode.
|
||||
emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
|
||||
)
|
||||
|
||||
// TestSnapGetByteCodes various forms of GetByteCodes requests.
|
||||
func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
|
||||
// The halfchain import should yield these bytecodes
|
||||
var hcBytecodes []common.Hash
|
||||
for _, s := range []string{
|
||||
"0x200c90460d8b0063210d5f5b9918e053c8f2c024485e0f1b48be8b1fc71b1317",
|
||||
"0x20ba67ed4ac6aff626e0d1d4db623e2fada9593daeefc4a6eb4b70e6cff986f3",
|
||||
"0x24b5b4902cb3d897c1cee9f16be8e897d8fa277c04c6dc8214f18295fca5de44",
|
||||
"0x320b9d0a2be39b8a1c858f9f8cb96b1df0983071681de07ded3a7c0d05db5fd6",
|
||||
"0x48cb0d5275936a24632babc7408339f9f7b051274809de565b8b0db76e97e03c",
|
||||
"0x67c7a6f5cdaa43b4baa0e15b2be63346d1b9ce9f2c3d7e5804e0cacd44ee3b04",
|
||||
"0x6d8418059bdc8c3fabf445e6bfc662af3b6a4ae45999b953996e42c7ead2ab49",
|
||||
"0x7043422e5795d03f17ee0463a37235258e609fdd542247754895d72695e3e142",
|
||||
"0x727f9e6f0c4bac1ff8d72c2972122d9c8d37ccb37e04edde2339e8da193546f1",
|
||||
"0x86ccd5e23c78568a8334e0cebaf3e9f48c998307b0bfb1c378cee83b4bfb29cb",
|
||||
"0x8fc89b00d6deafd4c4279531e743365626dbfa28845ec697919d305c2674302d",
|
||||
"0x92cfc353bcb9746bb6f9996b6b9df779c88af2e9e0eeac44879ca19887c9b732",
|
||||
"0x941b4872104f0995a4898fcf0f615ea6bf46bfbdfcf63ea8f2fd45b3f3286b77",
|
||||
"0xa02fe8f41159bb39d2b704c633c3d6389cf4bfcb61a2539a9155f60786cf815f",
|
||||
"0xa4b94e0afdffcb0af599677709dac067d3145489ea7aede57672bee43e3b7373",
|
||||
"0xaf4e64edd3234c1205b725e42963becd1085f013590bd7ed93f8d711c5eb65fb",
|
||||
"0xb69a18fa855b742031420081999086f6fb56c3930ae8840944e8b8ae9931c51e",
|
||||
"0xc246c217bc73ce6666c93a93a94faa5250564f50a3fdc27ea74c231c07fe2ca6",
|
||||
"0xcd6e4ab2c3034df2a8a1dfaaeb1c4baecd162a93d22de35e854ee2945cbe0c35",
|
||||
"0xe24b692d09d6fc2f3d1a6028c400a27c37d7cbb11511907c013946d6ce263d3b",
|
||||
"0xe440c5f0e8603fd1ed25976eee261ccee8038cf79d6a4c0eb31b2bf883be737f",
|
||||
"0xe6eacbc509203d21ac814b350e72934fde686b7f673c19be8cf956b0c70078ce",
|
||||
"0xe8530de4371467b5be7ea0e69e675ab36832c426d6c1ce9513817c0f0ae1486b",
|
||||
"0xe85d487abbbc83bf3423cf9731360cf4f5a37220e18e5add54e72ee20861196a",
|
||||
"0xf195ea389a5eea28db0be93660014275b158963dec44af1dfa7d4743019a9a49",
|
||||
} {
|
||||
hcBytecodes = append(hcBytecodes, common.HexToHash(s))
|
||||
}
|
||||
|
||||
for i, tc := range []byteCodesTest{
|
||||
// A few stateroots
|
||||
{
|
||||
nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(999)},
|
||||
expHashes: 0,
|
||||
},
|
||||
{
|
||||
nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(0)},
|
||||
expHashes: 0,
|
||||
},
|
||||
// Empties
|
||||
{
|
||||
nBytes: 10000, hashes: []common.Hash{emptyRoot},
|
||||
expHashes: 0,
|
||||
},
|
||||
{
|
||||
nBytes: 10000, hashes: []common.Hash{emptyCode},
|
||||
expHashes: 1,
|
||||
},
|
||||
{
|
||||
nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode},
|
||||
expHashes: 3,
|
||||
},
|
||||
// The existing bytecodes
|
||||
{
|
||||
nBytes: 10000, hashes: hcBytecodes,
|
||||
expHashes: len(hcBytecodes),
|
||||
},
|
||||
// The existing, with limited byte arg
|
||||
{
|
||||
nBytes: 1, hashes: hcBytecodes,
|
||||
expHashes: 1,
|
||||
},
|
||||
{
|
||||
nBytes: 0, hashes: hcBytecodes,
|
||||
expHashes: 1,
|
||||
},
|
||||
{
|
||||
nBytes: 1000, hashes: []common.Hash{hcBytecodes[0], hcBytecodes[0], hcBytecodes[0], hcBytecodes[0]},
|
||||
expHashes: 4,
|
||||
},
|
||||
} {
|
||||
if err := s.snapGetByteCodes(t, &tc); err != nil {
|
||||
t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type trieNodesTest struct {
|
||||
root common.Hash
|
||||
paths []snap.TrieNodePathSet
|
||||
nBytes uint64
|
||||
|
||||
expHashes []common.Hash
|
||||
expReject bool
|
||||
}
|
||||
|
||||
func decodeNibbles(nibbles []byte, bytes []byte) {
|
||||
for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
|
||||
bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
|
||||
}
|
||||
}
|
||||
|
||||
// hasTerm returns whether a hex key has the terminator flag.
|
||||
func hasTerm(s []byte) bool {
|
||||
return len(s) > 0 && s[len(s)-1] == 16
|
||||
}
|
||||
|
||||
func keybytesToHex(str []byte) []byte {
|
||||
l := len(str)*2 + 1
|
||||
var nibbles = make([]byte, l)
|
||||
for i, b := range str {
|
||||
nibbles[i*2] = b / 16
|
||||
nibbles[i*2+1] = b % 16
|
||||
}
|
||||
nibbles[l-1] = 16
|
||||
return nibbles
|
||||
}
|
||||
|
||||
func hexToCompact(hex []byte) []byte {
|
||||
terminator := byte(0)
|
||||
if hasTerm(hex) {
|
||||
terminator = 1
|
||||
hex = hex[:len(hex)-1]
|
||||
}
|
||||
buf := make([]byte, len(hex)/2+1)
|
||||
buf[0] = terminator << 5 // the flag byte
|
||||
if len(hex)&1 == 1 {
|
||||
buf[0] |= 1 << 4 // odd flag
|
||||
buf[0] |= hex[0] // first nibble is contained in the first byte
|
||||
hex = hex[1:]
|
||||
}
|
||||
decodeNibbles(hex, buf[1:])
|
||||
return buf
|
||||
}
|
||||
|
||||
// TestSnapTrieNodes various forms of GetTrieNodes requests.
|
||||
func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
|
||||
|
||||
key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
|
||||
// helper function to iterate the key, and generate the compact-encoded
|
||||
// trie paths along the way.
|
||||
pathTo := func(length int) snap.TrieNodePathSet {
|
||||
hex := keybytesToHex(key)[:length]
|
||||
hex[len(hex)-1] = 0 // remove term flag
|
||||
hKey := hexToCompact(hex)
|
||||
return snap.TrieNodePathSet{hKey}
|
||||
}
|
||||
var accPaths []snap.TrieNodePathSet
|
||||
for i := 1; i <= 65; i++ {
|
||||
accPaths = append(accPaths, pathTo(i))
|
||||
}
|
||||
empty := emptyCode
|
||||
for i, tc := range []trieNodesTest{
|
||||
{
|
||||
root: s.chain.RootAt(999),
|
||||
paths: nil,
|
||||
nBytes: 500,
|
||||
expHashes: nil,
|
||||
},
|
||||
{
|
||||
root: s.chain.RootAt(999),
|
||||
paths: []snap.TrieNodePathSet{
|
||||
snap.TrieNodePathSet{}, // zero-length pathset should 'abort' and kick us off
|
||||
snap.TrieNodePathSet{[]byte{0}},
|
||||
},
|
||||
nBytes: 5000,
|
||||
expHashes: []common.Hash{},
|
||||
expReject: true,
|
||||
},
|
||||
{
|
||||
root: s.chain.RootAt(999),
|
||||
paths: []snap.TrieNodePathSet{
|
||||
snap.TrieNodePathSet{[]byte{0}},
|
||||
snap.TrieNodePathSet{[]byte{1}, []byte{0}},
|
||||
},
|
||||
nBytes: 5000,
|
||||
//0x6b3724a41b8c38b46d4d02fba2bb2074c47a507eb16a9a4b978f91d32e406faf
|
||||
expHashes: []common.Hash{s.chain.RootAt(999)},
|
||||
},
|
||||
{ // nonsensically long path
|
||||
root: s.chain.RootAt(999),
|
||||
paths: []snap.TrieNodePathSet{
|
||||
snap.TrieNodePathSet{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}},
|
||||
},
|
||||
nBytes: 5000,
|
||||
expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")},
|
||||
},
|
||||
{
|
||||
root: s.chain.RootAt(0),
|
||||
paths: []snap.TrieNodePathSet{
|
||||
snap.TrieNodePathSet{[]byte{0}},
|
||||
snap.TrieNodePathSet{[]byte{1}, []byte{0}},
|
||||
},
|
||||
nBytes: 5000,
|
||||
expHashes: []common.Hash{},
|
||||
},
|
||||
{
|
||||
// The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures.
|
||||
root: s.chain.RootAt(999),
|
||||
paths: accPaths,
|
||||
nBytes: 5000,
|
||||
expHashes: []common.Hash{
|
||||
common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
|
||||
common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
|
||||
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||
empty, empty, empty},
|
||||
},
|
||||
{
|
||||
// Basically the same as above, with different ordering
|
||||
root: s.chain.RootAt(999),
|
||||
paths: []snap.TrieNodePathSet{
|
||||
accPaths[10], accPaths[1], accPaths[0],
|
||||
},
|
||||
nBytes: 5000,
|
||||
expHashes: []common.Hash{
|
||||
empty,
|
||||
common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
|
||||
common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
|
||||
},
|
||||
},
|
||||
} {
|
||||
if err := s.snapGetTrieNodes(t, &tc); err != nil {
|
||||
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
|
||||
conn, err := s.dialSnap()
|
||||
if err != nil {
|
||||
t.Fatalf("dial failed: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
if err = conn.peer(s.chain, nil); err != nil {
|
||||
t.Fatalf("peering failed: %v", err)
|
||||
}
|
||||
// write request
|
||||
req := &GetAccountRange{
|
||||
ID: uint64(rand.Int63()),
|
||||
Root: tc.root,
|
||||
Origin: tc.origin,
|
||||
Limit: tc.limit,
|
||||
Bytes: tc.nBytes,
|
||||
}
|
||||
resp, err := conn.snapRequest(req, req.ID, s.chain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("account range request failed: %v", err)
|
||||
}
|
||||
var res *snap.AccountRangePacket
|
||||
if r, ok := resp.(*AccountRange); !ok {
|
||||
return fmt.Errorf("account range response wrong: %T %v", resp, resp)
|
||||
} else {
|
||||
res = (*snap.AccountRangePacket)(r)
|
||||
}
|
||||
if exp, got := tc.expAccounts, len(res.Accounts); exp != got {
|
||||
return fmt.Errorf("expected %d accounts, got %d", exp, got)
|
||||
}
|
||||
// Check that the encoding order is correct
|
||||
for i := 1; i < len(res.Accounts); i++ {
|
||||
if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
|
||||
return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
|
||||
}
|
||||
}
|
||||
var (
|
||||
hashes []common.Hash
|
||||
accounts [][]byte
|
||||
proof = res.Proof
|
||||
)
|
||||
hashes, accounts, err = res.Unpack()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(hashes) > 0 {
|
||||
if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got {
|
||||
return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got)
|
||||
}
|
||||
if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got {
|
||||
return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got)
|
||||
}
|
||||
}
|
||||
// Reconstruct a partial trie from the response and verify it
|
||||
keys := make([][]byte, len(hashes))
|
||||
for i, key := range hashes {
|
||||
keys[i] = common.CopyBytes(key[:])
|
||||
}
|
||||
nodes := make(light.NodeList, len(proof))
|
||||
for i, node := range proof {
|
||||
nodes[i] = node
|
||||
}
|
||||
proofdb := nodes.NodeSet()
|
||||
|
||||
var end []byte
|
||||
if len(keys) > 0 {
|
||||
end = keys[len(keys)-1]
|
||||
}
|
||||
_, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Suite) snapGetStorageRanges(t *utesting.T, tc *stRangesTest) error {
|
||||
conn, err := s.dialSnap()
|
||||
if err != nil {
|
||||
t.Fatalf("dial failed: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
if err = conn.peer(s.chain, nil); err != nil {
|
||||
t.Fatalf("peering failed: %v", err)
|
||||
}
|
||||
// write request
|
||||
req := &GetStorageRanges{
|
||||
ID: uint64(rand.Int63()),
|
||||
Root: tc.root,
|
||||
Accounts: tc.accounts,
|
||||
Origin: tc.origin,
|
||||
Limit: tc.limit,
|
||||
Bytes: tc.nBytes,
|
||||
}
|
||||
resp, err := conn.snapRequest(req, req.ID, s.chain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("account range request failed: %v", err)
|
||||
}
|
||||
var res *snap.StorageRangesPacket
|
||||
if r, ok := resp.(*StorageRanges); !ok {
|
||||
return fmt.Errorf("account range response wrong: %T %v", resp, resp)
|
||||
} else {
|
||||
res = (*snap.StorageRangesPacket)(r)
|
||||
}
|
||||
gotSlots := 0
|
||||
// Ensure the ranges are monotonically increasing
|
||||
for i, slots := range res.Slots {
|
||||
gotSlots += len(slots)
|
||||
for j := 1; j < len(slots); j++ {
|
||||
if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
|
||||
return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
|
||||
}
|
||||
}
|
||||
}
|
||||
if exp, got := tc.expSlots, gotSlots; exp != got {
|
||||
return fmt.Errorf("expected %d slots, got %d", exp, got)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error {
|
||||
conn, err := s.dialSnap()
|
||||
if err != nil {
|
||||
t.Fatalf("dial failed: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
if err = conn.peer(s.chain, nil); err != nil {
|
||||
t.Fatalf("peering failed: %v", err)
|
||||
}
|
||||
// write request
|
||||
req := &GetByteCodes{
|
||||
ID: uint64(rand.Int63()),
|
||||
Hashes: tc.hashes,
|
||||
Bytes: tc.nBytes,
|
||||
}
|
||||
resp, err := conn.snapRequest(req, req.ID, s.chain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getBytecodes request failed: %v", err)
|
||||
}
|
||||
var res *snap.ByteCodesPacket
|
||||
if r, ok := resp.(*ByteCodes); !ok {
|
||||
return fmt.Errorf("bytecodes response wrong: %T %v", resp, resp)
|
||||
} else {
|
||||
res = (*snap.ByteCodesPacket)(r)
|
||||
}
|
||||
if exp, got := tc.expHashes, len(res.Codes); exp != got {
|
||||
for i, c := range res.Codes {
|
||||
fmt.Printf("%d. %#x\n", i, c)
|
||||
}
|
||||
return fmt.Errorf("expected %d bytecodes, got %d", exp, got)
|
||||
}
|
||||
// Cross reference the requested bytecodes with the response to find gaps
|
||||
// that the serving node is missing
|
||||
var (
|
||||
bytecodes = res.Codes
|
||||
hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState)
|
||||
hash = make([]byte, 32)
|
||||
codes = make([][]byte, len(req.Hashes))
|
||||
)
|
||||
|
||||
for i, j := 0, 0; i < len(bytecodes); i++ {
|
||||
// Find the next hash that we've been served, leaving misses with nils
|
||||
hasher.Reset()
|
||||
hasher.Write(bytecodes[i])
|
||||
hasher.Read(hash)
|
||||
|
||||
for j < len(req.Hashes) && !bytes.Equal(hash, req.Hashes[j][:]) {
|
||||
j++
|
||||
}
|
||||
if j < len(req.Hashes) {
|
||||
codes[j] = bytecodes[i]
|
||||
j++
|
||||
continue
|
||||
}
|
||||
// We've either ran out of hashes, or got unrequested data
|
||||
return errors.New("unexpected bytecode")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
|
||||
conn, err := s.dialSnap()
|
||||
if err != nil {
|
||||
t.Fatalf("dial failed: %v", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
if err = conn.peer(s.chain, nil); err != nil {
|
||||
t.Fatalf("peering failed: %v", err)
|
||||
}
|
||||
// write request
|
||||
req := &GetTrieNodes{
|
||||
ID: uint64(rand.Int63()),
|
||||
Root: tc.root,
|
||||
Paths: tc.paths,
|
||||
Bytes: tc.nBytes,
|
||||
}
|
||||
resp, err := conn.snapRequest(req, req.ID, s.chain)
|
||||
if err != nil {
|
||||
if tc.expReject {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("trienodes request failed: %v", err)
|
||||
}
|
||||
var res *snap.TrieNodesPacket
|
||||
if r, ok := resp.(*TrieNodes); !ok {
|
||||
return fmt.Errorf("trienodes response wrong: %T %v", resp, resp)
|
||||
} else {
|
||||
res = (*snap.TrieNodesPacket)(r)
|
||||
}
|
||||
|
||||
// Check the correctness
|
||||
|
||||
// Cross reference the requested trienodes with the response to find gaps
|
||||
// that the serving node is missing
|
||||
hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
|
||||
hash := make([]byte, 32)
|
||||
trienodes := res.Nodes
|
||||
if got, want := len(trienodes), len(tc.expHashes); got != want {
|
||||
return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want)
|
||||
}
|
||||
for i, trienode := range trienodes {
|
||||
hasher.Reset()
|
||||
hasher.Write(trienode)
|
||||
hasher.Read(hash)
|
||||
if got, want := hash, tc.expHashes[i]; !bytes.Equal(got, want[:]) {
|
||||
fmt.Printf("hash %d wrong, got %#x, want %#x\n", i, got, want)
|
||||
err = fmt.Errorf("hash %d wrong, got %#x, want %#x", i, got, want)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
cmd/devp2p/internal/ethtest/snapTypes.go (new file): 36 lines
@@ -0,0 +1,36 @@
+package ethtest
+
+import "github.com/ethereum/go-ethereum/eth/protocols/snap"
+
+// GetAccountRange represents an account range query.
+type GetAccountRange snap.GetAccountRangePacket
+
+func (g GetAccountRange) Code() int { return 33 }
+
+type AccountRange snap.AccountRangePacket
+
+func (g AccountRange) Code() int { return 34 }
+
+type GetStorageRanges snap.GetStorageRangesPacket
+
+func (g GetStorageRanges) Code() int { return 35 }
+
+type StorageRanges snap.StorageRangesPacket
+
+func (g StorageRanges) Code() int { return 36 }
+
+type GetByteCodes snap.GetByteCodesPacket
+
+func (g GetByteCodes) Code() int { return 37 }
+
+type ByteCodes snap.ByteCodesPacket
+
+func (g ByteCodes) Code() int { return 38 }
+
+type GetTrieNodes snap.GetTrieNodesPacket
+
+func (g GetTrieNodes) Code() int { return 39 }
+
+type TrieNodes snap.TrieNodesPacket
+
+func (g TrieNodes) Code() int { return 40 }
@@ -125,6 +125,16 @@ func (s *Suite) Eth66Tests() []utesting.Test {
 	}
 }

+func (s *Suite) SnapTests() []utesting.Test {
+	return []utesting.Test{
+		{Name: "TestSnapStatus", Fn: s.TestSnapStatus},
+		{Name: "TestSnapAccountRange", Fn: s.TestSnapGetAccountRange},
+		{Name: "TestSnapGetByteCodes", Fn: s.TestSnapGetByteCodes},
+		{Name: "TestSnapGetTrieNodes", Fn: s.TestSnapTrieNodes},
+		{Name: "TestSnapGetStorageRanges", Fn: s.TestSnapGetStorageRanges},
+	}
+}
+
 var (
 	eth66 = true  // indicates whether suite should negotiate eth66 connection
 	eth65 = false // indicates whether suite should negotiate eth65 connection or below.
@@ -55,6 +55,27 @@ func TestEthSuite(t *testing.T) {
 	}
 }

+func TestSnapSuite(t *testing.T) {
+	geth, err := runGeth()
+	if err != nil {
+		t.Fatalf("could not run geth: %v", err)
+	}
+	defer geth.Close()
+
+	suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+	if err != nil {
+		t.Fatalf("could not create new test suite: %v", err)
+	}
+	for _, test := range suite.SnapTests() {
+		t.Run(test.Name, func(t *testing.T) {
+			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+			if result[0].Failed {
+				t.Fatal()
+			}
+		})
+	}
+}
+
 // runGeth creates and starts a geth node
 func runGeth() (*node.Node, error) {
 	stack, err := node.New(&node.Config{
@@ -19,6 +19,7 @@ package ethtest
 import (
 	"crypto/ecdsa"
 	"fmt"
+	"time"

 	"github.com/ethereum/go-ethereum/eth/protocols/eth"
 	"github.com/ethereum/go-ethereum/p2p"
@@ -128,7 +129,9 @@ type Conn struct {
 	*rlpx.Conn
 	ourKey                     *ecdsa.PrivateKey
 	negotiatedProtoVersion     uint
+	negotiatedSnapProtoVersion uint
 	ourHighestProtoVersion     uint
+	ourHighestSnapProtoVersion uint
 	caps                       []p2p.Cap
 }
@@ -259,12 +262,7 @@ func (c *Conn) Read66() (uint64, Message) {

 // Write writes a eth packet to the connection.
 func (c *Conn) Write(msg Message) error {
-	// check if message is eth protocol message
-	var (
-		payload []byte
-		err     error
-	)
-	payload, err = rlp.EncodeToBytes(msg)
+	payload, err := rlp.EncodeToBytes(msg)
 	if err != nil {
 		return err
 	}
@@ -281,3 +279,43 @@ func (c *Conn) Write66(req eth.Packet, code int) error {
 	_, err = c.Conn.Write(uint64(code), payload)
 	return err
 }
+
+// ReadSnap reads a snap/1 response with the given id from the connection.
+func (c *Conn) ReadSnap(id uint64) (Message, error) {
+	respId := id + 1
+	start := time.Now()
+	for respId != id && time.Since(start) < timeout {
+		code, rawData, _, err := c.Conn.Read()
+		if err != nil {
+			return nil, fmt.Errorf("could not read from connection: %v", err)
+		}
+		var snpMsg interface{}
+		switch int(code) {
+		case (GetAccountRange{}).Code():
+			snpMsg = new(GetAccountRange)
+		case (AccountRange{}).Code():
+			snpMsg = new(AccountRange)
+		case (GetStorageRanges{}).Code():
+			snpMsg = new(GetStorageRanges)
+		case (StorageRanges{}).Code():
+			snpMsg = new(StorageRanges)
+		case (GetByteCodes{}).Code():
+			snpMsg = new(GetByteCodes)
+		case (ByteCodes{}).Code():
+			snpMsg = new(ByteCodes)
+		case (GetTrieNodes{}).Code():
+			snpMsg = new(GetTrieNodes)
+		case (TrieNodes{}).Code():
+			snpMsg = new(TrieNodes)
+		default:
+			//return nil, fmt.Errorf("invalid message code: %d", code)
+			continue
+		}
+		if err := rlp.DecodeBytes(rawData, snpMsg); err != nil {
+			return nil, fmt.Errorf("could not rlp decode message: %v", err)
+		}
+		return snpMsg.(Message), nil
+
+	}
+	return nil, fmt.Errorf("request timed out")
+}
|
@ -36,6 +36,7 @@ var (
|
||||
Subcommands: []cli.Command{
|
||||
rlpxPingCommand,
|
||||
rlpxEthTestCommand,
|
||||
rlpxSnapTestCommand,
|
||||
},
|
||||
}
|
||||
rlpxPingCommand = cli.Command{
|
||||
@ -53,6 +54,16 @@ var (
|
||||
testTAPFlag,
|
||||
},
|
||||
}
|
||||
rlpxSnapTestCommand = cli.Command{
|
||||
Name: "snap-test",
|
||||
Usage: "Runs tests against a node",
|
||||
ArgsUsage: "<node> <chain.rlp> <genesis.json>",
|
||||
Action: rlpxSnapTest,
|
||||
Flags: []cli.Flag{
|
||||
testPatternFlag,
|
||||
testTAPFlag,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func rlpxPing(ctx *cli.Context) error {
|
||||
@ -106,3 +117,15 @@ func rlpxEthTest(ctx *cli.Context) error {
|
||||
}
|
||||
return runTests(ctx, suite.AllEthTests())
|
||||
}
|
||||
|
||||
// rlpxSnapTest runs the snap protocol test suite.
|
||||
func rlpxSnapTest(ctx *cli.Context) error {
|
||||
if ctx.NArg() < 3 {
|
||||
exit("missing path to chain.rlp as command-line argument")
|
||||
}
|
||||
suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2])
|
||||
if err != nil {
|
||||
exit(err)
|
||||
}
|
||||
return runTests(ctx, suite.SnapTests())
|
||||
}
|
||||
|
@@ -49,7 +49,7 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
 // signHash is a helper function that calculates a hash for the given message
 // that can be safely used to calculate a signature from.
 //
-// The hash is calulcated as
+// The hash is calculated as
 //   keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
 //
 // This gives context to the signed message and prevents signing of transactions.
@ -67,6 +67,7 @@ type ommer struct {
|
||||
type stEnv struct {
|
||||
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
|
||||
Difficulty *big.Int `json:"currentDifficulty"`
|
||||
Random *big.Int `json:"currentRandom"`
|
||||
ParentDifficulty *big.Int `json:"parentDifficulty"`
|
||||
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
|
||||
Number uint64 `json:"currentNumber" gencodec:"required"`
|
||||
@ -81,6 +82,7 @@ type stEnv struct {
|
||||
type stEnvMarshaling struct {
|
||||
Coinbase common.UnprefixedAddress
|
||||
Difficulty *math.HexOrDecimal256
|
||||
Random *math.HexOrDecimal256
|
||||
ParentDifficulty *math.HexOrDecimal256
|
||||
GasLimit math.HexOrDecimal64
|
||||
Number math.HexOrDecimal64
|
||||
@ -139,6 +141,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||
if pre.Env.BaseFee != nil {
|
||||
vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee)
|
||||
}
|
||||
// If random is defined, add it to the vmContext.
|
||||
if pre.Env.Random != nil {
|
||||
rnd := common.BigToHash(pre.Env.Random)
|
||||
vmContext.Random = &rnd
|
||||
}
|
||||
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
|
||||
// done in StateProcessor.Process(block, ...), right before transactions are applied.
|
||||
if chainConfig.DAOForkSupport &&
|
||||
|
@ -18,6 +18,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
|
||||
type stEnv struct {
|
||||
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
|
||||
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
|
||||
Random *math.HexOrDecimal256 `json:"currentRandom"`
|
||||
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
|
||||
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
|
||||
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
|
||||
@ -31,6 +32,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
|
||||
var enc stEnv
|
||||
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
|
||||
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
|
||||
enc.Random = (*math.HexOrDecimal256)(s.Random)
|
||||
enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
|
||||
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
|
||||
enc.Number = math.HexOrDecimal64(s.Number)
|
||||
@ -48,6 +50,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
|
||||
type stEnv struct {
|
||||
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
|
||||
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
|
||||
Random *math.HexOrDecimal256 `json:"currentRandom"`
|
||||
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
|
||||
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
|
||||
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
|
||||
@ -69,6 +72,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
|
||||
if dec.Difficulty != nil {
|
||||
s.Difficulty = (*big.Int)(dec.Difficulty)
|
||||
}
|
||||
if dec.Random != nil {
|
||||
s.Random = (*big.Int)(dec.Random)
|
||||
}
|
||||
if dec.ParentDifficulty != nil {
|
||||
s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
|
||||
}
|
||||
|
@ -252,6 +252,10 @@ func Transition(ctx *cli.Context) error {
|
||||
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
|
||||
}
|
||||
}
|
||||
// Sanity check, to not `panic` in state_transition
|
||||
if prestate.Env.Random != nil && !chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
|
||||
return NewError(ErrorConfig, errors.New("can only apply RANDOM on top of London chainrules"))
|
||||
}
|
||||
if env := prestate.Env; env.Difficulty == nil {
|
||||
// If difficulty was not provided by caller, we need to calculate it.
|
||||
switch {
|
||||
|
@@ -120,7 +120,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
 	if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
 		t.Error(err)
 	}
-	geth := runGeth(t, "account", "import", keyfile, "-password", passwordFile)
+	geth := runGeth(t, "--lightkdf", "account", "import", keyfile, "-password", passwordFile)
 	defer geth.ExpectExit()
 	geth.Expect(expected)
 }
@@ -161,7 +161,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 	if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
 		cfg.Eth.OverrideTerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
 	}
-	backend, _ := utils.RegisterEthService(stack, &cfg.Eth, ctx.GlobalBool(utils.CatalystFlag.Name))
+	backend, _ := utils.RegisterEthService(stack, &cfg.Eth)

 	// Configure GraphQL if requested
 	if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
@ -34,9 +34,11 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/console/prompt"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
@ -69,6 +71,7 @@ Remove blockchain and state databases`,
|
||||
dbDumpFreezerIndex,
|
||||
dbImportCmd,
|
||||
dbExportCmd,
|
||||
dbMetadataCmd,
|
||||
},
|
||||
}
|
||||
dbInspectCmd = cli.Command{
|
||||
@ -233,6 +236,21 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
||||
},
|
||||
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
|
||||
}
|
||||
dbMetadataCmd = cli.Command{
|
||||
Action: utils.MigrateFlags(showMetaData),
|
||||
Name: "metadata",
|
||||
Usage: "Shows metadata about the chain status.",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.MainnetFlag,
|
||||
utils.RopstenFlag,
|
||||
utils.SepoliaFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.GoerliFlag,
|
||||
},
|
||||
Description: "Shows metadata about the chain status.",
|
||||
}
|
||||
)
|
||||
|
||||
func removeDB(ctx *cli.Context) error {
|
||||
@ -539,7 +557,7 @@ func freezerInspect(ctx *cli.Context) error {
|
||||
defer stack.Close()
|
||||
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
|
||||
log.Info("Opening freezer", "location", path, "name", kind)
|
||||
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil {
|
||||
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
|
||||
return err
|
||||
} else {
|
||||
f.DumpIndex(start, end)
|
||||
@ -685,3 +703,50 @@ func exportChaindata(ctx *cli.Context) error {
|
||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
|
||||
}
|
||||
|
||||
func showMetaData(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
defer stack.Close()
|
||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||
ancients, err := db.Ancients()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
|
||||
}
|
||||
pp := func(val *uint64) string {
|
||||
if val == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return fmt.Sprintf("%d (0x%x)", *val, *val)
|
||||
}
|
||||
data := [][]string{
|
||||
{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
|
||||
{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
|
||||
{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
|
||||
{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
|
||||
if b := rawdb.ReadHeadBlock(db); b != nil {
|
||||
data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
|
||||
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
|
||||
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
|
||||
}
|
||||
if h := rawdb.ReadHeadHeader(db); h != nil {
|
||||
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
|
||||
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
|
||||
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
|
||||
}
|
||||
data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
|
||||
{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
|
||||
{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
|
||||
{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
|
||||
{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
|
||||
{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
|
||||
{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
|
||||
{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
|
||||
{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
|
||||
{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
|
||||
}...)
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
table.SetHeader([]string{"Field", "Value"})
|
||||
table.AppendBulk(data)
|
||||
table.Render()
|
||||
return nil
|
||||
}
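Once merged, this surfaces as a `geth db metadata` subcommand (invocation shown for illustration only): it opens the chain database read-only through the same MakeChainDatabase call used above and renders the collected fields as the two-column Field/Value table.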
|
||||
|
@ -160,7 +160,6 @@ var (
|
||||
utils.GpoIgnoreGasPriceFlag,
|
||||
utils.MinerNotifyFullFlag,
|
||||
configFileFlag,
|
||||
utils.CatalystFlag,
|
||||
}
|
||||
|
||||
rpcFlags = []cli.Flag{
|
||||
@ -211,7 +210,7 @@ func init() {
|
||||
// Initialize the CLI app and start Geth
|
||||
app.Action = geth
|
||||
app.HideVersion = true // we have a command to print the version
|
||||
app.Copyright = "Copyright 2013-2021 The go-ethereum Authors"
|
||||
app.Copyright = "Copyright 2013-2022 The go-ethereum Authors"
|
||||
app.Commands = []cli.Command{
|
||||
// See chaincmd.go:
|
||||
initCommand,
|
||||
|
@ -418,8 +418,7 @@ func traverseRawState(ctx *cli.Context) error {
|
||||
// Check for the presence of a non-empty hash node (embedded nodes don't
// have their own hash).
|
||||
if node != (common.Hash{}) {
|
||||
blob := rawdb.ReadTrieNode(chaindb, node)
|
||||
if len(blob) == 0 {
|
||||
if !rawdb.HasTrieNode(chaindb, node) {
|
||||
log.Error("Missing trie node(storage)", "hash", node)
|
||||
return errors.New("missing storage")
|
||||
}
|
||||
|
@ -229,7 +229,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
|
||||
utils.SnapshotFlag,
|
||||
utils.BloomFilterSizeFlag,
|
||||
cli.HelpFlag,
|
||||
utils.CatalystFlag,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -25,6 +25,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/jedisct1/go-minisign"
|
||||
)
|
||||
|
||||
func TestVerification(t *testing.T) {
|
||||
@ -128,3 +130,39 @@ func TestMatching(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGethPubKeysParseable(t *testing.T) {
|
||||
for _, pubkey := range gethPubKeys {
|
||||
_, err := minisign.NewPublicKey(pubkey)
|
||||
if err != nil {
|
||||
t.Errorf("Should be parseable")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeyID(t *testing.T) {
|
||||
type args struct {
|
||||
id [8]byte
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{"@holiman key", args{id: extractKeyId(gethPubKeys[0])}, "FB1D084D39BAEC24"},
|
||||
{"second key", args{id: extractKeyId(gethPubKeys[1])}, "138B1CA303E51687"},
|
||||
{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := keyID(tt.args.id); got != tt.want {
|
||||
t.Errorf("keyID() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func extractKeyId(pubkey string) [8]byte {
|
||||
p, _ := minisign.NewPublicKey(pubkey)
|
||||
return p.KeyId
|
||||
}
|
||||
|
@ -45,7 +45,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/eth/catalyst"
|
||||
ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
"github.com/ethereum/go-ethereum/eth/gasprice"
|
||||
@ -56,6 +56,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||
"github.com/ethereum/go-ethereum/internal/flags"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/metrics/exp"
|
||||
@ -789,11 +790,6 @@ var (
|
||||
Usage: "InfluxDB organization name (v2 only)",
|
||||
Value: metrics.DefaultConfig.InfluxDBOrganization,
|
||||
}
|
||||
|
||||
CatalystFlag = cli.BoolFlag{
|
||||
Name: "catalyst",
|
||||
Usage: "Catalyst mode (eth2 integration testing)",
|
||||
}
|
||||
)
|
||||
|
||||
// MakeDataDir retrieves the currently requested data directory, terminating
|
||||
@ -1673,9 +1669,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
||||
// Create a new developer genesis block or reuse existing one
|
||||
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
|
||||
if ctx.GlobalIsSet(DataDirFlag.Name) {
|
||||
// If datadir doesn't exist we need to open db in write-mode
|
||||
// so leveldb can create files.
|
||||
readonly := true
|
||||
if !common.FileExist(stack.ResolvePath("chaindata")) {
|
||||
readonly = false
|
||||
}
|
||||
// Check if we have an already initialized chain and fall back to
|
||||
// that if so. Otherwise we need to generate a new genesis spec.
|
||||
chaindb := MakeChainDatabase(ctx, stack, false) // TODO (MariusVanDerWijden) make this read only
|
||||
chaindb := MakeChainDatabase(ctx, stack, readonly)
|
||||
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
|
||||
cfg.Genesis = nil // fallback to db content
|
||||
}
|
||||
@ -1710,15 +1712,15 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
|
||||
// RegisterEthService adds an Ethereum client to the stack.
|
||||
// The second return value is the full node instance, which may be nil if the
|
||||
// node is running as a light client.
|
||||
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool) (ethapi.Backend, *eth.Ethereum) {
|
||||
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
|
||||
if cfg.SyncMode == downloader.LightSync {
|
||||
backend, err := les.New(stack, cfg)
|
||||
if err != nil {
|
||||
Fatalf("Failed to register the Ethereum service: %v", err)
|
||||
}
|
||||
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
|
||||
if isCatalyst {
|
||||
if err := catalyst.RegisterLight(stack, backend); err != nil {
|
||||
if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
|
||||
if err := lescatalyst.Register(stack, backend); err != nil {
|
||||
Fatalf("Failed to register the catalyst service: %v", err)
|
||||
}
|
||||
}
|
||||
@ -1734,8 +1736,8 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool
|
||||
Fatalf("Failed to create the LES server: %v", err)
|
||||
}
|
||||
}
|
||||
if isCatalyst {
|
||||
if err := catalyst.Register(stack, backend); err != nil {
|
||||
if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
|
||||
if err := ethcatalyst.Register(stack, backend); err != nil {
|
||||
Fatalf("Failed to register the catalyst service: %v", err)
|
||||
}
|
||||
}
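A minimal wiring sketch (not part of the diff) of the updated call site, assuming the two-argument RegisterEthService signature introduced above; the stack and cfg values are placeholders from the caller's usual node setup, and the catalyst (engine API) service is now registered implicitly whenever the chain config defines a terminal total difficulty.

package sketch

import (
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/node"
)

func registerEth(stack *node.Node, cfg *ethconfig.Config) {
	// No catalyst flag is passed anymore; registration is driven by
	// the chain config's TerminalTotalDifficulty.
	backend, fullNode := utils.RegisterEthService(stack, cfg)
	_ = backend  // ethapi.Backend used for RPC wiring
	_ = fullNode // *eth.Ethereum, nil when running as a light client
}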
|
||||
|
@ -43,7 +43,6 @@ var (
|
||||
// error types into the consensus package.
|
||||
var (
|
||||
errTooManyUncles = errors.New("too many uncles")
|
||||
errInvalidMixDigest = errors.New("invalid mix digest")
|
||||
errInvalidNonce = errors.New("invalid nonce")
|
||||
errInvalidUncleHash = errors.New("invalid uncle hash")
|
||||
)
|
||||
@ -182,10 +181,7 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
|
||||
if len(header.Extra) > 32 {
|
||||
return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
|
||||
}
|
||||
// Verify the seal parts. Ensure the mixhash, nonce and uncle hash are the expected value.
|
||||
if header.MixDigest != (common.Hash{}) {
|
||||
return errInvalidMixDigest
|
||||
}
|
||||
// Verify the seal parts. Ensure the nonce and uncle hash are the expected value.
|
||||
if header.Nonce != beaconNonce {
|
||||
return errInvalidNonce
|
||||
}
|
||||
|
29
core/beacon/errors.go
Normal file
@ -0,0 +1,29 @@
|
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||
|
||||
package beacon
|
||||
|
||||
import "github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
var (
|
||||
VALID = GenericStringResponse{"VALID"}
|
||||
SUCCESS = GenericStringResponse{"SUCCESS"}
|
||||
INVALID = ForkChoiceResponse{Status: "INVALID", PayloadID: nil}
|
||||
SYNCING = ForkChoiceResponse{Status: "SYNCING", PayloadID: nil}
|
||||
GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
|
||||
UnknownPayload = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
|
||||
InvalidTB = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
|
||||
)
|
@ -1,6 +1,6 @@
|
||||
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
|
||||
|
||||
package catalyst
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
@ -1,6 +1,6 @@
|
||||
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
|
||||
|
||||
package catalyst
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
@ -1,4 +1,4 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
@ -14,18 +14,21 @@
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package catalyst
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
//go:generate go run github.com/fjl/gencodec -type PayloadAttributesV1 -field-override payloadAttributesMarshaling -out gen_blockparams.go
|
||||
|
||||
// Structure described at https://github.com/ethereum/execution-apis/pull/74
|
||||
// PayloadAttributesV1 structure described at https://github.com/ethereum/execution-apis/pull/74
|
||||
type PayloadAttributesV1 struct {
|
||||
Timestamp uint64 `json:"timestamp" gencodec:"required"`
|
||||
Random common.Hash `json:"random" gencodec:"required"`
|
||||
@ -39,7 +42,7 @@ type payloadAttributesMarshaling struct {
|
||||
|
||||
//go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go
|
||||
|
||||
// Structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md
|
||||
// ExecutableDataV1 structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md
|
||||
type ExecutableDataV1 struct {
|
||||
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
|
||||
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
|
||||
@ -69,17 +72,6 @@ type executableDataMarshaling struct {
|
||||
Transactions []hexutil.Bytes
|
||||
}
|
||||
|
||||
//go:generate go run github.com/fjl/gencodec -type PayloadResponse -field-override payloadResponseMarshaling -out gen_payload.go
|
||||
|
||||
type PayloadResponse struct {
|
||||
PayloadID uint64 `json:"payloadId"`
|
||||
}
|
||||
|
||||
// JSON type overrides for payloadResponse.
|
||||
type payloadResponseMarshaling struct {
|
||||
PayloadID hexutil.Uint64
|
||||
}
|
||||
|
||||
type NewBlockResponse struct {
|
||||
Valid bool `json:"valid"`
|
||||
}
|
||||
@ -102,9 +94,28 @@ type ConsensusValidatedParams struct {
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// PayloadID is an identifier of the payload build process
|
||||
type PayloadID [8]byte
|
||||
|
||||
func (b PayloadID) String() string {
|
||||
return hexutil.Encode(b[:])
|
||||
}
|
||||
|
||||
func (b PayloadID) MarshalText() ([]byte, error) {
|
||||
return hexutil.Bytes(b[:]).MarshalText()
|
||||
}
|
||||
|
||||
func (b *PayloadID) UnmarshalText(input []byte) error {
|
||||
err := hexutil.UnmarshalFixedText("PayloadID", input, b[:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid payload id %q: %w", input, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ForkChoiceResponse struct {
|
||||
Status string `json:"status"`
|
||||
PayloadID *hexutil.Bytes `json:"payloadId"`
|
||||
PayloadID *PayloadID `json:"payloadId"`
|
||||
}
|
||||
|
||||
type ForkchoiceStateV1 struct {
|
||||
@ -112,3 +123,82 @@ type ForkchoiceStateV1 struct {
|
||||
SafeBlockHash common.Hash `json:"safeBlockHash"`
|
||||
FinalizedBlockHash common.Hash `json:"finalizedBlockHash"`
|
||||
}
|
||||
|
||||
func encodeTransactions(txs []*types.Transaction) [][]byte {
|
||||
var enc = make([][]byte, len(txs))
|
||||
for i, tx := range txs {
|
||||
enc[i], _ = tx.MarshalBinary()
|
||||
}
|
||||
return enc
|
||||
}
|
||||
|
||||
func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
|
||||
var txs = make([]*types.Transaction, len(enc))
|
||||
for i, encTx := range enc {
|
||||
var tx types.Transaction
|
||||
if err := tx.UnmarshalBinary(encTx); err != nil {
|
||||
return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
|
||||
}
|
||||
txs[i] = &tx
|
||||
}
|
||||
return txs, nil
|
||||
}
|
||||
|
||||
// ExecutableDataToBlock constructs a block from executable data.
|
||||
// It verifies the following fields:
|
||||
// len(extraData) <= 32
|
||||
// uncleHash = emptyUncleHash
|
||||
// difficulty = 0
|
||||
// and that the blockhash of the constructed block matches the parameters.
|
||||
func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
|
||||
txs, err := decodeTransactions(params.Transactions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(params.ExtraData) > 32 {
|
||||
return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
|
||||
}
|
||||
header := &types.Header{
|
||||
ParentHash: params.ParentHash,
|
||||
UncleHash: types.EmptyUncleHash,
|
||||
Coinbase: params.FeeRecipient,
|
||||
Root: params.StateRoot,
|
||||
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
|
||||
ReceiptHash: params.ReceiptsRoot,
|
||||
Bloom: types.BytesToBloom(params.LogsBloom),
|
||||
Difficulty: common.Big0,
|
||||
Number: new(big.Int).SetUint64(params.Number),
|
||||
GasLimit: params.GasLimit,
|
||||
GasUsed: params.GasUsed,
|
||||
Time: params.Timestamp,
|
||||
BaseFee: params.BaseFeePerGas,
|
||||
Extra: params.ExtraData,
|
||||
MixDigest: params.Random,
|
||||
}
|
||||
block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
|
||||
if block.Hash() != params.BlockHash {
|
||||
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
// BlockToExecutableData constructs the executableDataV1 structure by filling the
|
||||
// fields from the given block. It assumes the given block is post-merge block.
|
||||
func BlockToExecutableData(block *types.Block) *ExecutableDataV1 {
|
||||
return &ExecutableDataV1{
|
||||
BlockHash: block.Hash(),
|
||||
ParentHash: block.ParentHash(),
|
||||
FeeRecipient: block.Coinbase(),
|
||||
StateRoot: block.Root(),
|
||||
Number: block.NumberU64(),
|
||||
GasLimit: block.GasLimit(),
|
||||
GasUsed: block.GasUsed(),
|
||||
BaseFeePerGas: block.BaseFee(),
|
||||
Timestamp: block.Time(),
|
||||
ReceiptsRoot: block.ReceiptHash(),
|
||||
LogsBloom: block.Bloom().Bytes(),
|
||||
Transactions: encodeTransactions(block.Transactions()),
|
||||
Random: block.MixDigest(),
|
||||
ExtraData: block.Extra(),
|
||||
}
|
||||
}
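A small round-trip sketch (not part of the diff) using the two helpers above; it assumes a fully post-merge block blk with zero difficulty and no uncles, so the reconstructed header fields match.

package sketch

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/beacon"
	"github.com/ethereum/go-ethereum/core/types"
)

// roundTrip converts a block to an ExecutableDataV1 payload and back,
// checking that the reconstructed block hashes to the same value.
func roundTrip(blk *types.Block) error {
	payload := beacon.BlockToExecutableData(blk)
	rebuilt, err := beacon.ExecutableDataToBlock(*payload)
	if err != nil {
		return err // oversized extra data, bad transactions, or hash mismatch
	}
	if rebuilt.Hash() != blk.Hash() {
		return fmt.Errorf("round-trip hash mismatch: %x != %x", rebuilt.Hash(), blk.Hash())
	}
	return nil
}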
|
@ -554,7 +554,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
|
||||
// Degrade the chain markers if they are explicitly reverted.
|
||||
// In theory we should update all in-memory markers in the
|
||||
// last step, however the direction of SetHead is from high
|
||||
// to low, so it's safe the update in-memory markers directly.
|
||||
// to low, so it's safe to update in-memory markers directly.
|
||||
bc.currentBlock.Store(newHeadBlock)
|
||||
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
|
||||
}
|
||||
@ -979,32 +979,31 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
||||
// range. In this case, all tx indices of newly imported blocks should be
|
||||
// generated.
|
||||
var batch = bc.db.NewBatch()
|
||||
for _, block := range blockChain {
|
||||
for i, block := range blockChain {
|
||||
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
|
||||
rawdb.WriteTxLookupEntriesByBlock(batch, block)
|
||||
} else if rawdb.ReadTxIndexTail(bc.db) != nil {
|
||||
rawdb.WriteTxLookupEntriesByBlock(batch, block)
|
||||
}
|
||||
stats.processed++
|
||||
}
|
||||
|
||||
// Flush all tx-lookup index data.
|
||||
if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
|
||||
size += int64(batch.ValueSize())
|
||||
if err := batch.Write(); err != nil {
|
||||
// The tx index data could not be written.
|
||||
// Roll back the ancient store update.
|
||||
if err = batch.Write(); err != nil {
|
||||
fastBlock := bc.CurrentFastBlock().NumberU64()
|
||||
if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
|
||||
log.Error("Can't truncate ancient store after failed insert", "err", err)
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
batch.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
|
||||
if err := bc.db.Sync(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Update the current fast block because all block data is now present in DB.
|
||||
previousFastBlock := bc.CurrentFastBlock().NumberU64()
|
||||
if !updateHead(blockChain[len(blockChain)-1]) {
|
||||
|
@ -1779,6 +1779,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
|
||||
SnapshotLimit: 0, // Disable snapshot by default
|
||||
}
|
||||
)
|
||||
defer engine.Close()
|
||||
if snapshots {
|
||||
config.SnapshotLimit = 256
|
||||
config.SnapshotWait = true
|
||||
@ -1836,25 +1837,25 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
|
||||
newChain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to recreate chain: %v", err)
|
||||
}
|
||||
defer chain.Stop()
|
||||
defer newChain.Stop()
|
||||
|
||||
// Iterate over all the remaining blocks and ensure there are no gaps
|
||||
verifyNoGaps(t, chain, true, canonblocks)
|
||||
verifyNoGaps(t, chain, false, sideblocks)
|
||||
verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
|
||||
verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
|
||||
verifyNoGaps(t, newChain, true, canonblocks)
|
||||
verifyNoGaps(t, newChain, false, sideblocks)
|
||||
verifyCutoff(t, newChain, true, canonblocks, tt.expCanonicalBlocks)
|
||||
verifyCutoff(t, newChain, false, sideblocks, tt.expSidechainBlocks)
|
||||
|
||||
if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
|
||||
if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
|
||||
t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
|
||||
}
|
||||
if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
|
||||
if head := newChain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
|
||||
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
|
||||
}
|
||||
if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
|
||||
if head := newChain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
|
||||
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
|
||||
}
|
||||
if frozen, err := db.(freezer).Ancients(); err != nil {
|
||||
|
@ -2987,10 +2987,10 @@ func TestDeleteRecreateSlots(t *testing.T) {
|
||||
initCode := []byte{
|
||||
byte(vm.PUSH1), 0x3, // value
|
||||
byte(vm.PUSH1), 0x3, // location
|
||||
byte(vm.SSTORE), // Set slot[3] = 1
|
||||
byte(vm.SSTORE), // Set slot[3] = 3
|
||||
byte(vm.PUSH1), 0x4, // value
|
||||
byte(vm.PUSH1), 0x4, // location
|
||||
byte(vm.SSTORE), // Set slot[4] = 1
|
||||
byte(vm.SSTORE), // Set slot[4] = 4
|
||||
// Slots are set, now return the code
|
||||
byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
|
||||
byte(vm.PUSH1), 0x0, // memory start on stack
|
||||
|
@ -40,6 +40,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
|
||||
var (
|
||||
beneficiary common.Address
|
||||
baseFee *big.Int
|
||||
random *common.Hash
|
||||
)
|
||||
|
||||
// If we don't have an explicit author (i.e. not mining), extract from the header
|
||||
@ -51,6 +52,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
|
||||
if header.BaseFee != nil {
|
||||
baseFee = new(big.Int).Set(header.BaseFee)
|
||||
}
|
||||
if header.Difficulty.Cmp(common.Big0) == 0 {
|
||||
random = &header.MixDigest
|
||||
}
|
||||
return vm.BlockContext{
|
||||
CanTransfer: CanTransfer,
|
||||
Transfer: Transfer,
|
||||
@ -61,6 +65,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
|
||||
Difficulty: new(big.Int).Set(header.Difficulty),
|
||||
BaseFee: baseFee,
|
||||
GasLimit: header.GasLimit,
|
||||
Random: random,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -294,7 +294,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
|
||||
if g.GasLimit == 0 {
|
||||
head.GasLimit = params.GenesisGasLimit
|
||||
}
|
||||
if g.Difficulty == nil {
|
||||
if g.Difficulty == nil && g.Mixhash == (common.Hash{}) {
|
||||
head.Difficulty = params.GenesisDifficulty
|
||||
}
|
||||
if g.Config != nil && g.Config.IsLondon(common.Big0) {
|
||||
|
@ -41,16 +41,14 @@ func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
|
||||
|
||||
// ReadCode retrieves the contract code of the provided code hash.
|
||||
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
|
||||
// Try with the legacy code scheme first, if not then try with current
|
||||
// scheme. Since most of the code will be found with legacy scheme.
|
||||
//
|
||||
// todo(rjl493456442) change the order when we forcibly upgrade the code
|
||||
// scheme with snapshot.
|
||||
data, _ := db.Get(hash[:])
|
||||
// Try with the prefixed code scheme first, if not then try with legacy
|
||||
// scheme.
|
||||
data := ReadCodeWithPrefix(db, hash)
|
||||
if len(data) != 0 {
|
||||
return data
|
||||
}
|
||||
return ReadCodeWithPrefix(db, hash)
|
||||
data, _ = db.Get(hash[:])
|
||||
return data
|
||||
}
|
||||
|
||||
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
|
||||
@ -61,6 +59,14 @@ func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
|
||||
return data
|
||||
}
|
||||
|
||||
// HasCodeWithPrefix checks if the contract code corresponding to the
|
||||
// provided code hash is present in the db. This function will only check
|
||||
// presence using the prefix-scheme.
|
||||
func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
|
||||
ok, _ := db.Has(codeKey(hash))
|
||||
return ok
|
||||
}
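A one-function sketch (assumption: an open ethdb.KeyValueReader db) showing how the reordered lookup in ReadCode plus the new HasCodeWithPrefix can be used to spot code still stored under the legacy, unprefixed key.

package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// hasLegacyCode reports whether code for hash h exists but is only reachable
// via the legacy key scheme (i.e. not under the prefixed scheme).
func hasLegacyCode(db ethdb.KeyValueReader, h common.Hash) bool {
	return len(rawdb.ReadCode(db, h)) != 0 && !rawdb.HasCodeWithPrefix(db, h)
}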
|
||||
|
||||
// WriteCode writes the provided contract code to the database.
|
||||
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
|
||||
if err := db.Put(codeKey(hash), code); err != nil {
|
||||
@ -81,6 +87,12 @@ func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
|
||||
return data
|
||||
}
|
||||
|
||||
// HasTrieNode checks if the trie node with the provided hash is present in db.
|
||||
func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
|
||||
ok, _ := db.Has(hash.Bytes())
|
||||
return ok
|
||||
}
|
||||
|
||||
// WriteTrieNode writes the provided trie node to the database.
|
||||
func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
|
||||
if err := db.Put(hash.Bytes(), node); err != nil {
|
||||
|
@ -133,7 +133,7 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
|
||||
|
||||
// Create the tables.
|
||||
for name, disableSnappy := range tables {
|
||||
table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy)
|
||||
table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly)
|
||||
if err != nil {
|
||||
for _, table := range freezer.tables {
|
||||
table.Close()
|
||||
@ -144,8 +144,15 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
|
||||
freezer.tables[name] = table
|
||||
}
|
||||
|
||||
if freezer.readonly {
|
||||
// In readonly mode only validate, don't truncate.
|
||||
// validate also sets `freezer.frozen`.
|
||||
err = freezer.validate()
|
||||
} else {
|
||||
// Truncate all tables to common length.
|
||||
if err := freezer.repair(); err != nil {
|
||||
err = freezer.repair()
|
||||
}
|
||||
if err != nil {
|
||||
for _, table := range freezer.tables {
|
||||
table.Close()
|
||||
}
|
||||
@ -309,6 +316,33 @@ func (f *freezer) Sync() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// validate checks that every table has the same length.
|
||||
// Used instead of `repair` in readonly mode.
|
||||
func (f *freezer) validate() error {
|
||||
if len(f.tables) == 0 {
|
||||
return nil
|
||||
}
|
||||
var (
|
||||
length uint64
|
||||
name string
|
||||
)
|
||||
// Hack to get length of any table
|
||||
for kind, table := range f.tables {
|
||||
length = atomic.LoadUint64(&table.items)
|
||||
name = kind
|
||||
break
|
||||
}
|
||||
// Now check every table against that length
|
||||
for kind, table := range f.tables {
|
||||
items := atomic.LoadUint64(&table.items)
|
||||
if length != items {
|
||||
return fmt.Errorf("freezer tables %s and %s have differing lengths: %d != %d", kind, name, items, length)
|
||||
}
|
||||
}
|
||||
atomic.StoreUint64(&f.frozen, length)
|
||||
return nil
|
||||
}
|
||||
|
||||
// repair truncates all data tables to the same length.
|
||||
func (f *freezer) repair() error {
|
||||
min := uint64(math.MaxUint64)
|
||||
|
@ -95,6 +95,7 @@ type freezerTable struct {
|
||||
items uint64 // Number of items stored in the table (including items removed from tail)
|
||||
|
||||
noCompression bool // if true, disables snappy compression. Note: does not work retroactively
|
||||
readonly bool
|
||||
maxFileSize uint32 // Max file size for data-files
|
||||
name string
|
||||
path string
|
||||
@ -119,8 +120,8 @@ type freezerTable struct {
|
||||
}
|
||||
|
||||
// NewFreezerTable opens the given path as a freezer table.
|
||||
func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
|
||||
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
|
||||
func NewFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
|
||||
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
|
||||
}
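A read-only usage sketch mirroring the freezerInspect change earlier in this diff; the data directory path and table name are illustrative assumptions, not values from the diff.

package main

import (
	"log"
	"path/filepath"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Open the "headers" table of an existing ancient store without write
	// access; with readonly=true this fails if the table does not exist.
	path := filepath.Join("/tmp/geth-datadir", "chaindata", "ancient")
	table, err := rawdb.NewFreezerTable(path, "headers", false, true)
	if err != nil {
		log.Fatal(err)
	}
	table.DumpIndex(0, 100) // print the first hundred index entries
}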
|
||||
|
||||
// openFreezerFileForAppend opens a freezer table file and seeks to the end
|
||||
@ -164,7 +165,7 @@ func truncateFreezerFile(file *os.File, size int64) error {
|
||||
// newTable opens a freezer table, creating the data and index files if they are
|
||||
// non existent. Both files are truncated to the shortest common length to ensure
|
||||
// they don't go out of sync.
|
||||
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
|
||||
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
|
||||
// Ensure the containing directory exists and open the indexEntry file
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return nil, err
|
||||
@ -177,7 +178,16 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
|
||||
// Compressed idx
|
||||
idxName = fmt.Sprintf("%s.cidx", name)
|
||||
}
|
||||
offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
|
||||
var (
|
||||
err error
|
||||
offsets *os.File
|
||||
)
|
||||
if readonly {
|
||||
// Will fail if table doesn't exist
|
||||
offsets, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
|
||||
} else {
|
||||
offsets, err = openFreezerFileForAppend(filepath.Join(path, idxName))
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -192,6 +202,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
|
||||
path: path,
|
||||
logger: log.New("database", path, "table", name),
|
||||
noCompression: noCompression,
|
||||
readonly: readonly,
|
||||
maxFileSize: maxFilesize,
|
||||
}
|
||||
if err := tab.repair(); err != nil {
|
||||
@ -252,7 +263,11 @@ func (t *freezerTable) repair() error {
|
||||
|
||||
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
|
||||
lastIndex.unmarshalBinary(buffer)
|
||||
if t.readonly {
|
||||
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
|
||||
} else {
|
||||
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -301,6 +316,8 @@ func (t *freezerTable) repair() error {
|
||||
contentExp = int64(lastIndex.offset)
|
||||
}
|
||||
}
|
||||
// Sync() fails for read-only files on Windows.
|
||||
if !t.readonly {
|
||||
// Ensure all repair changes have been written to disk
|
||||
if err := t.index.Sync(); err != nil {
|
||||
return err
|
||||
@ -308,6 +325,7 @@ func (t *freezerTable) repair() error {
|
||||
if err := t.head.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Update the item and byte counters and return
|
||||
t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
|
||||
t.headBytes = contentSize
|
||||
@ -334,8 +352,12 @@ func (t *freezerTable) preopen() (err error) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if t.readonly {
|
||||
t.head, err = t.openFile(t.headId, openFreezerFileForReadOnly)
|
||||
} else {
|
||||
// Open head in read/write
|
||||
t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -40,7 +40,7 @@ func TestFreezerBasics(t *testing.T) {
|
||||
// set cutoff at 50 bytes
|
||||
f, err := newTable(os.TempDir(),
|
||||
fmt.Sprintf("unittest-%d", rand.Uint64()),
|
||||
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
|
||||
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -85,7 +85,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
|
||||
f *freezerTable
|
||||
err error
|
||||
)
|
||||
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -99,7 +99,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
|
||||
require.NoError(t, batch.commit())
|
||||
f.Close()
|
||||
|
||||
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -116,7 +116,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
|
||||
t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
|
||||
}
|
||||
f.Close()
|
||||
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -131,7 +131,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
|
||||
|
||||
// Fill table
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -160,7 +160,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
|
||||
|
||||
// Now open it again
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -183,7 +183,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
|
||||
|
||||
// Fill a table and close it
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -209,7 +209,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
|
||||
|
||||
// Now open it again
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -232,7 +232,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
|
||||
|
||||
// And if we open it, we should now be able to read all of them (new values)
|
||||
{
|
||||
f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
for y := 1; y < 255; y++ {
|
||||
exp := getChunk(15, ^y)
|
||||
got, err := f.Retrieve(uint64(y))
|
||||
@ -254,7 +254,7 @@ func TestSnappyDetection(t *testing.T) {
|
||||
|
||||
// Open with snappy
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -265,7 +265,7 @@ func TestSnappyDetection(t *testing.T) {
|
||||
|
||||
// Open without snappy
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -277,7 +277,7 @@ func TestSnappyDetection(t *testing.T) {
|
||||
|
||||
// Open with snappy
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -309,7 +309,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
|
||||
|
||||
// Fill a table and close it
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -345,7 +345,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
|
||||
// 45, 45, 15
|
||||
// with 3+3+1 items
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -366,7 +366,7 @@ func TestFreezerTruncate(t *testing.T) {
|
||||
|
||||
// Fill table
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -382,7 +382,7 @@ func TestFreezerTruncate(t *testing.T) {
|
||||
|
||||
// Reopen, truncate
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -407,7 +407,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
|
||||
|
||||
// Fill table
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -440,7 +440,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
|
||||
|
||||
// Reopen
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -475,7 +475,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
|
||||
|
||||
// Fill table
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -491,7 +491,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
|
||||
|
||||
// Reopen and read all files
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -523,7 +523,7 @@ func TestFreezerOffset(t *testing.T) {
|
||||
|
||||
// Fill table
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -584,7 +584,7 @@ func TestFreezerOffset(t *testing.T) {
|
||||
|
||||
// Now open again
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -638,7 +638,7 @@ func TestFreezerOffset(t *testing.T) {
|
||||
|
||||
// Check that existing items have been moved to index 1M.
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -726,7 +726,7 @@ func TestSequentialRead(t *testing.T) {
|
||||
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
|
||||
fname := fmt.Sprintf("batchread-%d", rand.Uint64())
|
||||
{ // Fill table
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -736,7 +736,7 @@ func TestSequentialRead(t *testing.T) {
|
||||
f.Close()
|
||||
}
|
||||
{ // Open it, iterate, verify iteration
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -757,7 +757,7 @@ func TestSequentialRead(t *testing.T) {
|
||||
}
|
||||
{ // Open it, iterate, verify byte limit. The byte limit is less than item
|
||||
// size, so each lookup should only return one item
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -786,7 +786,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
|
||||
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
|
||||
fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
|
||||
{ // Fill table
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -808,7 +808,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
|
||||
{100, 109, 10},
|
||||
} {
|
||||
{
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
|
||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -829,3 +829,89 @@ func TestSequentialReadByteLimit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFreezerReadonly(t *testing.T) {
|
||||
tmpdir := os.TempDir()
|
||||
// Case 1: Check it fails on non-existent file.
|
||||
_, err := newTable(tmpdir,
|
||||
fmt.Sprintf("readonlytest-%d", rand.Uint64()),
|
||||
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
|
||||
if err == nil {
|
||||
t.Fatal("readonly table instantiation should fail for non-existent table")
|
||||
}
|
||||
|
||||
// Case 2: Check that it fails on invalid index length.
|
||||
fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
|
||||
idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
|
||||
if err != nil {
|
||||
t.Errorf("Failed to open index file: %v\n", err)
|
||||
}
|
||||
// size should not be a multiple of indexEntrySize.
|
||||
idxFile.Write(make([]byte, 17))
|
||||
idxFile.Close()
|
||||
_, err = newTable(tmpdir, fname,
|
||||
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
|
||||
if err == nil {
|
||||
t.Errorf("readonly table instantiation should fail for invalid index size")
|
||||
}
|
||||
|
||||
// Case 3: Open a non-readonly table to write some data.
|
||||
// Then corrupt the head file and make sure opening the table
|
||||
// again in readonly triggers an error.
|
||||
fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
|
||||
f, err := newTable(tmpdir, fname,
|
||||
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to instantiate table: %v", err)
|
||||
}
|
||||
writeChunks(t, f, 8, 32)
|
||||
// Corrupt table file
|
||||
if _, err := f.head.Write([]byte{1, 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = newTable(tmpdir, fname,
|
||||
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
|
||||
if err == nil {
|
||||
t.Errorf("readonly table instantiation should fail for corrupt table file")
|
||||
}
|
||||
|
||||
// Case 4: Write some data to a table and later re-open it as readonly.
|
||||
// Should be successful.
|
||||
fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
|
||||
f, err = newTable(tmpdir, fname,
|
||||
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to instantiate table: %v\n", err)
|
||||
}
|
||||
writeChunks(t, f, 32, 128)
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f, err = newTable(tmpdir, fname,
|
||||
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
v, err := f.Retrieve(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
exp := getChunk(128, 10)
|
||||
if !bytes.Equal(v, exp) {
|
||||
t.Errorf("retrieved value is incorrect")
|
||||
}
|
||||
|
||||
// Case 5: Now write some data via a batch.
|
||||
// This should fail either during AppendRaw or Commit
|
||||
batch := f.newBatch()
|
||||
writeErr := batch.AppendRaw(32, make([]byte, 1))
|
||||
if writeErr == nil {
|
||||
writeErr = batch.commit()
|
||||
}
|
||||
if writeErr == nil {
|
||||
t.Fatalf("Writing to readonly table should fail")
|
||||
}
|
||||
}
|
||||
|
@ -253,6 +253,44 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFreezerReadonlyValidate(t *testing.T) {
|
||||
tables := map[string]bool{"a": true, "b": true}
|
||||
dir, err := ioutil.TempDir("", "freezer")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
// Open non-readonly freezer and fill individual tables
|
||||
// with different amount of data.
|
||||
f, err := newFreezer(dir, "", false, 2049, tables)
|
||||
if err != nil {
|
||||
t.Fatal("can't open freezer", err)
|
||||
}
|
||||
var item = make([]byte, 1024)
|
||||
aBatch := f.tables["a"].newBatch()
|
||||
require.NoError(t, aBatch.AppendRaw(0, item))
|
||||
require.NoError(t, aBatch.AppendRaw(1, item))
|
||||
require.NoError(t, aBatch.AppendRaw(2, item))
|
||||
require.NoError(t, aBatch.commit())
|
||||
bBatch := f.tables["b"].newBatch()
|
||||
require.NoError(t, bBatch.AppendRaw(0, item))
|
||||
require.NoError(t, bBatch.commit())
|
||||
if f.tables["a"].items != 3 {
|
||||
t.Fatalf("unexpected number of items in table")
|
||||
}
|
||||
if f.tables["b"].items != 1 {
|
||||
t.Fatalf("unexpected number of items in table")
|
||||
}
|
||||
require.NoError(t, f.Close())
|
||||
|
||||
// Re-opening as readonly should fail when validating
|
||||
// table lengths.
|
||||
f, err = newFreezer(dir, "", true, 2049, tables)
|
||||
if err == nil {
|
||||
t.Fatal("readonly freezer should fail with differing table lengths")
|
||||
}
|
||||
}
|
||||
|
||||
func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
|
||||
t.Helper()
|
||||
|
||||
|
@ -66,6 +66,29 @@ type journalStorage struct {
|
||||
Vals [][]byte
|
||||
}
|
||||
|
||||
func ParseGeneratorStatus(generatorBlob []byte) string {
|
||||
if len(generatorBlob) == 0 {
|
||||
return ""
|
||||
}
|
||||
var generator journalGenerator
|
||||
if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
|
||||
log.Warn("failed to decode snapshot generator", "err", err)
|
||||
return ""
|
||||
}
|
||||
// Figure out whether we're after or within an account
|
||||
var m string
|
||||
switch marker := generator.Marker; len(marker) {
|
||||
case common.HashLength:
|
||||
m = fmt.Sprintf("at %#x", marker)
|
||||
case 2 * common.HashLength:
|
||||
m = fmt.Sprintf("in %#x at %#x", marker[:common.HashLength], marker[common.HashLength:])
|
||||
default:
|
||||
m = fmt.Sprintf("%#x", marker)
|
||||
}
|
||||
return fmt.Sprintf(`Done: %v, Accounts: %d, Slots: %d, Storage: %d, Marker: %s`,
|
||||
generator.Done, generator.Accounts, generator.Slots, generator.Storage, m)
|
||||
}
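A short sketch (assumes an already-open chain database) showing the intended consumer: the db metadata command earlier in this diff feeds the raw generator blob straight into ParseGeneratorStatus.

package sketch

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/ethdb"
)

// printGeneratorStatus prints the snapshot generation progress, or an empty
// string if no generator marker is stored.
func printGeneratorStatus(db ethdb.Database) {
	fmt.Println(snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db)))
}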
|
||||
|
||||
// loadAndParseJournal tries to parse the snapshot journal in latest format.
|
||||
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
|
||||
// Retrieve the disk layer generator. It must exist, no matter the
|
||||
|
@ -200,23 +200,8 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
|
||||
var (
|
||||
enc []byte
|
||||
err error
|
||||
meter *time.Duration
|
||||
)
|
||||
readStart := time.Now()
|
||||
if metrics.EnabledExpensive {
|
||||
// If the snap is 'under construction', the first lookup may fail. If that
|
||||
// happens, we don't want to double-count the time elapsed. Thus this
|
||||
// dance with the metering.
|
||||
defer func() {
|
||||
if meter != nil {
|
||||
*meter += time.Since(readStart)
|
||||
}
|
||||
}()
|
||||
}
|
||||
if s.db.snap != nil {
|
||||
if metrics.EnabledExpensive {
|
||||
meter = &s.db.SnapshotStorageReads
|
||||
}
|
||||
// If the object was destructed in *this* block (and potentially resurrected),
|
||||
// the storage has been cleared out, and we should *not* consult the previous
|
||||
// snapshot about any storage values. The only possible alternatives are:
|
||||
@ -226,20 +211,20 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
|
||||
if _, destructed := s.db.snapDestructs[s.addrHash]; destructed {
|
||||
return common.Hash{}
|
||||
}
|
||||
start := time.Now()
|
||||
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
|
||||
if metrics.EnabledExpensive {
|
||||
s.db.SnapshotStorageReads += time.Since(start)
|
||||
}
|
||||
}
|
||||
// If the snapshot is unavailable or reading from it fails, load from the database.
|
||||
if s.db.snap == nil || err != nil {
|
||||
if meter != nil {
|
||||
// If we already spent time checking the snapshot, account for it
|
||||
// and reset the readStart
|
||||
*meter += time.Since(readStart)
|
||||
readStart = time.Now()
|
||||
}
|
||||
start := time.Now()
|
||||
enc, err = s.getTrie(db).TryGet(key.Bytes())
|
||||
if metrics.EnabledExpensive {
|
||||
meter = &s.db.StorageReads
|
||||
s.db.StorageReads += time.Since(start)
|
||||
}
|
||||
if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
|
||||
if err != nil {
|
||||
s.setError(err)
|
||||
return common.Hash{}
|
||||
}
|
||||
|
@ -513,16 +513,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
|
||||
return obj
|
||||
}
|
||||
// If no live objects are available, attempt to use snapshots
|
||||
var (
|
||||
data *types.StateAccount
|
||||
err error
|
||||
)
|
||||
var data *types.StateAccount
|
||||
if s.snap != nil {
|
||||
start := time.Now()
|
||||
acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
|
||||
if metrics.EnabledExpensive {
|
||||
defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
|
||||
s.SnapshotAccountReads += time.Since(start)
|
||||
}
|
||||
var acc *snapshot.Account
|
||||
if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
|
||||
if err == nil {
|
||||
if acc == nil {
|
||||
return nil
|
||||
}
|
||||
@ -541,11 +539,12 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
|
||||
}
|
||||
}
|
||||
// If snapshot unavailable or reading from it failed, load from the database
|
||||
if s.snap == nil || err != nil {
|
||||
if metrics.EnabledExpensive {
|
||||
defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
|
||||
}
|
||||
if data == nil {
|
||||
start := time.Now()
|
||||
enc, err := s.trie.TryGet(addr.Bytes())
|
||||
if metrics.EnabledExpensive {
|
||||
s.AccountReads += time.Since(start)
|
||||
}
|
||||
if err != nil {
|
||||
s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
|
||||
return nil
|
||||
|
@ -310,7 +310,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
|
||||
}
|
||||
|
||||
// Set up the initial access list.
|
||||
if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber); rules.IsBerlin {
|
||||
if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Random != nil); rules.IsBerlin {
|
||||
st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList())
|
||||
}
|
||||
var (
|
||||
|
@ -75,6 +75,7 @@ type BlockContext struct {
|
||||
Time *big.Int // Provides information for TIME
|
||||
Difficulty *big.Int // Provides information for DIFFICULTY
|
||||
BaseFee *big.Int // Provides information for BASEFEE
|
||||
Random *common.Hash // Provides information for RANDOM
|
||||
}
|
||||
|
||||
// TxContext provides the EVM with information about a transaction.
|
||||
@ -131,7 +132,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
|
||||
StateDB: statedb,
|
||||
Config: config,
|
||||
chainConfig: chainConfig,
|
||||
chainRules: chainConfig.Rules(blockCtx.BlockNumber),
|
||||
chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil),
|
||||
}
|
||||
evm.interpreter = NewEVMInterpreter(evm, config)
|
||||
return evm
|
||||
|
@ -477,6 +477,12 @@ func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opRandom(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
v := new(uint256.Int).SetBytes((interpreter.evm.Context.Random.Bytes()))
|
||||
scope.Stack.push(v)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||
scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit))
|
||||
return nil, nil
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -654,3 +655,36 @@ func TestCreate2Addreses(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRandom(t *testing.T) {
|
||||
type testcase struct {
|
||||
name string
|
||||
random common.Hash
|
||||
}
|
||||
|
||||
for _, tt := range []testcase{
|
||||
{name: "empty hash", random: common.Hash{}},
|
||||
{name: "1", random: common.Hash{0}},
|
||||
{name: "emptyCodeHash", random: emptyCodeHash},
|
||||
{name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})},
|
||||
} {
|
||||
var (
|
||||
env = NewEVM(BlockContext{Random: &tt.random}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||
stack = newstack()
|
||||
pc = uint64(0)
|
||||
evmInterpreter = env.interpreter
|
||||
)
|
||||
opRandom(&pc, evmInterpreter, &ScopeContext{nil, stack, nil})
|
||||
if len(stack.data) != 1 {
|
||||
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
|
||||
}
|
||||
actual := stack.pop()
|
||||
expected, overflow := uint256.FromBig(new(big.Int).SetBytes(tt.random.Bytes()))
|
||||
if overflow {
|
||||
t.Errorf("Testcase %v: invalid overflow", tt.name)
|
||||
}
|
||||
if actual.Cmp(expected) != 0 {
|
||||
t.Errorf("Testcase %v: expected %x, got %x", tt.name, expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -69,6 +69,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
|
||||
// If jump table was not initialised we set the default one.
|
||||
if cfg.JumpTable == nil {
|
||||
switch {
|
||||
case evm.chainRules.IsMerge:
|
||||
cfg.JumpTable = &mergeInstructionSet
|
||||
case evm.chainRules.IsLondon:
|
||||
cfg.JumpTable = &londonInstructionSet
|
||||
case evm.chainRules.IsBerlin:
|
||||
|
@ -54,6 +54,7 @@ var (
|
||||
istanbulInstructionSet = newIstanbulInstructionSet()
|
||||
berlinInstructionSet = newBerlinInstructionSet()
|
||||
londonInstructionSet = newLondonInstructionSet()
|
||||
mergeInstructionSet = newMergeInstructionSet()
|
||||
)
|
||||
|
||||
// JumpTable contains the EVM opcodes supported at a given fork.
|
||||
@ -77,6 +78,17 @@ func validate(jt JumpTable) JumpTable {
|
||||
return jt
|
||||
}
|
||||
|
||||
func newMergeInstructionSet() JumpTable {
|
||||
instructionSet := newLondonInstructionSet()
|
||||
instructionSet[RANDOM] = &operation{
|
||||
execute: opRandom,
|
||||
constantGas: GasQuickStep,
|
||||
minStack: minStack(0, 1),
|
||||
maxStack: maxStack(0, 1),
|
||||
}
|
||||
return validate(instructionSet)
|
||||
}
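A construction sketch (assumptions: params.TestChainConfig and a zero-difficulty, post-merge context, as in the TestRandom case above); with BlockContext.Random set, the chain rules report IsMerge, the merge jump table is selected, and opcode 0x44 pushes the random value instead of the difficulty.

package sketch

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

// newMergeEVM builds an EVM whose DIFFICULTY/RANDOM opcode (0x44) will push
// the supplied random value.
func newMergeEVM(random common.Hash, statedb vm.StateDB) *vm.EVM {
	ctx := vm.BlockContext{
		BlockNumber: big.NewInt(1),
		Difficulty:  common.Big0,
		Random:      &random,
	}
	return vm.NewEVM(ctx, vm.TxContext{}, statedb, params.TestChainConfig, vm.Config{})
}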
|
||||
|
||||
// newLondonInstructionSet returns the frontier, homestead, byzantium,
|
||||
// constantinople, istanbul, petersburg, berlin and london instructions.
|
||||
func newLondonInstructionSet() JumpTable {
|
||||
|
@ -95,6 +95,7 @@ const (
TIMESTAMP OpCode = 0x42
NUMBER OpCode = 0x43
DIFFICULTY OpCode = 0x44
RANDOM OpCode = 0x44 // Same as DIFFICULTY
GASLIMIT OpCode = 0x45
CHAINID OpCode = 0x46
SELFBALANCE OpCode = 0x47
@ -275,7 +276,7 @@ var opCodeToString = map[OpCode]string{
COINBASE: "COINBASE",
TIMESTAMP: "TIMESTAMP",
NUMBER: "NUMBER",
DIFFICULTY: "DIFFICULTY",
DIFFICULTY: "DIFFICULTY", // TODO (MariusVanDerWijden) rename to RANDOM post merge
GASLIMIT: "GASLIMIT",
CHAINID: "CHAINID",
SELFBALANCE: "SELFBALANCE",
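Since RANDOM reuses DIFFICULTY's byte value, only the jump table entry changes at the merge, not the opcode numbering or its string name. A minimal illustrative sketch (not part of the commit), assuming a module that imports this package; the printed values follow directly from the constants in the hunk above:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
)

func main() {
	// RANDOM is an alias for 0x44, so it compares equal to DIFFICULTY.
	fmt.Println(vm.RANDOM == vm.DIFFICULTY) // true
	// The string table still reports "DIFFICULTY" until the post-merge rename.
	fmt.Println(vm.RANDOM.String()) // DIFFICULTY
}
```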
@ -118,7 +118,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
)
if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil); rules.IsBerlin {
cfg.State.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil)
}
cfg.State.CreateAccount(address)
@ -150,7 +150,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
vmenv = NewEnv(cfg)
sender = vm.AccountRef(cfg.Origin)
)
if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil); rules.IsBerlin {
cfg.State.PrepareAccessList(cfg.Origin, nil, vm.ActivePrecompiles(rules), nil)
}
// Call the code with the given configuration.
@ -176,7 +176,7 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
sender := cfg.State.GetOrNewStateObject(cfg.Origin)
statedb := cfg.State

if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber); rules.IsBerlin {
if rules := cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil); rules.IsBerlin {
statedb.PrepareAccessList(cfg.Origin, &address, vm.ActivePrecompiles(rules), nil)
}
// Call the code with the given configuration.
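The three hunks above make the same change: `ChainConfig.Rules` now takes a second argument, and post-merge execution is detected from a non-nil `Random` in the block context rather than from a block number. A hedged sketch of the resulting call shape (not part of the commit); the helper name and its parameters are illustrative, only `Rules`, `ActivePrecompiles` and `PrepareAccessList` come from the code above:

```go
package runtimeexample

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

// prepareAccessList mirrors the pattern from the hunks above: the merge switch
// is derived from the presence of a beacon-supplied Random value, since
// pre-merge blocks carry no Random and keep the old DIFFICULTY semantics.
func prepareAccessList(config *params.ChainConfig, blockCtx vm.BlockContext,
	statedb *state.StateDB, origin common.Address, dest *common.Address) {
	rules := config.Rules(blockCtx.BlockNumber, blockCtx.Random != nil)
	if rules.IsBerlin {
		statedb.PrepareAccessList(origin, dest, vm.ActivePrecompiles(rules), nil)
	}
}
```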
@ -279,7 +279,7 @@ var testCases = []testCase{
{
Curve: elliptic.P384(),
Name: "P384",
Expected: ECIES_AES256_SHA384,
Expected: ECIES_AES192_SHA384,
},
{
Curve: elliptic.P521(),
@ -80,6 +80,14 @@ var (
KeyLen: 16,
}

ECIES_AES192_SHA384 = &ECIESParams{
Hash: sha512.New384,
hashAlgo: crypto.SHA384,
Cipher: aes.NewCipher,
BlockSize: aes.BlockSize,
KeyLen: 24,
}

ECIES_AES256_SHA256 = &ECIESParams{
Hash: sha256.New,
hashAlgo: crypto.SHA256,
@ -108,7 +116,7 @@ var (
var paramsFromCurve = map[elliptic.Curve]*ECIESParams{
ethcrypto.S256(): ECIES_AES128_SHA256,
elliptic.P256(): ECIES_AES128_SHA256,
elliptic.P384(): ECIES_AES256_SHA384,
elliptic.P384(): ECIES_AES192_SHA384,
elliptic.P521(): ECIES_AES256_SHA512,
}
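The parameter swap above gives P-384 a 24-byte (AES-192) symmetric key so the cipher strength tracks the curve's roughly 192-bit security level instead of the previous AES-256 pairing. A small sketch (not part of the commit) of how the default parameters are looked up; only `ParamsFromCurve` and the parameter sets come from this package, the rest is illustrative:

```go
package main

import (
	"crypto/elliptic"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/ecies"
)

func main() {
	// With the change above, P-384 resolves to the AES-192/SHA-384 parameter set.
	p := ecies.ParamsFromCurve(elliptic.P384())
	fmt.Println(p.KeyLen)    // 24 (bytes of symmetric key material)
	fmt.Println(p.BlockSize) // 16 (AES block size)
}
```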
@ -234,7 +234,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
return nil, err
}

eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock, merger)
eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock)
eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))

eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
@ -20,37 +20,15 @@ package catalyst
import (
"crypto/sha256"
"encoding/binary"
"errors"
"fmt"
"math/big"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/beacon"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
chainParams "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
)

var (
VALID = GenericStringResponse{"VALID"}
SUCCESS = GenericStringResponse{"SUCCESS"}
INVALID = ForkChoiceResponse{Status: "INVALID", PayloadID: nil}
SYNCING = ForkChoiceResponse{Status: "SYNCING", PayloadID: nil}
GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
UnknownPayload = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
InvalidTB = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
InvalidPayloadID = rpc.CustomError{Code: 1, ValidationError: "invalid payload id"}
)

// Register adds catalyst APIs to the full node.
@ -60,21 +38,7 @@ func Register(stack *node.Node, backend *eth.Ethereum) error {
{
Namespace: "engine",
Version: "1.0",
Service: NewConsensusAPI(backend, nil),
Public: true,
},
})
return nil
}

// RegisterLight adds catalyst APIs to the light client.
func RegisterLight(stack *node.Node, backend *les.LightEthereum) error {
log.Warn("Catalyst mode enabled", "protocol", "les")
stack.RegisterAPIs([]rpc.API{
{
Namespace: "engine",
Version: "1.0",
Service: NewConsensusAPI(nil, backend),
Service: NewConsensusAPI(backend),
Public: true,
},
})
@ -82,184 +46,86 @@ func RegisterLight(stack *node.Node, backend *les.LightEthereum) error {
}

type ConsensusAPI struct {
light bool
eth *eth.Ethereum
les *les.LightEthereum
engine consensus.Engine // engine is the post-merge consensus engine, only for block creation
preparedBlocks map[uint64]*ExecutableDataV1
preparedBlocks *payloadQueue // preparedBlocks caches payloads (*ExecutableDataV1) by payload ID (PayloadID)
}

func NewConsensusAPI(eth *eth.Ethereum, les *les.LightEthereum) *ConsensusAPI {
var engine consensus.Engine
if eth == nil {
if les.BlockChain().Config().TerminalTotalDifficulty == nil {
panic("Catalyst started without valid total difficulty")
}
if b, ok := les.Engine().(*beacon.Beacon); ok {
engine = beacon.New(b.InnerEngine())
} else {
engine = beacon.New(les.Engine())
}
} else {
// NewConsensusAPI creates a new consensus api for the given backend.
// The underlying blockchain needs to have a valid terminal total difficulty set.
func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
if eth.BlockChain().Config().TerminalTotalDifficulty == nil {
panic("Catalyst started without valid total difficulty")
}
if b, ok := eth.Engine().(*beacon.Beacon); ok {
engine = beacon.New(b.InnerEngine())
} else {
engine = beacon.New(eth.Engine())
}
}
return &ConsensusAPI{
light: eth == nil,
eth: eth,
les: les,
engine: engine,
preparedBlocks: make(map[uint64]*ExecutableDataV1),
preparedBlocks: newPayloadQueue(),
}
}
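With the light-client path dropped, the constructor only accepts a full-node backend and panics when no terminal total difficulty is configured. A hedged sketch (not part of the commit) of guarding that precondition before wiring up the API; `newEngineAPI` and its error message are illustrative, `NewConsensusAPI` and the config field come from the code above:

```go
package catalystexample

import (
	"errors"

	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/eth/catalyst"
)

// newEngineAPI checks the precondition explicitly instead of relying on the
// constructor's panic. backend is assumed to be a fully initialised *eth.Ethereum.
func newEngineAPI(backend *eth.Ethereum) (*catalyst.ConsensusAPI, error) {
	if backend.BlockChain().Config().TerminalTotalDifficulty == nil {
		return nil, errors.New("catalyst requires TerminalTotalDifficulty to be set")
	}
	return catalyst.NewConsensusAPI(backend), nil
}
```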
|
||||
// blockExecutionEnv gathers all the data required to execute
|
||||
// a block, either when assembling it or when inserting it.
|
||||
type blockExecutionEnv struct {
|
||||
chain *core.BlockChain
|
||||
state *state.StateDB
|
||||
tcount int
|
||||
gasPool *core.GasPool
|
||||
|
||||
header *types.Header
|
||||
txs []*types.Transaction
|
||||
receipts []*types.Receipt
|
||||
}
|
||||
|
||||
func (env *blockExecutionEnv) commitTransaction(tx *types.Transaction, coinbase common.Address) error {
|
||||
vmconfig := *env.chain.GetVMConfig()
|
||||
snap := env.state.Snapshot()
|
||||
receipt, err := core.ApplyTransaction(env.chain.Config(), env.chain, &coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, vmconfig)
|
||||
if err != nil {
|
||||
env.state.RevertToSnapshot(snap)
|
||||
return err
|
||||
}
|
||||
env.txs = append(env.txs, tx)
|
||||
env.receipts = append(env.receipts, receipt)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (api *ConsensusAPI) makeEnv(parent *types.Block, header *types.Header) (*blockExecutionEnv, error) {
|
||||
// The parent state might be missing. It can be the special scenario
|
||||
// that consensus layer tries to build a new block based on the very
|
||||
// old side chain block and the relevant state is already pruned. So
|
||||
// try to retrieve the live state from the chain, if it's not existent,
|
||||
// do the necessary recovery work.
|
||||
var (
|
||||
err error
|
||||
state *state.StateDB
|
||||
)
|
||||
if api.eth.BlockChain().HasState(parent.Root()) {
|
||||
state, err = api.eth.BlockChain().StateAt(parent.Root())
|
||||
} else {
|
||||
// The maximum acceptable reorg depth can be limited by the
|
||||
// finalised block somehow. TODO(rjl493456442) fix the hard-
|
||||
// coded number here later.
|
||||
state, err = api.eth.StateAtBlock(parent, 1000, nil, false, false)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
env := &blockExecutionEnv{
|
||||
chain: api.eth.BlockChain(),
|
||||
state: state,
|
||||
header: header,
|
||||
gasPool: new(core.GasPool).AddGas(header.GasLimit),
|
||||
}
|
||||
return env, nil
|
||||
}
|
||||
|
||||
func (api *ConsensusAPI) GetPayloadV1(payloadID hexutil.Bytes) (*ExecutableDataV1, error) {
|
||||
hash := []byte(payloadID)
|
||||
if len(hash) < 8 {
|
||||
return nil, &InvalidPayloadID
|
||||
}
|
||||
id := binary.BigEndian.Uint64(hash[:8])
|
||||
data, ok := api.preparedBlocks[id]
|
||||
if !ok {
|
||||
return nil, &UnknownPayload
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads ForkchoiceStateV1, PayloadAttributes *PayloadAttributesV1) (ForkChoiceResponse, error) {
|
||||
// ForkchoiceUpdatedV1 has several responsibilities:
|
||||
// If the method is called with an empty head block:
|
||||
// we return success, which can be used to check if the catalyst mode is enabled
|
||||
// If the total difficulty was not reached:
|
||||
// we return INVALID
|
||||
// If the finalizedBlockHash is set:
|
||||
// we check if we have the finalizedBlockHash in our db, if not we start a sync
|
||||
// We try to set our blockchain to the headBlock
|
||||
// If there are payloadAttributes:
|
||||
// we try to assemble a block with the payloadAttributes and return its payloadID
|
||||
func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
|
||||
log.Trace("Engine API request received", "method", "ForkChoiceUpdated", "head", heads.HeadBlockHash, "finalized", heads.FinalizedBlockHash, "safe", heads.SafeBlockHash)
|
||||
if heads.HeadBlockHash == (common.Hash{}) {
|
||||
return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: nil}, nil
|
||||
return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: nil}, nil
|
||||
}
|
||||
if err := api.checkTerminalTotalDifficulty(heads.HeadBlockHash); err != nil {
|
||||
if block := api.eth.BlockChain().GetBlockByHash(heads.HeadBlockHash); block == nil {
|
||||
// TODO (MariusVanDerWijden) trigger sync
|
||||
return SYNCING, nil
|
||||
return beacon.SYNCING, nil
|
||||
}
|
||||
return INVALID, err
|
||||
return beacon.INVALID, err
|
||||
}
|
||||
// If the finalized block is set, check if it is in our blockchain
|
||||
if heads.FinalizedBlockHash != (common.Hash{}) {
|
||||
if block := api.eth.BlockChain().GetBlockByHash(heads.FinalizedBlockHash); block == nil {
|
||||
// TODO (MariusVanDerWijden) trigger sync
|
||||
return SYNCING, nil
|
||||
return beacon.SYNCING, nil
|
||||
}
|
||||
}
|
||||
// SetHead
|
||||
if err := api.setHead(heads.HeadBlockHash); err != nil {
|
||||
return INVALID, err
|
||||
return beacon.INVALID, err
|
||||
}
|
||||
// Assemble block (if needed)
|
||||
if PayloadAttributes != nil {
|
||||
data, err := api.assembleBlock(heads.HeadBlockHash, PayloadAttributes)
|
||||
// Assemble block (if needed). It only works for full node.
|
||||
if payloadAttributes != nil {
|
||||
data, err := api.assembleBlock(heads.HeadBlockHash, payloadAttributes)
|
||||
if err != nil {
|
||||
return INVALID, err
|
||||
return beacon.INVALID, err
|
||||
}
|
||||
hash := computePayloadId(heads.HeadBlockHash, PayloadAttributes)
|
||||
id := binary.BigEndian.Uint64(hash)
|
||||
api.preparedBlocks[id] = data
|
||||
log.Info("Created payload", "payloadid", id)
|
||||
// TODO (MariusVanDerWijden) do something with the payloadID?
|
||||
hex := hexutil.Bytes(hash)
|
||||
return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: &hex}, nil
|
||||
id := computePayloadId(heads.HeadBlockHash, payloadAttributes)
|
||||
api.preparedBlocks.put(id, data)
|
||||
log.Info("Created payload", "payloadID", id)
|
||||
return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: &id}, nil
|
||||
}
|
||||
return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: nil}, nil
|
||||
return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: nil}, nil
|
||||
}
|
||||
|
||||
func computePayloadId(headBlockHash common.Hash, params *PayloadAttributesV1) []byte {
|
||||
// Hash
|
||||
hasher := sha256.New()
|
||||
hasher.Write(headBlockHash[:])
|
||||
binary.Write(hasher, binary.BigEndian, params.Timestamp)
|
||||
hasher.Write(params.Random[:])
|
||||
hasher.Write(params.SuggestedFeeRecipient[:])
|
||||
return hasher.Sum([]byte{})[:8]
|
||||
// GetPayloadV1 returns a cached payload by id.
|
||||
func (api *ConsensusAPI) GetPayloadV1(payloadID beacon.PayloadID) (*beacon.ExecutableDataV1, error) {
|
||||
log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID)
|
||||
data := api.preparedBlocks.get(payloadID)
|
||||
if data == nil {
|
||||
return nil, &beacon.UnknownPayload
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (api *ConsensusAPI) invalid() ExecutePayloadResponse {
|
||||
if api.light {
|
||||
return ExecutePayloadResponse{Status: INVALID.Status, LatestValidHash: api.les.BlockChain().CurrentHeader().Hash()}
|
||||
}
|
||||
return ExecutePayloadResponse{Status: INVALID.Status, LatestValidHash: api.eth.BlockChain().CurrentHeader().Hash()}
|
||||
}
|
||||
|
||||
// ExecutePayload creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
|
||||
func (api *ConsensusAPI) ExecutePayloadV1(params ExecutableDataV1) (ExecutePayloadResponse, error) {
|
||||
block, err := ExecutableDataToBlock(params)
|
||||
// ExecutePayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
|
||||
func (api *ConsensusAPI) ExecutePayloadV1(params beacon.ExecutableDataV1) (beacon.ExecutePayloadResponse, error) {
|
||||
log.Trace("Engine API request received", "method", "ExecutePayload", params.BlockHash, "number", params.Number)
|
||||
block, err := beacon.ExecutableDataToBlock(params)
|
||||
if err != nil {
|
||||
return api.invalid(), err
|
||||
}
|
||||
if api.light {
|
||||
parent := api.les.BlockChain().GetHeaderByHash(params.ParentHash)
|
||||
if parent == nil {
|
||||
return api.invalid(), fmt.Errorf("could not find parent %x", params.ParentHash)
|
||||
}
|
||||
if err = api.les.BlockChain().InsertHeader(block.Header()); err != nil {
|
||||
return api.invalid(), err
|
||||
}
|
||||
return ExecutePayloadResponse{Status: VALID.Status, LatestValidHash: block.Hash()}, nil
|
||||
}
|
||||
if !api.eth.BlockChain().HasBlock(block.ParentHash(), block.NumberU64()-1) {
|
||||
/*
|
||||
TODO (MariusVanDerWijden) reenable once sync is merged
|
||||
@ -268,7 +134,7 @@ func (api *ConsensusAPI) ExecutePayloadV1(params ExecutableDataV1) (ExecutePaylo
|
||||
}
|
||||
*/
|
||||
// TODO (MariusVanDerWijden) we should return nil here not empty hash
|
||||
return ExecutePayloadResponse{Status: SYNCING.Status, LatestValidHash: common.Hash{}}, nil
|
||||
return beacon.ExecutePayloadResponse{Status: beacon.SYNCING.Status, LatestValidHash: common.Hash{}}, nil
|
||||
}
|
||||
parent := api.eth.BlockChain().GetBlockByHash(params.ParentHash)
|
||||
td := api.eth.BlockChain().GetTd(parent.Hash(), block.NumberU64()-1)
|
||||
@ -276,188 +142,44 @@ func (api *ConsensusAPI) ExecutePayloadV1(params ExecutableDataV1) (ExecutePaylo
|
||||
if td.Cmp(ttd) < 0 {
|
||||
return api.invalid(), fmt.Errorf("can not execute payload on top of block with low td got: %v threshold %v", td, ttd)
|
||||
}
|
||||
log.Trace("Inserting block without head", "hash", block.Hash(), "number", block.Number)
|
||||
if err := api.eth.BlockChain().InsertBlockWithoutSetHead(block); err != nil {
|
||||
return api.invalid(), err
|
||||
}
|
||||
|
||||
if merger := api.merger(); !merger.TDDReached() {
|
||||
if merger := api.eth.Merger(); !merger.TDDReached() {
|
||||
merger.ReachTTD()
|
||||
}
|
||||
return ExecutePayloadResponse{Status: VALID.Status, LatestValidHash: block.Hash()}, nil
|
||||
return beacon.ExecutePayloadResponse{Status: beacon.VALID.Status, LatestValidHash: block.Hash()}, nil
|
||||
}
|
||||
|
||||
// AssembleBlock creates a new block, inserts it into the chain, and returns the "execution
|
||||
// data" required for eth2 clients to process the new block.
|
||||
func (api *ConsensusAPI) assembleBlock(parentHash common.Hash, params *PayloadAttributesV1) (*ExecutableDataV1, error) {
|
||||
if api.light {
|
||||
return nil, errors.New("not supported")
|
||||
// computePayloadId computes a pseudo-random payloadid, based on the parameters.
|
||||
func computePayloadId(headBlockHash common.Hash, params *beacon.PayloadAttributesV1) beacon.PayloadID {
|
||||
// Hash
|
||||
hasher := sha256.New()
|
||||
hasher.Write(headBlockHash[:])
|
||||
binary.Write(hasher, binary.BigEndian, params.Timestamp)
|
||||
hasher.Write(params.Random[:])
|
||||
hasher.Write(params.SuggestedFeeRecipient[:])
|
||||
var out beacon.PayloadID
|
||||
copy(out[:], hasher.Sum(nil)[:8])
|
||||
return out
|
||||
}
|
||||
|
||||
// invalid returns a response "INVALID" with the latest valid hash set to the current head.
|
||||
func (api *ConsensusAPI) invalid() beacon.ExecutePayloadResponse {
|
||||
return beacon.ExecutePayloadResponse{Status: beacon.INVALID.Status, LatestValidHash: api.eth.BlockChain().CurrentHeader().Hash()}
|
||||
}
|
||||
|
||||
// assembleBlock creates a new block and returns the "execution
|
||||
// data" required for beacon clients to process the new block.
|
||||
func (api *ConsensusAPI) assembleBlock(parentHash common.Hash, params *beacon.PayloadAttributesV1) (*beacon.ExecutableDataV1, error) {
|
||||
log.Info("Producing block", "parentHash", parentHash)
|
||||
|
||||
bc := api.eth.BlockChain()
|
||||
parent := bc.GetBlockByHash(parentHash)
|
||||
if parent == nil {
|
||||
log.Warn("Cannot assemble block with parent hash to unknown block", "parentHash", parentHash)
|
||||
return nil, fmt.Errorf("cannot assemble block with unknown parent %s", parentHash)
|
||||
}
|
||||
|
||||
if params.Timestamp < parent.Time() {
|
||||
return nil, fmt.Errorf("child timestamp lower than parent's: %d < %d", params.Timestamp, parent.Time())
|
||||
}
|
||||
if now := uint64(time.Now().Unix()); params.Timestamp > now+1 {
|
||||
diff := time.Duration(params.Timestamp-now) * time.Second
|
||||
log.Warn("Producing block too far in the future", "diff", common.PrettyDuration(diff))
|
||||
}
|
||||
pending := api.eth.TxPool().Pending(true)
|
||||
coinbase := params.SuggestedFeeRecipient
|
||||
num := parent.Number()
|
||||
header := &types.Header{
|
||||
ParentHash: parent.Hash(),
|
||||
Number: num.Add(num, common.Big1),
|
||||
Coinbase: coinbase,
|
||||
GasLimit: parent.GasLimit(), // Keep the gas limit constant in this prototype
|
||||
Extra: []byte{}, // TODO (MariusVanDerWijden) properly set extra data
|
||||
Time: params.Timestamp,
|
||||
}
|
||||
if config := api.eth.BlockChain().Config(); config.IsLondon(header.Number) {
|
||||
header.BaseFee = misc.CalcBaseFee(config, parent.Header())
|
||||
}
|
||||
if err := api.engine.Prepare(bc, header); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
env, err := api.makeEnv(parent, header)
|
||||
block, err := api.eth.Miner().GetSealingBlock(parentHash, params.Timestamp, params.SuggestedFeeRecipient, params.Random)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
signer = types.MakeSigner(bc.Config(), header.Number)
|
||||
txHeap = types.NewTransactionsByPriceAndNonce(signer, pending, nil)
|
||||
transactions []*types.Transaction
|
||||
)
|
||||
for {
|
||||
if env.gasPool.Gas() < chainParams.TxGas {
|
||||
log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", chainParams.TxGas)
|
||||
break
|
||||
}
|
||||
tx := txHeap.Peek()
|
||||
if tx == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// The sender is only for logging purposes, and it doesn't really matter if it's correct.
|
||||
from, _ := types.Sender(signer, tx)
|
||||
|
||||
// Execute the transaction
|
||||
env.state.Prepare(tx.Hash(), env.tcount)
|
||||
err = env.commitTransaction(tx, coinbase)
|
||||
switch err {
|
||||
case core.ErrGasLimitReached:
|
||||
// Pop the current out-of-gas transaction without shifting in the next from the account
|
||||
log.Trace("Gas limit exceeded for current block", "sender", from)
|
||||
txHeap.Pop()
|
||||
|
||||
case core.ErrNonceTooLow:
|
||||
// New head notification data race between the transaction pool and miner, shift
|
||||
log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
|
||||
txHeap.Shift()
|
||||
|
||||
case core.ErrNonceTooHigh:
|
||||
// Reorg notification data race between the transaction pool and miner, skip account =
|
||||
log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
|
||||
txHeap.Pop()
|
||||
|
||||
case nil:
|
||||
// Everything ok, collect the logs and shift in the next transaction from the same account
|
||||
env.tcount++
|
||||
txHeap.Shift()
|
||||
transactions = append(transactions, tx)
|
||||
|
||||
default:
|
||||
// Strange error, discard the transaction and get the next in line (note, the
|
||||
// nonce-too-high clause will prevent us from executing in vain).
|
||||
log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err)
|
||||
txHeap.Shift()
|
||||
}
|
||||
}
|
||||
// Create the block.
|
||||
block, err := api.engine.FinalizeAndAssemble(bc, header, env.state, transactions, nil /* uncles */, env.receipts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return BlockToExecutableData(block, params.Random), nil
|
||||
}
|
||||
|
||||
func encodeTransactions(txs []*types.Transaction) [][]byte {
|
||||
var enc = make([][]byte, len(txs))
|
||||
for i, tx := range txs {
|
||||
enc[i], _ = tx.MarshalBinary()
|
||||
}
|
||||
return enc
|
||||
}
|
||||
|
||||
func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
|
||||
var txs = make([]*types.Transaction, len(enc))
|
||||
for i, encTx := range enc {
|
||||
var tx types.Transaction
|
||||
if err := tx.UnmarshalBinary(encTx); err != nil {
|
||||
return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
|
||||
}
|
||||
txs[i] = &tx
|
||||
}
|
||||
return txs, nil
|
||||
}
|
||||
|
||||
func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
|
||||
txs, err := decodeTransactions(params.Transactions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(params.ExtraData) > 32 {
|
||||
return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
|
||||
}
|
||||
number := big.NewInt(0)
|
||||
number.SetUint64(params.Number)
|
||||
header := &types.Header{
|
||||
ParentHash: params.ParentHash,
|
||||
UncleHash: types.EmptyUncleHash,
|
||||
Coinbase: params.FeeRecipient,
|
||||
Root: params.StateRoot,
|
||||
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
|
||||
ReceiptHash: params.ReceiptsRoot,
|
||||
Bloom: types.BytesToBloom(params.LogsBloom),
|
||||
Difficulty: common.Big0,
|
||||
Number: number,
|
||||
GasLimit: params.GasLimit,
|
||||
GasUsed: params.GasUsed,
|
||||
Time: params.Timestamp,
|
||||
BaseFee: params.BaseFeePerGas,
|
||||
Extra: params.ExtraData,
|
||||
// TODO (MariusVanDerWijden) add params.Random to header once required
|
||||
}
|
||||
block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
|
||||
if block.Hash() != params.BlockHash {
|
||||
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
func BlockToExecutableData(block *types.Block, random common.Hash) *ExecutableDataV1 {
|
||||
return &ExecutableDataV1{
|
||||
BlockHash: block.Hash(),
|
||||
ParentHash: block.ParentHash(),
|
||||
FeeRecipient: block.Coinbase(),
|
||||
StateRoot: block.Root(),
|
||||
Number: block.NumberU64(),
|
||||
GasLimit: block.GasLimit(),
|
||||
GasUsed: block.GasUsed(),
|
||||
BaseFeePerGas: block.BaseFee(),
|
||||
Timestamp: block.Time(),
|
||||
ReceiptsRoot: block.ReceiptHash(),
|
||||
LogsBloom: block.Bloom().Bytes(),
|
||||
Transactions: encodeTransactions(block.Transactions()),
|
||||
Random: random,
|
||||
ExtraData: block.Extra(),
|
||||
}
|
||||
return beacon.BlockToExecutableData(block), nil
|
||||
}
|
||||
|
||||
// Used in tests to add the list of transactions from a block to the tx pool.
|
||||
@ -470,17 +192,17 @@ func (api *ConsensusAPI) insertTransactions(txs types.Transactions) error {
|
||||
|
||||
func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
|
||||
// shortcut if we entered PoS already
|
||||
if api.merger().PoSFinalized() {
|
||||
if api.eth.Merger().PoSFinalized() {
|
||||
return nil
|
||||
}
|
||||
// make sure the parent has enough terminal total difficulty
|
||||
newHeadBlock := api.eth.BlockChain().GetBlockByHash(head)
|
||||
if newHeadBlock == nil {
|
||||
return &GenericServerError
|
||||
return &beacon.GenericServerError
|
||||
}
|
||||
td := api.eth.BlockChain().GetTd(newHeadBlock.Hash(), newHeadBlock.NumberU64())
|
||||
if td != nil && td.Cmp(api.eth.BlockChain().Config().TerminalTotalDifficulty) < 0 {
|
||||
return &InvalidTB
|
||||
return &beacon.InvalidTB
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -488,49 +210,22 @@ func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
|
||||
// setHead is called to perform a force choice.
|
||||
func (api *ConsensusAPI) setHead(newHead common.Hash) error {
|
||||
log.Info("Setting head", "head", newHead)
|
||||
if api.light {
|
||||
headHeader := api.les.BlockChain().CurrentHeader()
|
||||
if headHeader.Hash() == newHead {
|
||||
return nil
|
||||
}
|
||||
newHeadHeader := api.les.BlockChain().GetHeaderByHash(newHead)
|
||||
if newHeadHeader == nil {
|
||||
return &GenericServerError
|
||||
}
|
||||
if err := api.les.BlockChain().SetChainHead(newHeadHeader); err != nil {
|
||||
return err
|
||||
}
|
||||
// Trigger the transition if it's the first `NewHead` event.
|
||||
merger := api.merger()
|
||||
if !merger.PoSFinalized() {
|
||||
merger.FinalizePoS()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
headBlock := api.eth.BlockChain().CurrentBlock()
|
||||
if headBlock.Hash() == newHead {
|
||||
return nil
|
||||
}
|
||||
newHeadBlock := api.eth.BlockChain().GetBlockByHash(newHead)
|
||||
if newHeadBlock == nil {
|
||||
return &GenericServerError
|
||||
return &beacon.GenericServerError
|
||||
}
|
||||
if err := api.eth.BlockChain().SetChainHead(newHeadBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
// Trigger the transition if it's the first `NewHead` event.
|
||||
if merger := api.merger(); !merger.PoSFinalized() {
|
||||
if merger := api.eth.Merger(); !merger.PoSFinalized() {
|
||||
merger.FinalizePoS()
|
||||
}
|
||||
// TODO (MariusVanDerWijden) are we really synced now?
|
||||
api.eth.SetSynced()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper function, return the merger instance.
|
||||
func (api *ConsensusAPI) merger() *consensus.Merger {
|
||||
if api.light {
|
||||
return api.les.Merger()
|
||||
}
|
||||
return api.eth.Merger()
|
||||
}
|
||||
|
@ -17,14 +17,15 @@
|
||||
package catalyst
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/beacon"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -78,14 +79,14 @@ func TestEth2AssembleBlock(t *testing.T) {
|
||||
n, ethservice := startEthService(t, genesis, blocks)
|
||||
defer n.Close()
|
||||
|
||||
api := NewConsensusAPI(ethservice, nil)
|
||||
api := NewConsensusAPI(ethservice)
|
||||
signer := types.NewEIP155Signer(ethservice.BlockChain().Config().ChainID)
|
||||
tx, err := types.SignTx(types.NewTransaction(uint64(10), blocks[9].Coinbase(), big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, testKey)
|
||||
if err != nil {
|
||||
t.Fatalf("error signing transaction, err=%v", err)
|
||||
}
|
||||
ethservice.TxPool().AddLocal(tx)
|
||||
blockParams := PayloadAttributesV1{
|
||||
blockParams := beacon.PayloadAttributesV1{
|
||||
Timestamp: blocks[9].Time() + 5,
|
||||
}
|
||||
execData, err := api.assembleBlock(blocks[9].Hash(), &blockParams)
|
||||
@ -102,11 +103,11 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) {
|
||||
n, ethservice := startEthService(t, genesis, blocks[:9])
|
||||
defer n.Close()
|
||||
|
||||
api := NewConsensusAPI(ethservice, nil)
|
||||
api := NewConsensusAPI(ethservice)
|
||||
|
||||
// Put the 10th block's tx in the pool and produce a new block
|
||||
api.insertTransactions(blocks[9].Transactions())
|
||||
blockParams := PayloadAttributesV1{
|
||||
blockParams := beacon.PayloadAttributesV1{
|
||||
Timestamp: blocks[8].Time() + 5,
|
||||
}
|
||||
execData, err := api.assembleBlock(blocks[8].Hash(), &blockParams)
|
||||
@ -123,8 +124,8 @@ func TestSetHeadBeforeTotalDifficulty(t *testing.T) {
|
||||
n, ethservice := startEthService(t, genesis, blocks)
|
||||
defer n.Close()
|
||||
|
||||
api := NewConsensusAPI(ethservice, nil)
|
||||
fcState := ForkchoiceStateV1{
|
||||
api := NewConsensusAPI(ethservice)
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: blocks[5].Hash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
@ -141,14 +142,14 @@ func TestEth2PrepareAndGetPayload(t *testing.T) {
|
||||
n, ethservice := startEthService(t, genesis, blocks[:9])
|
||||
defer n.Close()
|
||||
|
||||
api := NewConsensusAPI(ethservice, nil)
|
||||
api := NewConsensusAPI(ethservice)
|
||||
|
||||
// Put the 10th block's tx in the pool and produce a new block
|
||||
api.insertTransactions(blocks[9].Transactions())
|
||||
blockParams := PayloadAttributesV1{
|
||||
blockParams := beacon.PayloadAttributesV1{
|
||||
Timestamp: blocks[8].Time() + 5,
|
||||
}
|
||||
fcState := ForkchoiceStateV1{
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: blocks[8].Hash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
@ -158,13 +159,21 @@ func TestEth2PrepareAndGetPayload(t *testing.T) {
|
||||
t.Fatalf("error preparing payload, err=%v", err)
|
||||
}
|
||||
payloadID := computePayloadId(fcState.HeadBlockHash, &blockParams)
|
||||
execData, err := api.GetPayloadV1(hexutil.Bytes(payloadID))
|
||||
execData, err := api.GetPayloadV1(payloadID)
|
||||
if err != nil {
|
||||
t.Fatalf("error getting payload, err=%v", err)
|
||||
}
|
||||
if len(execData.Transactions) != blocks[9].Transactions().Len() {
|
||||
t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions))
|
||||
}
|
||||
// Test invalid payloadID
|
||||
var invPayload beacon.PayloadID
|
||||
copy(invPayload[:], payloadID[:])
|
||||
invPayload[0] = ^invPayload[0]
|
||||
_, err = api.GetPayloadV1(invPayload)
|
||||
if err == nil {
|
||||
t.Fatal("expected error retrieving invalid payload")
|
||||
}
|
||||
}
|
||||
|
||||
func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan core.RemovedLogsEvent, wantNew, wantRemoved int) {
|
||||
@ -185,6 +194,48 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan co
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidPayloadTimestamp(t *testing.T) {
|
||||
genesis, preMergeBlocks := generatePreMergeChain(10)
|
||||
n, ethservice := startEthService(t, genesis, preMergeBlocks)
|
||||
ethservice.Merger().ReachTTD()
|
||||
defer n.Close()
|
||||
var (
|
||||
api = NewConsensusAPI(ethservice)
|
||||
parent = ethservice.BlockChain().CurrentBlock()
|
||||
)
|
||||
tests := []struct {
|
||||
time uint64
|
||||
shouldErr bool
|
||||
}{
|
||||
{0, true},
|
||||
{parent.Time(), true},
|
||||
{parent.Time() - 1, true},
|
||||
{parent.Time() + 1, false},
|
||||
{uint64(time.Now().Unix()) + uint64(time.Minute), false},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("Timestamp test: %v", i), func(t *testing.T) {
|
||||
params := beacon.PayloadAttributesV1{
|
||||
Timestamp: test.time,
|
||||
Random: crypto.Keccak256Hash([]byte{byte(123)}),
|
||||
SuggestedFeeRecipient: parent.Coinbase(),
|
||||
}
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: parent.Hash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
}
|
||||
_, err := api.ForkchoiceUpdatedV1(fcState, ¶ms)
|
||||
if test.shouldErr && err == nil {
|
||||
t.Fatalf("expected error preparing payload with invalid timestamp, err=%v", err)
|
||||
} else if !test.shouldErr && err != nil {
|
||||
t.Fatalf("error preparing payload with valid timestamp, err=%v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEth2NewBlock(t *testing.T) {
|
||||
genesis, preMergeBlocks := generatePreMergeChain(10)
|
||||
n, ethservice := startEthService(t, genesis, preMergeBlocks)
|
||||
@ -192,7 +243,7 @@ func TestEth2NewBlock(t *testing.T) {
|
||||
defer n.Close()
|
||||
|
||||
var (
|
||||
api = NewConsensusAPI(ethservice, nil)
|
||||
api = NewConsensusAPI(ethservice)
|
||||
parent = preMergeBlocks[len(preMergeBlocks)-1]
|
||||
|
||||
// This EVM code generates a log when the contract is created.
|
||||
@ -210,13 +261,13 @@ func TestEth2NewBlock(t *testing.T) {
|
||||
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
|
||||
ethservice.TxPool().AddLocal(tx)
|
||||
|
||||
execData, err := api.assembleBlock(parent.Hash(), &PayloadAttributesV1{
|
||||
execData, err := api.assembleBlock(parent.Hash(), &beacon.PayloadAttributesV1{
|
||||
Timestamp: parent.Time() + 5,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create the executable data %v", err)
|
||||
}
|
||||
block, err := ExecutableDataToBlock(*execData)
|
||||
block, err := beacon.ExecutableDataToBlock(*execData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to convert executable data to block %v", err)
|
||||
}
|
||||
@ -228,7 +279,7 @@ func TestEth2NewBlock(t *testing.T) {
|
||||
t.Fatalf("Chain head shouldn't be updated")
|
||||
}
|
||||
checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
|
||||
fcState := ForkchoiceStateV1{
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: block.Hash(),
|
||||
SafeBlockHash: block.Hash(),
|
||||
FinalizedBlockHash: block.Hash(),
|
||||
@ -250,13 +301,13 @@ func TestEth2NewBlock(t *testing.T) {
|
||||
)
|
||||
parent = preMergeBlocks[len(preMergeBlocks)-1]
|
||||
for i := 0; i < 10; i++ {
|
||||
execData, err := api.assembleBlock(parent.Hash(), &PayloadAttributesV1{
|
||||
execData, err := api.assembleBlock(parent.Hash(), &beacon.PayloadAttributesV1{
|
||||
Timestamp: parent.Time() + 6,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create the executable data %v", err)
|
||||
}
|
||||
block, err := ExecutableDataToBlock(*execData)
|
||||
block, err := beacon.ExecutableDataToBlock(*execData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to convert executable data to block %v", err)
|
||||
}
|
||||
@ -268,7 +319,7 @@ func TestEth2NewBlock(t *testing.T) {
|
||||
t.Fatalf("Chain head shouldn't be updated")
|
||||
}
|
||||
|
||||
fcState := ForkchoiceStateV1{
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: block.Hash(),
|
||||
SafeBlockHash: block.Hash(),
|
||||
FinalizedBlockHash: block.Hash(),
|
||||
@ -362,7 +413,7 @@ func TestFullAPI(t *testing.T) {
|
||||
ethservice.Merger().ReachTTD()
|
||||
defer n.Close()
|
||||
var (
|
||||
api = NewConsensusAPI(ethservice, nil)
|
||||
api = NewConsensusAPI(ethservice)
|
||||
parent = ethservice.BlockChain().CurrentBlock()
|
||||
// This EVM code generates a log when the contract is created.
|
||||
logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
|
||||
@ -373,12 +424,12 @@ func TestFullAPI(t *testing.T) {
|
||||
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
|
||||
ethservice.TxPool().AddLocal(tx)
|
||||
|
||||
params := PayloadAttributesV1{
|
||||
params := beacon.PayloadAttributesV1{
|
||||
Timestamp: parent.Time() + 1,
|
||||
Random: crypto.Keccak256Hash([]byte{byte(i)}),
|
||||
SuggestedFeeRecipient: parent.Coinbase(),
|
||||
}
|
||||
fcState := ForkchoiceStateV1{
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: parent.Hash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
@ -387,11 +438,11 @@ func TestFullAPI(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("error preparing payload, err=%v", err)
|
||||
}
|
||||
if resp.Status != SUCCESS.Status {
|
||||
if resp.Status != beacon.SUCCESS.Status {
|
||||
t.Fatalf("error preparing payload, invalid status: %v", resp.Status)
|
||||
}
|
||||
payloadID := computePayloadId(parent.Hash(), ¶ms)
|
||||
payload, err := api.GetPayloadV1(hexutil.Bytes(payloadID))
|
||||
payload, err := api.GetPayloadV1(payloadID)
|
||||
if err != nil {
|
||||
t.Fatalf("can't get payload: %v", err)
|
||||
}
|
||||
@ -399,10 +450,10 @@ func TestFullAPI(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("can't execute payload: %v", err)
|
||||
}
|
||||
if execResp.Status != VALID.Status {
|
||||
if execResp.Status != beacon.VALID.Status {
|
||||
t.Fatalf("invalid status: %v", execResp.Status)
|
||||
}
|
||||
fcState = ForkchoiceStateV1{
|
||||
fcState = beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: payload.BlockHash,
|
||||
SafeBlockHash: payload.ParentHash,
|
||||
FinalizedBlockHash: payload.ParentHash,
|
||||
@ -414,6 +465,5 @@ func TestFullAPI(t *testing.T) {
|
||||
t.Fatalf("Chain head should be updated")
|
||||
}
|
||||
parent = ethservice.BlockChain().CurrentBlock()
|
||||
|
||||
}
|
||||
}
|
||||
|
@ -1,36 +0,0 @@
|
||||
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
|
||||
|
||||
package catalyst
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
var _ = (*payloadResponseMarshaling)(nil)
|
||||
|
||||
// MarshalJSON marshals as JSON.
|
||||
func (p PayloadResponse) MarshalJSON() ([]byte, error) {
|
||||
type PayloadResponse struct {
|
||||
PayloadID hexutil.Uint64 `json:"payloadId"`
|
||||
}
|
||||
var enc PayloadResponse
|
||||
enc.PayloadID = hexutil.Uint64(p.PayloadID)
|
||||
return json.Marshal(&enc)
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals from JSON.
|
||||
func (p *PayloadResponse) UnmarshalJSON(input []byte) error {
|
||||
type PayloadResponse struct {
|
||||
PayloadID *hexutil.Uint64 `json:"payloadId"`
|
||||
}
|
||||
var dec PayloadResponse
|
||||
if err := json.Unmarshal(input, &dec); err != nil {
|
||||
return err
|
||||
}
|
||||
if dec.PayloadID != nil {
|
||||
p.PayloadID = uint64(*dec.PayloadID)
|
||||
}
|
||||
return nil
|
||||
}
|
78
eth/catalyst/queue.go
Normal file
@ -0,0 +1,78 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package catalyst

import (
"sync"

"github.com/ethereum/go-ethereum/core/beacon"
)

// maxTrackedPayloads is the maximum number of prepared payloads the execution
// engine tracks before evicting old ones. Ideally we should only ever track the
// latest one; but have a slight wiggle room for non-ideal conditions.
const maxTrackedPayloads = 10

// payloadQueueItem represents an id->payload tuple to store until it's retrieved
// or evicted.
type payloadQueueItem struct {
id beacon.PayloadID
payload *beacon.ExecutableDataV1
}

// payloadQueue tracks the latest handful of constructed payloads to be retrieved
// by the beacon chain if block production is requested.
type payloadQueue struct {
payloads []*payloadQueueItem
lock sync.RWMutex
}

// newPayloadQueue creates a pre-initialized queue with a fixed number of slots
// all containing empty items.
func newPayloadQueue() *payloadQueue {
return &payloadQueue{
payloads: make([]*payloadQueueItem, maxTrackedPayloads),
}
}

// put inserts a new payload into the queue at the given id.
func (q *payloadQueue) put(id beacon.PayloadID, data *beacon.ExecutableDataV1) {
q.lock.Lock()
defer q.lock.Unlock()

copy(q.payloads[1:], q.payloads)
q.payloads[0] = &payloadQueueItem{
id: id,
payload: data,
}
}

// get retrieves a previously stored payload item or nil if it does not exist.
func (q *payloadQueue) get(id beacon.PayloadID) *beacon.ExecutableDataV1 {
q.lock.RLock()
defer q.lock.RUnlock()

for _, item := range q.payloads {
if item == nil {
return nil // no more items
}
if item.id == id {
return item.payload
}
}
return nil
}
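A short sketch (not part of the commit, written as if it lived in the same package since the type is unexported) of the queue's behaviour: the newest payload always sits at slot 0 and anything older than `maxTrackedPayloads` entries is silently evicted. The ids and payload contents are made up for illustration:

```go
package catalyst

import "github.com/ethereum/go-ethereum/core/beacon"

// examplePayloadQueue exercises the put/get semantics of the queue above.
func examplePayloadQueue() {
	q := newPayloadQueue()
	for i := 0; i <= maxTrackedPayloads; i++ {
		id := beacon.PayloadID{byte(i)}
		q.put(id, &beacon.ExecutableDataV1{Number: uint64(i)})
	}
	// maxTrackedPayloads+1 inserts mean the oldest entry has been shifted out.
	if q.get(beacon.PayloadID{0}) == nil {
		// evicted, as expected
	}
	// The most recent id is still retrievable.
	_ = q.get(beacon.PayloadID{byte(maxTrackedPayloads)})
}
```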
@ -364,6 +364,7 @@ func testSequentialAnnouncements(t *testing.T, light bool) {
|
||||
hashes, blocks := makeChain(targetBlocks, 0, genesis)
|
||||
|
||||
tester := newTester(light)
|
||||
defer tester.fetcher.Stop()
|
||||
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
|
||||
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
|
||||
|
||||
@ -743,7 +744,7 @@ func testInvalidNumberAnnouncement(t *testing.T, light bool) {
|
||||
badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0)
|
||||
|
||||
imported := make(chan interface{})
|
||||
announced := make(chan interface{})
|
||||
announced := make(chan interface{}, 2)
|
||||
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) {
|
||||
if light {
|
||||
if header == nil {
|
||||
@ -806,6 +807,7 @@ func TestEmptyBlockShortCircuit(t *testing.T) {
|
||||
hashes, blocks := makeChain(32, 0, genesis)
|
||||
|
||||
tester := newTester(false)
|
||||
defer tester.fetcher.Stop()
|
||||
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack)
|
||||
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
|
||||
|
||||
|
@ -433,7 +433,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
|
||||
return
|
||||
}
|
||||
peer.Log().Debug("Whitelist block verified", "number", number, "hash", hash)
|
||||
|
||||
res.Done <- nil
|
||||
case <-timeout.C:
|
||||
peer.Log().Warn("Whitelist challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
|
||||
h.removePeer(peer.ID())
|
||||
|
@ -299,7 +299,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
|
||||
size uint64
|
||||
last common.Hash
|
||||
)
|
||||
for it.Next() && size < req.Bytes {
|
||||
for it.Next() {
|
||||
hash, account := it.Hash(), common.CopyBytes(it.Account())
|
||||
|
||||
// Track the returned interval for the Merkle proofs
|
||||
@ -315,6 +315,9 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
|
||||
if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
|
||||
break
|
||||
}
|
||||
if size > req.Bytes {
|
||||
break
|
||||
}
|
||||
}
|
||||
it.Release()
|
||||
|
||||
@ -464,7 +467,7 @@ func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [
|
||||
// Peers should not request the empty code, but if they do, at
|
||||
// least send them back a correct response without db lookups
|
||||
codes = append(codes, []byte{})
|
||||
} else if blob, err := chain.ContractCode(hash); err == nil {
|
||||
} else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
|
||||
codes = append(codes, blob)
|
||||
bytes += uint64(len(blob))
|
||||
}
|
||||
|
@ -1781,7 +1781,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
|
||||
for i, account := range res.accounts {
|
||||
// Check if the account is a contract with an unknown code
|
||||
if !bytes.Equal(account.CodeHash, emptyCode[:]) {
|
||||
if code := rawdb.ReadCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)); code == nil {
|
||||
if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) {
|
||||
res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
|
||||
res.task.needCode[i] = true
|
||||
res.task.pend++
|
||||
@ -1789,7 +1789,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
|
||||
}
|
||||
// Check if the account is a contract with an unknown storage trie
|
||||
if account.Root != emptyRoot {
|
||||
if node, err := s.db.Get(account.Root[:]); err != nil || node == nil {
|
||||
if ok, err := s.db.Has(account.Root[:]); err != nil || !ok {
|
||||
// If there was a previous large state retrieval in progress,
|
||||
// don't restart it from scratch. This happens if a sync cycle
|
||||
// is interrupted and resumed later. However, *do* update the
|
||||
|
@ -593,11 +593,11 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
|
||||
if threads > len(txs) {
|
||||
threads = len(txs)
|
||||
}
|
||||
blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
|
||||
blockHash := block.Hash()
|
||||
for th := 0; th < threads; th++ {
|
||||
pend.Add(1)
|
||||
go func() {
|
||||
blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
|
||||
defer pend.Done()
|
||||
// Fetch and execute the next transaction trace tasks
|
||||
for task := range jobs {
|
||||
@ -618,6 +618,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
|
||||
}
|
||||
// Feed the transactions into the tracers and return
|
||||
var failed error
|
||||
blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
|
||||
for i, tx := range txs {
|
||||
// Send the trace task over for execution
|
||||
jobs <- &txTraceTask{statedb: statedb.Copy(), index: i}
|
||||
|
@ -130,10 +130,6 @@ func TestCallTracerLegacy(t *testing.T) {
|
||||
testCallTracer("callTracerLegacy", "call_tracer_legacy", t)
|
||||
}
|
||||
|
||||
func TestCallTracerJs(t *testing.T) {
|
||||
testCallTracer("callTracerJs", "call_tracer", t)
|
||||
}
|
||||
|
||||
func TestCallTracerNative(t *testing.T) {
|
||||
testCallTracer("callTracer", "call_tracer", t)
|
||||
}
|
||||
|
File diff suppressed because one or more lines are too long
@ -1,112 +0,0 @@
|
||||
// Copyright 2021 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
// callFrameTracer uses the new call frame tracing methods to report useful information
|
||||
// about internal messages of a transaction.
|
||||
{
|
||||
callstack: [{}],
|
||||
fault: function(log, db) {},
|
||||
result: function(ctx, db) {
|
||||
// Prepare outer message info
|
||||
var result = {
|
||||
type: ctx.type,
|
||||
from: toHex(ctx.from),
|
||||
to: toHex(ctx.to),
|
||||
value: '0x' + ctx.value.toString(16),
|
||||
gas: '0x' + bigInt(ctx.gas).toString(16),
|
||||
gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16),
|
||||
input: toHex(ctx.input),
|
||||
output: toHex(ctx.output),
|
||||
}
|
||||
if (this.callstack[0].calls !== undefined) {
|
||||
result.calls = this.callstack[0].calls
|
||||
}
|
||||
if (this.callstack[0].error !== undefined) {
|
||||
result.error = this.callstack[0].error
|
||||
} else if (ctx.error !== undefined) {
|
||||
result.error = ctx.error
|
||||
}
|
||||
if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) {
|
||||
delete result.output
|
||||
}
|
||||
|
||||
return this.finalize(result)
|
||||
},
|
||||
enter: function(frame) {
|
||||
var call = {
|
||||
type: frame.getType(),
|
||||
from: toHex(frame.getFrom()),
|
||||
to: toHex(frame.getTo()),
|
||||
input: toHex(frame.getInput()),
|
||||
gas: '0x' + bigInt(frame.getGas()).toString('16'),
|
||||
}
|
||||
if (frame.getValue() !== undefined){
|
||||
call.value='0x' + bigInt(frame.getValue()).toString(16)
|
||||
}
|
||||
this.callstack.push(call)
|
||||
},
|
||||
exit: function(frameResult) {
|
||||
var len = this.callstack.length
|
||||
if (len > 1) {
|
||||
var call = this.callstack.pop()
|
||||
call.gasUsed = '0x' + bigInt(frameResult.getGasUsed()).toString('16')
|
||||
var error = frameResult.getError()
|
||||
if (error === undefined) {
|
||||
call.output = toHex(frameResult.getOutput())
|
||||
} else {
|
||||
call.error = error
|
||||
if (call.type === 'CREATE' || call.type === 'CREATE2') {
|
||||
delete call.to
|
||||
}
|
||||
}
|
||||
len -= 1
|
||||
if (this.callstack[len-1].calls === undefined) {
|
||||
this.callstack[len-1].calls = []
|
||||
}
|
||||
this.callstack[len-1].calls.push(call)
|
||||
}
|
||||
},
|
||||
// finalize recreates a call object using the final desired field order for json
|
||||
// serialization. This is a nicety feature to pass meaningfully ordered results
|
||||
// to users who don't interpret it, just display it.
|
||||
finalize: function(call) {
|
||||
var sorted = {
|
||||
type: call.type,
|
||||
from: call.from,
|
||||
to: call.to,
|
||||
value: call.value,
|
||||
gas: call.gas,
|
||||
gasUsed: call.gasUsed,
|
||||
input: call.input,
|
||||
output: call.output,
|
||||
error: call.error,
|
||||
time: call.time,
|
||||
calls: call.calls,
|
||||
}
|
||||
for (var key in sorted) {
|
||||
if (sorted[key] === undefined) {
|
||||
delete sorted[key]
|
||||
}
|
||||
}
|
||||
if (sorted.calls !== undefined) {
|
||||
for (var i=0; i<sorted.calls.length; i++) {
|
||||
sorted.calls[i] = this.finalize(sorted.calls[i])
|
||||
}
|
||||
}
|
||||
return sorted
|
||||
}
|
||||
}
|
@ -47,6 +47,13 @@
|
||||
// result is invoked when all the opcodes have been iterated over and returns
|
||||
// the final result of the tracing.
|
||||
result: function(ctx, db) {
|
||||
if (this.prestate === null) {
|
||||
this.prestate = {};
|
||||
// If tx is transfer-only, the recipient account
|
||||
// hasn't been populated.
|
||||
this.lookupAccount(ctx.to, db);
|
||||
}
|
||||
|
||||
// At this point, we need to deduct the 'value' from the
|
||||
// outer transaction, and move it back to the origin
|
||||
this.lookupAccount(ctx.from, db);
|
||||
@ -79,7 +86,7 @@
|
||||
}
|
||||
// Whenever new state is accessed, add it to the prestate
|
||||
switch (log.op.toString()) {
|
||||
case "EXTCODECOPY": case "EXTCODESIZE": case "BALANCE":
|
||||
case "EXTCODECOPY": case "EXTCODESIZE": case "EXTCODEHASH": case "BALANCE":
|
||||
this.lookupAccount(toAddress(log.stack.peek(0).toString(16)), db);
|
||||
break;
|
||||
case "CREATE":
|
@ -697,7 +697,7 @@ func (jst *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Ad
|
||||
jst.ctx["block"] = env.Context.BlockNumber.Uint64()
|
||||
jst.dbWrapper.db = env.StateDB
|
||||
// Update list of precompiles based on current block
|
||||
rules := env.ChainConfig().Rules(env.Context.BlockNumber)
|
||||
rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil)
|
||||
jst.activePrecompiles = vm.ActivePrecompiles(rules)
|
||||
|
||||
// Compute intrinsic gas
|
||||
|
@ -83,7 +83,7 @@ func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to commo
|
||||
t.env = env
|
||||
|
||||
// Update list of precompiles based on current block
|
||||
rules := env.ChainConfig().Rules(env.Context.BlockNumber)
|
||||
rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil)
|
||||
t.activePrecompiles = vm.ActivePrecompiles(rules)
|
||||
|
||||
// Save the outer calldata also
|
||||
|
@ -59,8 +59,7 @@ type callTracer struct {
|
||||
func newCallTracer() tracers.Tracer {
|
||||
// First callframe contains tx context info
|
||||
// and is populated on start and end.
|
||||
t := &callTracer{callstack: make([]callFrame, 1)}
|
||||
return t
|
||||
return &callTracer{callstack: make([]callFrame, 1)}
|
||||
}
|
||||
|
||||
// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
|
||||
|
@ -27,7 +27,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
register("noopTracerNative", newNoopTracer)
|
||||
register("noopTracer", newNoopTracer)
|
||||
}
|
||||
|
||||
// noopTracer is a go implementation of the Tracer interface which
|
||||
|
186
eth/tracers/native/prestate.go
Normal file
@ -0,0 +1,186 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package native

import (
"encoding/json"
"math/big"
"sync/atomic"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
)

func init() {
register("prestateTracer", newPrestateTracer)
}

type prestate = map[common.Address]*account
type account struct {
Balance string `json:"balance"`
Nonce uint64 `json:"nonce"`
Code string `json:"code"`
Storage map[common.Hash]common.Hash `json:"storage"`
}

type prestateTracer struct {
env *vm.EVM
prestate prestate
create bool
to common.Address
interrupt uint32 // Atomic flag to signal execution interruption
reason error // Textual reason for the interruption
}

func newPrestateTracer() tracers.Tracer {
// First callframe contains tx context info
// and is populated on start and end.
return &prestateTracer{prestate: prestate{}}
}

// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
func (t *prestateTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
t.env = env
t.create = create
t.to = to

// Compute intrinsic gas
isHomestead := env.ChainConfig().IsHomestead(env.Context.BlockNumber)
isIstanbul := env.ChainConfig().IsIstanbul(env.Context.BlockNumber)
intrinsicGas, err := core.IntrinsicGas(input, nil, create, isHomestead, isIstanbul)
if err != nil {
return
}

t.lookupAccount(from)
t.lookupAccount(to)

// The recipient balance includes the value transferred.
toBal := hexutil.MustDecodeBig(t.prestate[to].Balance)
toBal = new(big.Int).Sub(toBal, value)
t.prestate[to].Balance = hexutil.EncodeBig(toBal)

// The sender balance is after reducing: value, gasLimit, intrinsicGas.
// We need to re-add them to get the pre-tx balance.
fromBal := hexutil.MustDecodeBig(t.prestate[from].Balance)
gasPrice := env.TxContext.GasPrice
consumedGas := new(big.Int).Mul(
gasPrice,
new(big.Int).Add(
new(big.Int).SetUint64(intrinsicGas),
new(big.Int).SetUint64(gas),
),
)
fromBal.Add(fromBal, new(big.Int).Add(value, consumedGas))
t.prestate[from].Balance = hexutil.EncodeBig(fromBal)
t.prestate[from].Nonce--
}

// CaptureEnd is called after the call finishes to finalize the tracing.
func (t *prestateTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) {
if t.create {
// Exclude created contract.
delete(t.prestate, t.to)
}
}

// CaptureState implements the EVMLogger interface to trace a single step of VM execution.
func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
stack := scope.Stack
stackData := stack.Data()
stackLen := len(stackData)
switch {
case stackLen >= 1 && (op == vm.SLOAD || op == vm.SSTORE):
slot := common.Hash(stackData[stackLen-1].Bytes32())
t.lookupStorage(scope.Contract.Address(), slot)
case stackLen >= 1 && (op == vm.EXTCODECOPY || op == vm.EXTCODEHASH || op == vm.EXTCODESIZE || op == vm.BALANCE || op == vm.SELFDESTRUCT):
addr := common.Address(stackData[stackLen-1].Bytes20())
t.lookupAccount(addr)
case stackLen >= 5 && (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE):
addr := common.Address(stackData[stackLen-2].Bytes20())
t.lookupAccount(addr)
case op == vm.CREATE:
addr := scope.Contract.Address()
nonce := t.env.StateDB.GetNonce(addr)
t.lookupAccount(crypto.CreateAddress(addr, nonce))
case stackLen >= 4 && op == vm.CREATE2:
offset := stackData[stackLen-2]
size := stackData[stackLen-3]
init := scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64()))
inithash := crypto.Keccak256(init)
salt := stackData[stackLen-4]
t.lookupAccount(crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), inithash))
}
}

// CaptureFault implements the EVMLogger interface to trace an execution fault.
func (t *prestateTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) {
}

// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct).
func (t *prestateTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
}

// CaptureExit is called when EVM exits a scope, even if the scope didn't
// execute any code.
func (t *prestateTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
}

// GetResult returns the json-encoded nested list of call traces, and any
// error arising from the encoding or forceful termination (via `Stop`).
func (t *prestateTracer) GetResult() (json.RawMessage, error) {
res, err := json.Marshal(t.prestate)
if err != nil {
return nil, err
}
return json.RawMessage(res), t.reason
}

// Stop terminates execution of the tracer at the first opportune moment.
func (t *prestateTracer) Stop(err error) {
t.reason = err
atomic.StoreUint32(&t.interrupt, 1)
}

// lookupAccount fetches details of an account and adds it to the prestate
// if it doesn't exist there.
func (t *prestateTracer) lookupAccount(addr common.Address) {
if _, ok := t.prestate[addr]; ok {
return
}
t.prestate[addr] = &account{
Balance: bigToHex(t.env.StateDB.GetBalance(addr)),
Nonce: t.env.StateDB.GetNonce(addr),
Code: bytesToHex(t.env.StateDB.GetCode(addr)),
Storage: make(map[common.Hash]common.Hash),
}
}

// lookupStorage fetches the requested storage slot and adds
// it to the prestate of the given contract. It assumes `lookupAccount`
// has been performed on the contract before.
func (t *prestateTracer) lookupStorage(addr common.Address, key common.Hash) {
if _, ok := t.prestate[addr].Storage[key]; ok {
return
}
t.prestate[addr].Storage[key] = t.env.StateDB.GetState(addr, key)
}
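The file above registers a native tracer under the name "prestateTracer", mirroring the JS tracer touched earlier in this diff. A hedged sketch of invoking it over RPC from Go follows; the endpoint and transaction hash are placeholders, and any node exposing the debug namespace should behave the same way (this is not part of the diff):

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint; adjust to wherever a debug-enabled node listens.
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Hypothetical transaction hash used only for illustration.
	txHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")

	// Selecting the tracer by its registered name routes the request to the
	// native implementation added above instead of the JS one.
	var result json.RawMessage
	err = client.CallContext(context.Background(), &result, "debug_traceTransaction",
		txHash, map[string]interface{}{"tracer": "prestateTracer"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(result))
}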
@ -456,6 +456,17 @@ func (ec *Client) CallContract(ctx context.Context, msg ethereum.CallMsg, blockN
return hex, nil
}

// CallContractAtHash is almost the same as CallContract except that it selects
// the block by block hash instead of block height.
func (ec *Client) CallContractAtHash(ctx context.Context, msg ethereum.CallMsg, blockHash common.Hash) ([]byte, error) {
var hex hexutil.Bytes
err := ec.c.CallContext(ctx, &hex, "eth_call", toCallArg(msg), rpc.BlockNumberOrHashWithHash(blockHash, false))
if err != nil {
return nil, err
}
return hex, nil
}

// PendingCallContract executes a message call transaction using the EVM.
// The state seen by the contract call is the pending state.
func (ec *Client) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) {
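A short usage sketch for the new helper (the test added below exercises the same path); the contract address, calldata and block hash are assumed to come from the caller and are not taken from this diff:

package example

import (
	"context"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// callAtHash pins a read-only contract call to one specific block by hash,
// rather than a height that could refer to different blocks across reorgs.
func callAtHash(ec *ethclient.Client, contract common.Address, calldata []byte, blockHash common.Hash) ([]byte, error) {
	msg := ethereum.CallMsg{To: &contract, Data: calldata}
	return ec.CallContractAtHash(context.Background(), msg, blockHash)
}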
@ -285,6 +285,9 @@ func TestEthClient(t *testing.T) {
"CallContract": {
func(t *testing.T) { testCallContract(t, client) },
},
"CallContractAtHash": {
func(t *testing.T) { testCallContractAtHash(t, client) },
},
"AtFunctions": {
func(t *testing.T) { testAtFunctions(t, client) },
},
@ -507,6 +510,33 @@ func testStatusFunctions(t *testing.T, client *rpc.Client) {
}
}

func testCallContractAtHash(t *testing.T, client *rpc.Client) {
ec := NewClient(client)

// EstimateGas
msg := ethereum.CallMsg{
From: testAddr,
To: &common.Address{},
Gas: 21000,
Value: big.NewInt(1),
}
gas, err := ec.EstimateGas(context.Background(), msg)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if gas != 21000 {
t.Fatalf("unexpected gas price: %v", gas)
}
block, err := ec.HeaderByNumber(context.Background(), big.NewInt(1))
if err != nil {
t.Fatalf("BlockByNumber error: %v", err)
}
// CallContract
if _, err := ec.CallContractAtHash(context.Background(), msg, block.Hash()); err != nil {
t.Fatalf("unexpected error: %v", err)
}
}

func testCallContract(t *testing.T, client *rpc.Client) {
ec := NewClient(client)
8
go.mod
@ -17,7 +17,7 @@ require (
github.com/cloudflare/cloudflare-go v0.14.0
github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea
github.com/deckarep/golang-set v1.8.0
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48
@ -32,7 +32,7 @@ require (
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa
github.com/google/uuid v1.1.5
github.com/gorilla/websocket v1.4.2
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29
github.com/graph-gophers/graphql-go v1.3.0
github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
github.com/holiman/bloomfilter/v2 v2.0.3
@ -41,10 +41,10 @@ require (
github.com/influxdata/influxdb v1.8.3
github.com/influxdata/influxdb-client-go/v2 v2.4.0
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458
github.com/jackpal/go-nat-pmp v1.0.2
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e
github.com/julienschmidt/httprouter v1.2.0
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559
github.com/karalabe/usb v0.0.2
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.12
16
go.sum
@ -111,8 +111,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
|
||||
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
|
||||
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
|
||||
@ -216,8 +216,8 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M=
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
|
||||
github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0=
|
||||
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
@ -248,8 +248,8 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y
|
||||
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
|
||||
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
|
||||
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
|
||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
@ -265,8 +265,8 @@ github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
|
||||
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 h1:0VWDXPNE0brOek1Q8bLfzKkvOzwbQE/snjGojlCr8CY=
|
||||
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
|
||||
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
|
@ -48,6 +48,7 @@ func TestBuildSchema(t *testing.T) {
conf := node.DefaultConfig
conf.DataDir = ddir
stack, err := node.New(&conf)
defer stack.Close()
if err != nil {
t.Fatalf("could not create new node: %v", err)
}
@ -287,7 +287,7 @@ func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI {
}
}

// listAccounts will return a list of addresses for accounts this node manages.
// ListAccounts will return a list of addresses for accounts this node manages.
func (s *PrivateAccountAPI) ListAccounts() []common.Address {
return s.am.Accounts()
}
@ -767,8 +767,7 @@ func (s *PublicBlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Ha
return nil, err
}

// GetUncleByBlockNumberAndIndex returns the uncle block for the given block hash and index. When fullTx is true
// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned.
// GetUncleByBlockNumberAndIndex returns the uncle block for the given block hash and index.
func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) {
block, err := s.b.BlockByNumber(ctx, blockNr)
if block != nil {
@ -783,8 +782,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context,
return nil, err
}

// GetUncleByBlockHashAndIndex returns the uncle block for the given block hash and index. When fullTx is true
// all transactions in the block are returned in full detail, otherwise only the transaction hash is returned.
// GetUncleByBlockHashAndIndex returns the uncle block for the given block hash and index.
func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) (map[string]interface{}, error) {
block, err := s.b.BlockByHash(ctx, blockHash)
if block != nil {
@ -1432,8 +1430,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
} else {
to = crypto.CreateAddress(args.from(), uint64(*args.Nonce))
}
isPostMerge := header.Difficulty.Cmp(common.Big0) == 0
// Retrieve the precompiles since they don't need to be added to the access list
precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number))
precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, isPostMerge))

// Create an initial tracer
prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles)
@ -1657,7 +1656,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, ha
fields["status"] = hexutil.Uint(receipt.Status)
}
if receipt.Logs == nil {
fields["logs"] = [][]*types.Log{}
fields["logs"] = []*types.Log{}
}
// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
if receipt.ContractAddress != (common.Address{}) {
@ -55,20 +55,20 @@ type TransactionArgs struct {
}

// from retrieves the transaction sender address.
func (arg *TransactionArgs) from() common.Address {
if arg.From == nil {
func (args *TransactionArgs) from() common.Address {
if args.From == nil {
return common.Address{}
}
return *arg.From
return *args.From
}

// data retrieves the transaction calldata. Input field is preferred.
func (arg *TransactionArgs) data() []byte {
if arg.Input != nil {
return *arg.Input
func (args *TransactionArgs) data() []byte {
if args.Input != nil {
return *args.Input
}
if arg.Data != nil {
return *arg.Data
if args.Data != nil {
return *args.Data
}
return nil
}
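The data() accessor keeps its existing precedence: when both Input and Data are populated, Input wins. A tiny sketch of that behaviour (it would have to live alongside TransactionArgs in the internal/ethapi package, since the type is not importable from elsewhere; the function is hypothetical):

package ethapi

import "github.com/ethereum/go-ethereum/common/hexutil"

// preferredCalldata shows the precedence encoded in data(): Input shadows Data.
func preferredCalldata() []byte {
	input := hexutil.Bytes{0x01}
	data := hexutil.Bytes{0x02}
	args := TransactionArgs{Input: &input, Data: &data}
	return args.data() // {0x01}
}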
@ -83,20 +83,20 @@ func TestNatto(t *testing.T) {
|
||||
|
||||
err := jsre.Exec("test.js")
|
||||
if err != nil {
|
||||
t.Errorf("expected no error, got %v", err)
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
val, err := jsre.Run("msg")
|
||||
if err != nil {
|
||||
t.Errorf("expected no error, got %v", err)
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
if val.ExportType().Kind() != reflect.String {
|
||||
t.Errorf("expected string value, got %v", val)
|
||||
t.Fatalf("expected string value, got %v", val)
|
||||
}
|
||||
exp := "testMsg"
|
||||
got := val.ToString().String()
|
||||
if exp != got {
|
||||
t.Errorf("expected '%v', got '%v'", exp, got)
|
||||
t.Fatalf("expected '%v', got '%v'", exp, got)
|
||||
}
|
||||
jsre.Stop(false)
|
||||
}
|
||||
|
@ -576,6 +576,11 @@ web3._extend({
params: 3,
inputFormatter: [null, web3._extend.formatters.inputBlockNumberFormatter, null]
}),
new web3._extend.Method({
name: 'getLogs',
call: 'eth_getLogs',
params: 1,
}),
],
properties: [
new web3._extend.Property({
178
les/catalyst/api.go
Normal file
@ -0,0 +1,178 @@
|
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package catalyst implements the temporary eth1/eth2 RPC integration.
|
||||
package catalyst
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/beacon"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
// Register adds catalyst APIs to the light client.
|
||||
func Register(stack *node.Node, backend *les.LightEthereum) error {
|
||||
log.Warn("Catalyst mode enabled", "protocol", "les")
|
||||
stack.RegisterAPIs([]rpc.API{
|
||||
{
|
||||
Namespace: "engine",
|
||||
Version: "1.0",
|
||||
Service: NewConsensusAPI(backend),
|
||||
Public: true,
|
||||
},
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
type ConsensusAPI struct {
|
||||
les *les.LightEthereum
|
||||
}
|
||||
|
||||
// NewConsensusAPI creates a new consensus api for the given backend.
|
||||
// The underlying blockchain needs to have a valid terminal total difficulty set.
|
||||
func NewConsensusAPI(les *les.LightEthereum) *ConsensusAPI {
|
||||
if les.BlockChain().Config().TerminalTotalDifficulty == nil {
|
||||
panic("Catalyst started without valid total difficulty")
|
||||
}
|
||||
return &ConsensusAPI{les: les}
|
||||
}
|
||||
|
||||
// ForkchoiceUpdatedV1 has several responsibilities:
|
||||
// If the method is called with an empty head block:
|
||||
// we return success, which can be used to check if the catalyst mode is enabled
|
||||
// If the total difficulty was not reached:
|
||||
// we return INVALID
|
||||
// If the finalizedBlockHash is set:
|
||||
// we check if we have the finalizedBlockHash in our db, if not we start a sync
|
||||
// We try to set our blockchain to the headBlock
|
||||
// If there are payloadAttributes:
|
||||
// we return an error since block creation is not supported in les mode
|
||||
func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
|
||||
if heads.HeadBlockHash == (common.Hash{}) {
|
||||
return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: nil}, nil
|
||||
}
|
||||
if err := api.checkTerminalTotalDifficulty(heads.HeadBlockHash); err != nil {
|
||||
if header := api.les.BlockChain().GetHeaderByHash(heads.HeadBlockHash); header == nil {
|
||||
// TODO (MariusVanDerWijden) trigger sync
|
||||
return beacon.SYNCING, nil
|
||||
}
|
||||
return beacon.INVALID, err
|
||||
}
|
||||
// If the finalized block is set, check if it is in our blockchain
|
||||
if heads.FinalizedBlockHash != (common.Hash{}) {
|
||||
if header := api.les.BlockChain().GetHeaderByHash(heads.FinalizedBlockHash); header == nil {
|
||||
// TODO (MariusVanDerWijden) trigger sync
|
||||
return beacon.SYNCING, nil
|
||||
}
|
||||
}
|
||||
// SetHead
|
||||
if err := api.setHead(heads.HeadBlockHash); err != nil {
|
||||
return beacon.INVALID, err
|
||||
}
|
||||
if payloadAttributes != nil {
|
||||
return beacon.INVALID, errors.New("not supported")
|
||||
}
|
||||
return beacon.ForkChoiceResponse{Status: beacon.SUCCESS.Status, PayloadID: nil}, nil
|
||||
}
|
||||
|
||||
// GetPayloadV1 returns a cached payload by id. It's not supported in les mode.
|
||||
func (api *ConsensusAPI) GetPayloadV1(payloadID beacon.PayloadID) (*beacon.ExecutableDataV1, error) {
|
||||
return nil, &beacon.GenericServerError
|
||||
}
|
||||
|
||||
// ExecutePayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
|
||||
func (api *ConsensusAPI) ExecutePayloadV1(params beacon.ExecutableDataV1) (beacon.ExecutePayloadResponse, error) {
|
||||
block, err := beacon.ExecutableDataToBlock(params)
|
||||
if err != nil {
|
||||
return api.invalid(), err
|
||||
}
|
||||
if !api.les.BlockChain().HasHeader(block.ParentHash(), block.NumberU64()-1) {
|
||||
/*
|
||||
TODO (MariusVanDerWijden) reenable once sync is merged
|
||||
if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), block.Header()); err != nil {
|
||||
return SYNCING, err
|
||||
}
|
||||
*/
|
||||
// TODO (MariusVanDerWijden) we should return nil here not empty hash
|
||||
return beacon.ExecutePayloadResponse{Status: beacon.SYNCING.Status, LatestValidHash: common.Hash{}}, nil
|
||||
}
|
||||
parent := api.les.BlockChain().GetHeaderByHash(params.ParentHash)
|
||||
if parent == nil {
|
||||
return api.invalid(), fmt.Errorf("could not find parent %x", params.ParentHash)
|
||||
}
|
||||
td := api.les.BlockChain().GetTd(parent.Hash(), block.NumberU64()-1)
|
||||
ttd := api.les.BlockChain().Config().TerminalTotalDifficulty
|
||||
if td.Cmp(ttd) < 0 {
|
||||
return api.invalid(), fmt.Errorf("can not execute payload on top of block with low td got: %v threshold %v", td, ttd)
|
||||
}
|
||||
if err = api.les.BlockChain().InsertHeader(block.Header()); err != nil {
|
||||
return api.invalid(), err
|
||||
}
|
||||
if merger := api.les.Merger(); !merger.TDDReached() {
|
||||
merger.ReachTTD()
|
||||
}
|
||||
return beacon.ExecutePayloadResponse{Status: beacon.VALID.Status, LatestValidHash: block.Hash()}, nil
|
||||
}
|
||||
|
||||
// invalid returns a response "INVALID" with the latest valid hash set to the current head.
|
||||
func (api *ConsensusAPI) invalid() beacon.ExecutePayloadResponse {
|
||||
return beacon.ExecutePayloadResponse{Status: beacon.INVALID.Status, LatestValidHash: api.les.BlockChain().CurrentHeader().Hash()}
|
||||
}
|
||||
|
||||
func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
|
||||
// shortcut if we entered PoS already
|
||||
if api.les.Merger().PoSFinalized() {
|
||||
return nil
|
||||
}
|
||||
// make sure the parent has enough terminal total difficulty
|
||||
header := api.les.BlockChain().GetHeaderByHash(head)
|
||||
if header == nil {
|
||||
return &beacon.GenericServerError
|
||||
}
|
||||
td := api.les.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
|
||||
if td != nil && td.Cmp(api.les.BlockChain().Config().TerminalTotalDifficulty) < 0 {
|
||||
return &beacon.InvalidTB
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setHead is called to perform a force choice.
|
||||
func (api *ConsensusAPI) setHead(newHead common.Hash) error {
|
||||
log.Info("Setting head", "head", newHead)
|
||||
|
||||
headHeader := api.les.BlockChain().CurrentHeader()
|
||||
if headHeader.Hash() == newHead {
|
||||
return nil
|
||||
}
|
||||
newHeadHeader := api.les.BlockChain().GetHeaderByHash(newHead)
|
||||
if newHeadHeader == nil {
|
||||
return &beacon.GenericServerError
|
||||
}
|
||||
if err := api.les.BlockChain().SetChainHead(newHeadHeader); err != nil {
|
||||
return err
|
||||
}
|
||||
// Trigger the transition if it's the first `NewHead` event.
|
||||
if merger := api.les.Merger(); !merger.PoSFinalized() {
|
||||
merger.FinalizePoS()
|
||||
}
|
||||
return nil
|
||||
}
|
244
les/catalyst/api_test.go
Normal file
@ -0,0 +1,244 @@
|
||||
// Copyright 2020 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package catalyst
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/beacon"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
var (
|
||||
// testKey is a private key to use for funding a tester account.
|
||||
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
||||
|
||||
// testAddr is the Ethereum address of the tester account.
|
||||
testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
|
||||
|
||||
testBalance = big.NewInt(2e18)
|
||||
)
|
||||
|
||||
func generatePreMergeChain(n int) (*core.Genesis, []*types.Header, []*types.Block) {
|
||||
db := rawdb.NewMemoryDatabase()
|
||||
config := params.AllEthashProtocolChanges
|
||||
genesis := &core.Genesis{
|
||||
Config: config,
|
||||
Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}},
|
||||
ExtraData: []byte("test genesis"),
|
||||
Timestamp: 9000,
|
||||
BaseFee: big.NewInt(params.InitialBaseFee),
|
||||
}
|
||||
gblock := genesis.ToBlock(db)
|
||||
engine := ethash.NewFaker()
|
||||
blocks, _ := core.GenerateChain(config, gblock, engine, db, n, nil)
|
||||
totalDifficulty := big.NewInt(0)
|
||||
|
||||
var headers []*types.Header
|
||||
for _, b := range blocks {
|
||||
totalDifficulty.Add(totalDifficulty, b.Difficulty())
|
||||
headers = append(headers, b.Header())
|
||||
}
|
||||
config.TerminalTotalDifficulty = totalDifficulty
|
||||
|
||||
return genesis, headers, blocks
|
||||
}
|
||||
|
||||
func TestSetHeadBeforeTotalDifficulty(t *testing.T) {
|
||||
genesis, headers, blocks := generatePreMergeChain(10)
|
||||
n, lesService := startLesService(t, genesis, headers)
|
||||
defer n.Close()
|
||||
|
||||
api := NewConsensusAPI(lesService)
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: blocks[5].Hash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
}
|
||||
if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err == nil {
|
||||
t.Errorf("fork choice updated before total terminal difficulty should fail")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutePayloadV1(t *testing.T) {
|
||||
genesis, headers, blocks := generatePreMergeChain(10)
|
||||
n, lesService := startLesService(t, genesis, headers[:9])
|
||||
lesService.Merger().ReachTTD()
|
||||
defer n.Close()
|
||||
|
||||
api := NewConsensusAPI(lesService)
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: blocks[8].Hash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
}
|
||||
if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
|
||||
t.Errorf("Failed to update head %v", err)
|
||||
}
|
||||
block := blocks[9]
|
||||
|
||||
fakeBlock := types.NewBlock(&types.Header{
|
||||
ParentHash: block.ParentHash(),
|
||||
UncleHash: crypto.Keccak256Hash(nil),
|
||||
Coinbase: block.Coinbase(),
|
||||
Root: block.Root(),
|
||||
TxHash: crypto.Keccak256Hash(nil),
|
||||
ReceiptHash: crypto.Keccak256Hash(nil),
|
||||
Bloom: block.Bloom(),
|
||||
Difficulty: big.NewInt(0),
|
||||
Number: block.Number(),
|
||||
GasLimit: block.GasLimit(),
|
||||
GasUsed: block.GasUsed(),
|
||||
Time: block.Time(),
|
||||
Extra: block.Extra(),
|
||||
MixDigest: block.MixDigest(),
|
||||
Nonce: types.BlockNonce{},
|
||||
BaseFee: block.BaseFee(),
|
||||
}, nil, nil, nil, trie.NewStackTrie(nil))
|
||||
|
||||
_, err := api.ExecutePayloadV1(beacon.ExecutableDataV1{
|
||||
ParentHash: fakeBlock.ParentHash(),
|
||||
FeeRecipient: fakeBlock.Coinbase(),
|
||||
StateRoot: fakeBlock.Root(),
|
||||
ReceiptsRoot: fakeBlock.ReceiptHash(),
|
||||
LogsBloom: fakeBlock.Bloom().Bytes(),
|
||||
Random: fakeBlock.MixDigest(),
|
||||
Number: fakeBlock.NumberU64(),
|
||||
GasLimit: fakeBlock.GasLimit(),
|
||||
GasUsed: fakeBlock.GasUsed(),
|
||||
Timestamp: fakeBlock.Time(),
|
||||
ExtraData: fakeBlock.Extra(),
|
||||
BaseFeePerGas: fakeBlock.BaseFee(),
|
||||
BlockHash: fakeBlock.Hash(),
|
||||
Transactions: encodeTransactions(fakeBlock.Transactions()),
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("Failed to execute payload %v", err)
|
||||
}
|
||||
headHeader := api.les.BlockChain().CurrentHeader()
|
||||
if headHeader.Number.Uint64() != fakeBlock.NumberU64()-1 {
|
||||
t.Fatal("Unexpected chain head update")
|
||||
}
|
||||
fcState = beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: fakeBlock.Hash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
}
|
||||
if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
|
||||
t.Fatal("Failed to update head")
|
||||
}
|
||||
headHeader = api.les.BlockChain().CurrentHeader()
|
||||
if headHeader.Number.Uint64() != fakeBlock.NumberU64() {
|
||||
t.Fatal("Failed to update chain head")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEth2DeepReorg(t *testing.T) {
|
||||
// TODO (MariusVanDerWijden) TestEth2DeepReorg is currently broken, because it tries to reorg
|
||||
// before the totalTerminalDifficulty threshold
|
||||
/*
|
||||
genesis, preMergeBlocks := generatePreMergeChain(core.TriesInMemory * 2)
|
||||
n, ethservice := startEthService(t, genesis, preMergeBlocks)
|
||||
defer n.Close()
|
||||
|
||||
var (
|
||||
api = NewConsensusAPI(ethservice, nil)
|
||||
parent = preMergeBlocks[len(preMergeBlocks)-core.TriesInMemory-1]
|
||||
head = ethservice.BlockChain().CurrentBlock().NumberU64()
|
||||
)
|
||||
if ethservice.BlockChain().HasBlockAndState(parent.Hash(), parent.NumberU64()) {
|
||||
t.Errorf("Block %d not pruned", parent.NumberU64())
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
execData, err := api.assembleBlock(AssembleBlockParams{
|
||||
ParentHash: parent.Hash(),
|
||||
Timestamp: parent.Time() + 5,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create the executable data %v", err)
|
||||
}
|
||||
block, err := ExecutableDataToBlock(ethservice.BlockChain().Config(), parent.Header(), *execData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to convert executable data to block %v", err)
|
||||
}
|
||||
newResp, err := api.ExecutePayload(*execData)
|
||||
if err != nil || newResp.Status != "VALID" {
|
||||
t.Fatalf("Failed to insert block: %v", err)
|
||||
}
|
||||
if ethservice.BlockChain().CurrentBlock().NumberU64() != head {
|
||||
t.Fatalf("Chain head shouldn't be updated")
|
||||
}
|
||||
if err := api.setHead(block.Hash()); err != nil {
|
||||
t.Fatalf("Failed to set head: %v", err)
|
||||
}
|
||||
if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() {
|
||||
t.Fatalf("Chain head should be updated")
|
||||
}
|
||||
parent, head = block, block.NumberU64()
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
// startEthService creates a full node instance for testing.
|
||||
func startLesService(t *testing.T, genesis *core.Genesis, headers []*types.Header) (*node.Node, *les.LightEthereum) {
|
||||
t.Helper()
|
||||
|
||||
n, err := node.New(&node.Config{})
|
||||
if err != nil {
|
||||
t.Fatal("can't create node:", err)
|
||||
}
|
||||
ethcfg := &ethconfig.Config{
|
||||
Genesis: genesis,
|
||||
Ethash: ethash.Config{PowMode: ethash.ModeFake},
|
||||
SyncMode: downloader.LightSync,
|
||||
TrieDirtyCache: 256,
|
||||
TrieCleanCache: 256,
|
||||
LightPeers: 10,
|
||||
}
|
||||
lesService, err := les.New(n, ethcfg)
|
||||
if err != nil {
|
||||
t.Fatal("can't create eth service:", err)
|
||||
}
|
||||
if err := n.Start(); err != nil {
|
||||
t.Fatal("can't start node:", err)
|
||||
}
|
||||
if _, err := lesService.BlockChain().InsertHeaderChain(headers, 0); err != nil {
|
||||
n.Close()
|
||||
t.Fatal("can't import test headers:", err)
|
||||
}
|
||||
return n, lesService
|
||||
}
|
||||
|
||||
func encodeTransactions(txs []*types.Transaction) [][]byte {
|
||||
var enc = make([][]byte, len(txs))
|
||||
for i, tx := range txs {
|
||||
enc[i], _ = tx.MarshalBinary()
|
||||
}
|
||||
return enc
|
||||
}
|
@ -35,10 +35,12 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
// Backend wraps all methods required for mining.
|
||||
// Backend wraps all methods required for mining. Only full node is capable
|
||||
// to offer all the functions here.
|
||||
type Backend interface {
|
||||
BlockChain() *core.BlockChain
|
||||
TxPool() *core.TxPool
|
||||
StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error)
|
||||
}
|
||||
|
||||
// Config is the configuration parameters of mining.
|
||||
@ -68,7 +70,7 @@ type Miner struct {
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(header *types.Header) bool, merger *consensus.Merger) *Miner {
|
||||
func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(header *types.Header) bool) *Miner {
|
||||
miner := &Miner{
|
||||
eth: eth,
|
||||
mux: mux,
|
||||
@ -76,7 +78,7 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even
|
||||
exitCh: make(chan struct{}),
|
||||
startCh: make(chan common.Address),
|
||||
stopCh: make(chan struct{}),
|
||||
worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true, merger),
|
||||
worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true),
|
||||
}
|
||||
miner.wg.Add(1)
|
||||
go miner.update()
|
||||
@ -233,6 +235,12 @@ func (miner *Miner) DisablePreseal() {
|
||||
miner.worker.disablePreseal()
|
||||
}
|
||||
|
||||
// GetSealingBlock retrieves a sealing block based on the given parameters.
|
||||
// The returned block is not sealed but all other fields should be filled.
|
||||
func (miner *Miner) GetSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash) (*types.Block, error) {
|
||||
return miner.worker.getSealingBlock(parent, timestamp, coinbase, random)
|
||||
}
|
||||
|
||||
// SubscribePendingLogs starts delivering logs from pending transactions
|
||||
// to the given channel.
|
||||
func (miner *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription {
|
||||
|
@ -18,11 +18,11 @@
|
||||
package miner
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/consensus"
|
||||
"github.com/ethereum/go-ethereum/consensus/clique"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
@ -55,6 +55,10 @@ func (m *mockBackend) TxPool() *core.TxPool {
|
||||
return m.txPool
|
||||
}
|
||||
|
||||
func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
|
||||
return nil, errors.New("not supported")
|
||||
}
|
||||
|
||||
type testBlockChain struct {
|
||||
statedb *state.StateDB
|
||||
gasLimit uint64
|
||||
@ -80,7 +84,8 @@ func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent)
|
||||
}
|
||||
|
||||
func TestMiner(t *testing.T) {
|
||||
miner, mux := createMiner(t)
|
||||
miner, mux, cleanup := createMiner(t)
|
||||
defer cleanup(false)
|
||||
miner.Start(common.HexToAddress("0x12345"))
|
||||
waitForMiningState(t, miner, true)
|
||||
// Start the downloader
|
||||
@ -107,7 +112,8 @@ func TestMiner(t *testing.T) {
|
||||
// An initial FailedEvent should allow mining to stop on a subsequent
|
||||
// downloader StartEvent.
|
||||
func TestMinerDownloaderFirstFails(t *testing.T) {
|
||||
miner, mux := createMiner(t)
|
||||
miner, mux, cleanup := createMiner(t)
|
||||
defer cleanup(false)
|
||||
miner.Start(common.HexToAddress("0x12345"))
|
||||
waitForMiningState(t, miner, true)
|
||||
// Start the downloader
|
||||
@ -138,8 +144,8 @@ func TestMinerDownloaderFirstFails(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMinerStartStopAfterDownloaderEvents(t *testing.T) {
|
||||
miner, mux := createMiner(t)
|
||||
|
||||
miner, mux, cleanup := createMiner(t)
|
||||
defer cleanup(false)
|
||||
miner.Start(common.HexToAddress("0x12345"))
|
||||
waitForMiningState(t, miner, true)
|
||||
// Start the downloader
|
||||
@ -161,7 +167,8 @@ func TestMinerStartStopAfterDownloaderEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStartWhileDownload(t *testing.T) {
|
||||
miner, mux := createMiner(t)
|
||||
miner, mux, cleanup := createMiner(t)
|
||||
defer cleanup(false)
|
||||
waitForMiningState(t, miner, false)
|
||||
miner.Start(common.HexToAddress("0x12345"))
|
||||
waitForMiningState(t, miner, true)
|
||||
@ -174,16 +181,19 @@ func TestStartWhileDownload(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStartStopMiner(t *testing.T) {
|
||||
miner, _ := createMiner(t)
|
||||
miner, _, cleanup := createMiner(t)
|
||||
defer cleanup(false)
|
||||
waitForMiningState(t, miner, false)
|
||||
miner.Start(common.HexToAddress("0x12345"))
|
||||
waitForMiningState(t, miner, true)
|
||||
miner.Stop()
|
||||
waitForMiningState(t, miner, false)
|
||||
|
||||
}
|
||||
|
||||
func TestCloseMiner(t *testing.T) {
|
||||
miner, _ := createMiner(t)
|
||||
miner, _, cleanup := createMiner(t)
|
||||
defer cleanup(true)
|
||||
waitForMiningState(t, miner, false)
|
||||
miner.Start(common.HexToAddress("0x12345"))
|
||||
waitForMiningState(t, miner, true)
|
||||
@ -195,7 +205,8 @@ func TestCloseMiner(t *testing.T) {
|
||||
// TestMinerSetEtherbase checks that etherbase becomes set even if mining isn't
|
||||
// possible at the moment
|
||||
func TestMinerSetEtherbase(t *testing.T) {
|
||||
miner, mux := createMiner(t)
|
||||
miner, mux, cleanup := createMiner(t)
|
||||
defer cleanup(false)
|
||||
// Start with a 'bad' mining address
|
||||
miner.Start(common.HexToAddress("0xdead"))
|
||||
waitForMiningState(t, miner, true)
|
||||
@ -230,7 +241,7 @@ func waitForMiningState(t *testing.T, m *Miner, mining bool) {
|
||||
t.Fatalf("Mining() == %t, want %t", state, mining)
|
||||
}
|
||||
|
||||
func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
|
||||
func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
|
||||
// Create Ethash config
|
||||
config := Config{
|
||||
Etherbase: common.HexToAddress("123456789"),
|
||||
@ -246,7 +257,6 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
|
||||
// Create consensus engine
|
||||
engine := clique.New(chainConfig.Clique, chainDB)
|
||||
// Create Ethereum backend
|
||||
merger := consensus.NewMerger(rawdb.NewMemoryDatabase())
|
||||
bc, err := core.NewBlockChain(chainDB, nil, chainConfig, engine, vm.Config{}, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("can't create new chain %v", err)
|
||||
@ -259,5 +269,14 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
|
||||
// Create event Mux
|
||||
mux := new(event.TypeMux)
|
||||
// Create Miner
|
||||
return New(backend, &config, chainConfig, mux, engine, nil, merger), mux
|
||||
miner := New(backend, &config, chainConfig, mux, engine, nil)
|
||||
cleanup := func(skipMiner bool) {
|
||||
bc.Stop()
|
||||
engine.Close()
|
||||
pool.Stop()
|
||||
if !skipMiner {
|
||||
miner.Close()
|
||||
}
|
||||
}
|
||||
return miner, mux, cleanup
|
||||
}
|
||||
|
@ -32,13 +32,15 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/fdlimit"
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/beacon"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/eth/catalyst"
|
||||
ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/miner"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
@ -88,24 +90,26 @@ var (
|
||||
|
||||
type ethNode struct {
|
||||
typ nodetype
|
||||
api *catalyst.ConsensusAPI
|
||||
ethBackend *eth.Ethereum
|
||||
lesBackend *les.LightEthereum
|
||||
stack *node.Node
|
||||
enode *enode.Node
|
||||
api *ethcatalyst.ConsensusAPI
|
||||
ethBackend *eth.Ethereum
|
||||
lapi *lescatalyst.ConsensusAPI
|
||||
lesBackend *les.LightEthereum
|
||||
}
|
||||
|
||||
func newNode(typ nodetype, genesis *core.Genesis, enodes []*enode.Node) *ethNode {
|
||||
var (
|
||||
err error
|
||||
api *catalyst.ConsensusAPI
|
||||
api *ethcatalyst.ConsensusAPI
|
||||
lapi *lescatalyst.ConsensusAPI
|
||||
stack *node.Node
|
||||
ethBackend *eth.Ethereum
|
||||
lesBackend *les.LightEthereum
|
||||
)
|
||||
// Start the node and wait until it's up
|
||||
if typ == eth2LightClient {
|
||||
stack, lesBackend, api, err = makeLightNode(genesis)
|
||||
stack, lesBackend, lapi, err = makeLightNode(genesis)
|
||||
} else {
|
||||
stack, ethBackend, api, err = makeFullNode(genesis)
|
||||
}
|
||||
@ -131,20 +135,27 @@ func newNode(typ nodetype, genesis *core.Genesis, enodes []*enode.Node) *ethNode
|
||||
typ: typ,
|
||||
api: api,
|
||||
ethBackend: ethBackend,
|
||||
lapi: lapi,
|
||||
lesBackend: lesBackend,
|
||||
stack: stack,
|
||||
enode: enode,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64) (*catalyst.ExecutableDataV1, error) {
|
||||
func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64) (*beacon.ExecutableDataV1, error) {
|
||||
if n.typ != eth2MiningNode {
|
||||
return nil, errors.New("invalid node type")
|
||||
}
|
||||
payloadAttribute := catalyst.PayloadAttributesV1{
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
timestamp := uint64(time.Now().Unix())
|
||||
if timestamp <= parentTimestamp {
|
||||
timestamp = parentTimestamp + 1
|
||||
}
|
||||
fcState := catalyst.ForkchoiceStateV1{
|
||||
payloadAttribute := beacon.PayloadAttributesV1{
|
||||
Timestamp: timestamp,
|
||||
Random: common.Hash{},
|
||||
SuggestedFeeRecipient: common.HexToAddress("0xdeadbeef"),
|
||||
}
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: parentHash,
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
@ -156,10 +167,12 @@ func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64)
|
||||
return n.api.GetPayloadV1(*payload.PayloadID)
|
||||
}
|
||||
|
||||
func (n *ethNode) insertBlock(eb catalyst.ExecutableDataV1) error {
|
||||
func (n *ethNode) insertBlock(eb beacon.ExecutableDataV1) error {
|
||||
if !eth2types(n.typ) {
|
||||
return errors.New("invalid node type")
|
||||
}
|
||||
switch n.typ {
|
||||
case eth2NormalNode, eth2MiningNode:
|
||||
newResp, err := n.api.ExecutePayloadV1(eb)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -167,28 +180,49 @@ func (n *ethNode) insertBlock(eb catalyst.ExecutableDataV1) error {
|
||||
return errors.New("failed to insert block")
|
||||
}
|
||||
return nil
|
||||
case eth2LightClient:
|
||||
newResp, err := n.lapi.ExecutePayloadV1(eb)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if newResp.Status != "VALID" {
|
||||
return errors.New("failed to insert block")
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return errors.New("undefined node")
|
||||
}
|
||||
}
|
||||
|
||||
func (n *ethNode) insertBlockAndSetHead(parent *types.Header, ed catalyst.ExecutableDataV1) error {
|
||||
func (n *ethNode) insertBlockAndSetHead(parent *types.Header, ed beacon.ExecutableDataV1) error {
|
||||
if !eth2types(n.typ) {
|
||||
return errors.New("invalid node type")
|
||||
}
|
||||
if err := n.insertBlock(ed); err != nil {
|
||||
return err
|
||||
}
|
||||
block, err := catalyst.ExecutableDataToBlock(ed)
|
||||
block, err := beacon.ExecutableDataToBlock(ed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fcState := catalyst.ForkchoiceStateV1{
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: block.ParentHash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
}
|
||||
switch n.typ {
|
||||
case eth2NormalNode, eth2MiningNode:
|
||||
if _, err := n.api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
case eth2LightClient:
|
||||
if _, err := n.lapi.ForkchoiceUpdatedV1(fcState, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return errors.New("undefined node")
|
||||
}
|
||||
}
|
||||
|
||||
type nodeManager struct {
|
||||
@ -284,12 +318,15 @@ func (mgr *nodeManager) run() {
|
||||
nodes = append(nodes, mgr.getNodes(eth2NormalNode)...)
|
||||
nodes = append(nodes, mgr.getNodes(eth2LightClient)...)
|
||||
for _, node := range append(nodes) {
|
||||
fcState := catalyst.ForkchoiceStateV1{
|
||||
fcState := beacon.ForkchoiceStateV1{
|
||||
HeadBlockHash: oldest.Hash(),
|
||||
SafeBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: common.Hash{},
|
||||
FinalizedBlockHash: oldest.Hash(),
|
||||
}
|
||||
node.api.ForkchoiceUpdatedV1(fcState, nil)
|
||||
// TODO(rjl493456442) finalization doesn't work properly, FIX IT
|
||||
_ = fcState
|
||||
_ = node
|
||||
//node.api.ForkchoiceUpdatedV1(fcState, nil)
|
||||
}
|
||||
log.Info("Finalised eth2 block", "number", oldest.NumberU64(), "hash", oldest.Hash())
|
||||
waitFinalise = waitFinalise[1:]
|
||||
@ -327,12 +364,11 @@ func (mgr *nodeManager) run() {
|
||||
log.Error("Failed to assemble the block", "err", err)
|
||||
continue
|
||||
}
|
||||
block, _ := catalyst.ExecutableDataToBlock(*ed)
|
||||
block, _ := beacon.ExecutableDataToBlock(*ed)
|
||||
|
||||
nodes := mgr.getNodes(eth2MiningNode)
|
||||
nodes = append(nodes, mgr.getNodes(eth2NormalNode)...)
|
||||
nodes = append(nodes, mgr.getNodes(eth2LightClient)...)
|
||||
|
||||
for _, node := range nodes {
|
||||
if err := node.insertBlockAndSetHead(parentBlock.Header(), *ed); err != nil {
|
||||
log.Error("Failed to insert block", "type", node.typ, "err", err)
|
||||
@ -410,9 +446,8 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
|
||||
genesis.Difficulty = params.MinimumDifficulty
|
||||
genesis.GasLimit = 25000000
|
||||
|
||||
genesis.Config.ChainID = big.NewInt(18)
|
||||
genesis.Config.EIP150Hash = common.Hash{}
|
||||
genesis.BaseFee = big.NewInt(params.InitialBaseFee)
|
||||
genesis.Config = params.AllEthashProtocolChanges
|
||||
genesis.Config.TerminalTotalDifficulty = transitionDifficulty
|
||||
|
||||
genesis.Alloc = core.GenesisAlloc{}
|
||||
@ -424,7 +459,7 @@ func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis {
|
||||
return genesis
|
||||
}
|
||||
|
||||
func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *catalyst.ConsensusAPI, error) {
|
||||
func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *ethcatalyst.ConsensusAPI, error) {
|
||||
// Define the basic configurations for the Ethereum node
|
||||
datadir, _ := ioutil.TempDir("", "")
|
||||
|
||||
@ -472,10 +507,10 @@ func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *catalyst.C
|
||||
log.Crit("Failed to create the LES server", "err", err)
|
||||
}
|
||||
err = stack.Start()
|
||||
return stack, ethBackend, catalyst.NewConsensusAPI(ethBackend, nil), err
|
||||
return stack, ethBackend, ethcatalyst.NewConsensusAPI(ethBackend), err
|
||||
}
|
||||
|
||||
func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *catalyst.ConsensusAPI, error) {
|
||||
func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *lescatalyst.ConsensusAPI, error) {
|
||||
// Define the basic configurations for the Ethereum node
|
||||
datadir, _ := ioutil.TempDir("", "")
|
||||
|
||||
@ -510,7 +545,7 @@ func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *cata
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
err = stack.Start()
|
||||
return stack, lesBackend, catalyst.NewConsensusAPI(nil, lesBackend), err
|
||||
return stack, lesBackend, lescatalyst.NewConsensusAPI(lesBackend), err
|
||||
}
|
||||
|
||||
func eth2types(typ nodetype) bool {
|
||||
|
531
miner/worker.go
@ -17,8 +17,8 @@
|
||||
package miner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@ -54,14 +54,14 @@ const (
|
||||
// resubmitAdjustChanSize is the size of resubmitting interval adjustment channel.
|
||||
resubmitAdjustChanSize = 10
|
||||
|
||||
// miningLogAtDepth is the number of confirmations before logging successful mining.
|
||||
miningLogAtDepth = 7
|
||||
// sealingLogAtDepth is the number of confirmations before logging successful sealing.
|
||||
sealingLogAtDepth = 7
|
||||
|
||||
// minRecommitInterval is the minimal time interval to recreate the mining block with
|
||||
// minRecommitInterval is the minimal time interval to recreate the sealing block with
|
||||
// any newly arrived transactions.
|
||||
minRecommitInterval = 1 * time.Second
|
||||
|
||||
// maxRecommitInterval is the maximum time interval to recreate the mining block with
|
||||
// maxRecommitInterval is the maximum time interval to recreate the sealing block with
|
||||
// any newly arrived transactions.
|
||||
maxRecommitInterval = 15 * time.Second
|
||||
|
||||
@ -77,20 +77,68 @@ const (
|
||||
staleThreshold = 7
|
||||
)
|
||||
|
||||
// environment is the worker's current environment and holds all of the current state information.
|
||||
// environment is the worker's current environment and holds all
|
||||
// information of the sealing block generation.
|
||||
type environment struct {
|
||||
signer types.Signer
|
||||
|
||||
state *state.StateDB // apply state changes here
|
||||
ancestors mapset.Set // ancestor set (used for checking uncle parent validity)
|
||||
family mapset.Set // family set (used for checking uncle invalidity)
|
||||
uncles mapset.Set // uncle set
|
||||
tcount int // tx count in cycle
|
||||
gasPool *core.GasPool // available gas used to pack transactions
|
||||
coinbase common.Address
|
||||
|
||||
header *types.Header
|
||||
txs []*types.Transaction
|
||||
receipts []*types.Receipt
|
||||
uncles map[common.Hash]*types.Header
|
||||
}
|
||||
|
||||
// copy creates a deep copy of environment.
|
||||
func (env *environment) copy() *environment {
|
||||
cpy := &environment{
|
||||
signer: env.signer,
|
||||
state: env.state.Copy(),
|
||||
ancestors: env.ancestors.Clone(),
|
||||
family: env.family.Clone(),
|
||||
tcount: env.tcount,
|
||||
coinbase: env.coinbase,
|
||||
header: types.CopyHeader(env.header),
|
||||
receipts: copyReceipts(env.receipts),
|
||||
}
|
||||
if env.gasPool != nil {
|
||||
gasPool := *env.gasPool
|
||||
cpy.gasPool = &gasPool
|
||||
}
|
||||
// The content of txs and uncles are immutable, unnecessary
|
||||
// to do the expensive deep copy for them.
|
||||
cpy.txs = make([]*types.Transaction, len(env.txs))
|
||||
copy(cpy.txs, env.txs)
|
||||
cpy.uncles = make(map[common.Hash]*types.Header)
|
||||
for hash, uncle := range env.uncles {
|
||||
cpy.uncles[hash] = uncle
|
||||
}
|
||||
return cpy
|
||||
}
|
||||
|
||||
// unclelist returns the contained uncles as the list format.
|
||||
func (env *environment) unclelist() []*types.Header {
|
||||
var uncles []*types.Header
|
||||
for _, uncle := range env.uncles {
|
||||
uncles = append(uncles, uncle)
|
||||
}
|
||||
return uncles
|
||||
}
|
||||
|
||||
// discard terminates the background prefetcher go-routine. It should
|
||||
// always be called for all created environment instances otherwise
|
||||
// the go-routine leak can happen.
|
||||
func (env *environment) discard() {
|
||||
if env.state == nil {
|
||||
return
|
||||
}
|
||||
env.state.StopPrefetcher()
|
||||
}
|
||||
|
||||
// task contains all information for consensus engine sealing and result submitting.
|
||||
@ -114,6 +162,13 @@ type newWorkReq struct {
|
||||
timestamp int64
|
||||
}
|
||||
|
||||
// getWorkReq represents a request for getting a new sealing work with provided parameters.
|
||||
type getWorkReq struct {
|
||||
params *generateParams
|
||||
err error
|
||||
result chan *types.Block
|
||||
}
|
||||
|
||||
// intervalAdjust represents a resubmitting interval adjustment.
|
||||
type intervalAdjust struct {
|
||||
ratio float64
|
||||
@ -128,7 +183,6 @@ type worker struct {
|
||||
engine consensus.Engine
|
||||
eth Backend
|
||||
chain *core.BlockChain
|
||||
merger *consensus.Merger
|
||||
|
||||
// Feeds
|
||||
pendingLogsFeed event.Feed
|
||||
@ -144,6 +198,7 @@ type worker struct {
|
||||
|
||||
// Channels
|
||||
newWorkCh chan *newWorkReq
|
||||
getWorkCh chan *getWorkReq
|
||||
taskCh chan *task
|
||||
resultCh chan *types.Block
|
||||
startCh chan struct{}
|
||||
@ -191,7 +246,7 @@ type worker struct {
|
||||
resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval.
|
||||
}
|
||||
|
||||
func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool, merger *consensus.Merger) *worker {
|
||||
func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool) *worker {
|
||||
worker := &worker{
|
||||
config: config,
|
||||
chainConfig: chainConfig,
|
||||
@ -199,16 +254,16 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
|
||||
eth: eth,
|
||||
mux: mux,
|
||||
chain: eth.BlockChain(),
|
||||
merger: merger,
|
||||
isLocalBlock: isLocalBlock,
|
||||
localUncles: make(map[common.Hash]*types.Block),
|
||||
remoteUncles: make(map[common.Hash]*types.Block),
|
||||
unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth),
|
||||
unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), sealingLogAtDepth),
|
||||
pendingTasks: make(map[common.Hash]*task),
|
||||
txsCh: make(chan core.NewTxsEvent, txChanSize),
|
||||
chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
|
||||
chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize),
|
||||
newWorkCh: make(chan *newWorkReq),
|
||||
getWorkCh: make(chan *getWorkReq),
|
||||
taskCh: make(chan *task),
|
||||
resultCh: make(chan *types.Block, resultQueueSize),
|
||||
exitCh: make(chan struct{}),
|
||||
@ -264,15 +319,18 @@ func (w *worker) setExtra(extra []byte) {
|
||||
|
||||
// setRecommitInterval updates the interval for miner sealing work recommitting.
|
||||
func (w *worker) setRecommitInterval(interval time.Duration) {
|
||||
w.resubmitIntervalCh <- interval
|
||||
select {
|
||||
case w.resubmitIntervalCh <- interval:
|
||||
case <-w.exitCh:
|
||||
}
|
||||
}
|
||||
|
||||
// disablePreseal disables pre-sealing mining feature
|
||||
// disablePreseal disables pre-sealing feature
|
||||
func (w *worker) disablePreseal() {
|
||||
atomic.StoreUint32(&w.noempty, 1)
|
||||
}
|
||||
|
||||
// enablePreseal enables pre-sealing mining feature
|
||||
// enablePreseal enables pre-sealing feature
|
||||
func (w *worker) enablePreseal() {
|
||||
atomic.StoreUint32(&w.noempty, 0)
|
||||
}
|
||||
@ -350,13 +408,13 @@ func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) t
|
||||
return time.Duration(int64(next))
|
||||
}
|
||||
|
||||
// newWorkLoop is a standalone goroutine to submit new mining work upon received events.
|
||||
// newWorkLoop is a standalone goroutine to submit new sealing work upon received events.
|
||||
func (w *worker) newWorkLoop(recommit time.Duration) {
|
||||
defer w.wg.Done()
|
||||
var (
|
||||
interrupt *int32
|
||||
minRecommit = recommit // minimal resubmit interval specified by user.
|
||||
timestamp int64 // timestamp for each round of mining.
|
||||
timestamp int64 // timestamp for each round of sealing.
|
||||
)
|
||||
|
||||
timer := time.NewTimer(0)
|
||||
@ -401,7 +459,7 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
|
||||
commit(false, commitInterruptNewHead)
|
||||
|
||||
case <-timer.C:
|
||||
// If mining is running resubmit a new work cycle periodically to pull in
|
||||
// If sealing is running resubmit a new work cycle periodically to pull in
|
||||
// higher priced transactions. Disable this overhead for pending blocks.
|
||||
if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) {
|
||||
// Short circuit if no new transaction arrives.
|
||||
@ -448,22 +506,36 @@ func (w *worker) newWorkLoop(recommit time.Duration) {
|
||||
}
|
||||
}
|
||||
|
||||
// mainLoop is a standalone goroutine to regenerate the sealing task based on the received event.
|
||||
// mainLoop is responsible for generating and submitting sealing work based on
|
||||
// the received event. It can support two modes: automatically generate task and
|
||||
// submit it or return task according to given parameters for various proposes.
|
||||
func (w *worker) mainLoop() {
|
||||
defer w.wg.Done()
|
||||
defer w.txsSub.Unsubscribe()
|
||||
defer w.chainHeadSub.Unsubscribe()
|
||||
defer w.chainSideSub.Unsubscribe()
|
||||
defer func() {
|
||||
if w.current != nil && w.current.state != nil {
|
||||
w.current.state.StopPrefetcher()
|
||||
if w.current != nil {
|
||||
w.current.discard()
|
||||
}
|
||||
}()
|
||||
|
||||
cleanTicker := time.NewTicker(time.Second * 10)
|
||||
defer cleanTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case req := <-w.newWorkCh:
|
||||
w.commitNewWork(req.interrupt, req.noempty, req.timestamp)
|
||||
w.commitWork(req.interrupt, req.noempty, req.timestamp)
|
||||
|
||||
case req := <-w.getWorkCh:
|
||||
block, err := w.generateWork(req.params)
|
||||
if err != nil {
|
||||
req.err = err
|
||||
req.result <- nil
|
||||
} else {
|
||||
req.result <- block
|
||||
}
|
||||
|
||||
case ev := <-w.chainSideCh:
|
||||
// Short circuit for duplicate side blocks
|
||||
@ -479,46 +551,40 @@ func (w *worker) mainLoop() {
|
||||
} else {
|
||||
w.remoteUncles[ev.Block.Hash()] = ev.Block
|
||||
}
|
||||
// If our mining block contains less than 2 uncle blocks,
|
||||
// add the new uncle block if valid and regenerate a mining block.
|
||||
if w.isRunning() && w.current != nil && w.current.uncles.Cardinality() < 2 {
|
||||
// If our sealing block contains less than 2 uncle blocks,
|
||||
// add the new uncle block if valid and regenerate a new
|
||||
// sealing block for higher profit.
|
||||
if w.isRunning() && w.current != nil && len(w.current.uncles) < 2 {
|
||||
start := time.Now()
|
||||
if err := w.commitUncle(w.current, ev.Block.Header()); err == nil {
|
||||
var uncles []*types.Header
|
||||
w.current.uncles.Each(func(item interface{}) bool {
|
||||
hash, ok := item.(common.Hash)
|
||||
if !ok {
|
||||
return false
|
||||
w.commit(w.current.copy(), nil, true, start)
|
||||
}
|
||||
uncle, exist := w.localUncles[hash]
|
||||
if !exist {
|
||||
uncle, exist = w.remoteUncles[hash]
|
||||
}
|
||||
if !exist {
|
||||
return false
|
||||
|
||||
case <-cleanTicker.C:
|
||||
chainHead := w.chain.CurrentBlock()
|
||||
for hash, uncle := range w.localUncles {
|
||||
if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
|
||||
delete(w.localUncles, hash)
|
||||
}
|
||||
uncles = append(uncles, uncle.Header())
|
||||
return false
|
||||
})
|
||||
w.commit(uncles, nil, true, start)
|
||||
}
|
||||
for hash, uncle := range w.remoteUncles {
|
||||
if uncle.NumberU64()+staleThreshold <= chainHead.NumberU64() {
|
||||
delete(w.remoteUncles, hash)
|
||||
}
|
||||
}
|
||||
|
||||
case ev := <-w.txsCh:
|
||||
// Apply transactions to the pending state if we're not mining.
|
||||
// Apply transactions to the pending state if we're not sealing
|
||||
//
|
||||
// Note all transactions received may not be continuous with transactions
|
||||
// already included in the current mining block. These transactions will
|
||||
// already included in the current sealing block. These transactions will
|
||||
// be automatically eliminated.
|
||||
if !w.isRunning() && w.current != nil {
|
||||
// If block is already full, abort
|
||||
if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas {
|
||||
continue
|
||||
}
|
||||
w.mu.RLock()
|
||||
coinbase := w.coinbase
|
||||
w.mu.RUnlock()
|
||||
|
||||
txs := make(map[common.Address]types.Transactions)
|
||||
for _, tx := range ev.Txs {
|
||||
acc, _ := types.Sender(w.current.signer, tx)
|
||||
@ -526,18 +592,19 @@ func (w *worker) mainLoop() {
|
||||
}
|
||||
txset := types.NewTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
|
||||
tcount := w.current.tcount
|
||||
w.commitTransactions(txset, coinbase, nil)
|
||||
// Only update the snapshot if any new transactons were added
|
||||
w.commitTransactions(w.current, txset, nil)
|
||||
|
||||
// Only update the snapshot if any new transactions were added
|
||||
// to the pending block
|
||||
if tcount != w.current.tcount {
|
||||
w.updateSnapshot()
|
||||
w.updateSnapshot(w.current)
|
||||
}
|
||||
} else {
|
||||
// Special case, if the consensus engine is 0 period clique(dev mode),
|
||||
// submit mining work here since all empty submission will be rejected
|
||||
// submit sealing work here since all empty submission will be rejected
|
||||
// by clique. Of course the advance sealing(empty submission) is disabled.
|
||||
if w.chainConfig.Clique != nil && w.chainConfig.Clique.Period == 0 {
|
||||
w.commitNewWork(nil, true, time.Now().Unix())
|
||||
w.commitWork(nil, true, time.Now().Unix())
|
||||
}
|
||||
}
|
||||
atomic.AddInt32(&w.newTxs, int32(len(ev.Txs)))
|
||||
@ -679,23 +746,35 @@ func (w *worker) resultLoop() {
|
||||
}
|
||||
}
|
||||
|
||||
// makeCurrent creates a new environment for the current cycle.
|
||||
func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
|
||||
// makeEnv creates a new environment for the sealing block.
|
||||
func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase common.Address) (*environment, error) {
|
||||
// Retrieve the parent state to execute on top and start a prefetcher for
|
||||
// the miner to speed block sealing up a bit
|
||||
// the miner to speed block sealing up a bit.
|
||||
state, err := w.chain.StateAt(parent.Root())
|
||||
if err != nil {
|
||||
return err
|
||||
// Note since the sealing block can be created upon the arbitrary parent
|
||||
// block, but the state of parent block may already be pruned, so the necessary
|
||||
// state recovery is needed here in the future.
|
||||
//
|
||||
// The maximum acceptable reorg depth can be limited by the finalised block
|
||||
// somehow. TODO(rjl493456442) fix the hard-coded number here later.
|
||||
state, err = w.eth.StateAtBlock(parent, 1024, nil, false, false)
|
||||
log.Warn("Recovered mining state", "root", parent.Root(), "err", err)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state.StartPrefetcher("miner")
|
||||
|
||||
// Note the passed coinbase may be different with header.Coinbase.
|
||||
env := &environment{
|
||||
signer: types.MakeSigner(w.chainConfig, header.Number),
|
||||
state: state,
|
||||
coinbase: coinbase,
|
||||
ancestors: mapset.NewSet(),
|
||||
family: mapset.NewSet(),
|
||||
uncles: mapset.NewSet(),
|
||||
header: header,
|
||||
uncles: make(map[common.Hash]*types.Header),
|
||||
}
|
||||
// when 08 is processed ancestors contain 07 (quick block)
|
||||
for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
|
||||
@ -707,20 +786,16 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
|
||||
}
|
||||
// Keep track of transactions which return errors so they can be removed
|
||||
env.tcount = 0
|
||||
|
||||
// Swap out the old work with the new one, terminating any leftover prefetcher
|
||||
// processes in the mean time and starting a new one.
|
||||
if w.current != nil && w.current.state != nil {
|
||||
w.current.state.StopPrefetcher()
|
||||
}
|
||||
w.current = env
|
||||
return nil
|
||||
return env, nil
|
||||
}
|
||||
|
||||
// commitUncle adds the given block to uncle block set, returns error if failed to add.
|
||||
func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
|
||||
if w.isTTDReached(env.header) {
|
||||
return errors.New("ignore uncle for beacon block")
|
||||
}
|
||||
hash := uncle.Hash()
|
||||
if env.uncles.Contains(hash) {
|
||||
if _, exist := env.uncles[hash]; exist {
|
||||
return errors.New("uncle not unique")
|
||||
}
|
||||
if env.header.ParentHash == uncle.ParentHash {
|
||||
@ -732,82 +807,58 @@ func (w *worker) commitUncle(env *environment, uncle *types.Header) error {
|
||||
if env.family.Contains(hash) {
|
||||
return errors.New("uncle already included")
|
||||
}
|
||||
env.uncles.Add(uncle.Hash())
|
||||
env.uncles[hash] = uncle
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateSnapshot updates pending snapshot block and state.
|
||||
// Note this function assumes the current variable is thread safe.
|
||||
func (w *worker) updateSnapshot() {
|
||||
// updateSnapshot updates pending snapshot block, receipts and state.
|
||||
func (w *worker) updateSnapshot(env *environment) {
|
||||
w.snapshotMu.Lock()
|
||||
defer w.snapshotMu.Unlock()
|
||||
|
||||
var uncles []*types.Header
|
||||
w.current.uncles.Each(func(item interface{}) bool {
|
||||
hash, ok := item.(common.Hash)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
uncle, exist := w.localUncles[hash]
|
||||
if !exist {
|
||||
uncle, exist = w.remoteUncles[hash]
|
||||
}
|
||||
if !exist {
|
||||
return false
|
||||
}
|
||||
uncles = append(uncles, uncle.Header())
|
||||
return false
|
||||
})
|
||||
|
||||
w.snapshotBlock = types.NewBlock(
|
||||
w.current.header,
|
||||
w.current.txs,
|
||||
uncles,
|
||||
w.current.receipts,
|
||||
env.header,
|
||||
env.txs,
|
||||
env.unclelist(),
|
||||
env.receipts,
|
||||
trie.NewStackTrie(nil),
|
||||
)
|
||||
w.snapshotReceipts = copyReceipts(w.current.receipts)
|
||||
w.snapshotState = w.current.state.Copy()
|
||||
w.snapshotReceipts = copyReceipts(env.receipts)
|
||||
w.snapshotState = env.state.Copy()
|
||||
}
|
||||
|
||||
func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) {
|
||||
snap := w.current.state.Snapshot()
|
||||
func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]*types.Log, error) {
|
||||
snap := env.state.Snapshot()
|
||||
|
||||
receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig())
|
||||
receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &env.coinbase, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig())
|
||||
if err != nil {
|
||||
w.current.state.RevertToSnapshot(snap)
|
||||
env.state.RevertToSnapshot(snap)
|
||||
return nil, err
|
||||
}
|
||||
w.current.txs = append(w.current.txs, tx)
|
||||
w.current.receipts = append(w.current.receipts, receipt)
|
||||
env.txs = append(env.txs, tx)
|
||||
env.receipts = append(env.receipts, receipt)
|
||||
|
||||
return receipt.Logs, nil
|
||||
}
|
||||
|
||||
func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool {
|
||||
// Short circuit if current is nil
|
||||
if w.current == nil {
|
||||
return true
|
||||
func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, interrupt *int32) bool {
|
||||
gasLimit := env.header.GasLimit
|
||||
if env.gasPool == nil {
|
||||
env.gasPool = new(core.GasPool).AddGas(gasLimit)
|
||||
}
|
||||
|
||||
gasLimit := w.current.header.GasLimit
|
||||
if w.current.gasPool == nil {
|
||||
w.current.gasPool = new(core.GasPool).AddGas(gasLimit)
|
||||
}
|
||||
|
||||
var coalescedLogs []*types.Log
|
||||
|
||||
for {
|
||||
// In the following three cases, we will interrupt the execution of the transaction.
|
||||
// (1) new head block event arrival, the interrupt signal is 1
|
||||
// (2) worker start or restart, the interrupt signal is 1
|
||||
// (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2.
|
||||
// (3) worker recreate the sealing block with any newly arrived transactions, the interrupt signal is 2.
|
||||
// For the first two cases, the semi-finished work will be discarded.
|
||||
// For the third case, the semi-finished work will be submitted to the consensus engine.
|
||||
if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone {
|
||||
// Notify resubmit loop to increase resubmitting interval due to too frequent commits.
|
||||
if atomic.LoadInt32(interrupt) == commitInterruptResubmit {
|
||||
ratio := float64(gasLimit-w.current.gasPool.Gas()) / float64(gasLimit)
|
||||
ratio := float64(gasLimit-env.gasPool.Gas()) / float64(gasLimit)
|
||||
if ratio < 0.1 {
|
||||
ratio = 0.1
|
||||
}
|
||||
@ -819,8 +870,8 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
|
||||
return atomic.LoadInt32(interrupt) == commitInterruptNewHead
|
||||
}
|
||||
// If we don't have enough gas for any further transactions then we're done
|
||||
if w.current.gasPool.Gas() < params.TxGas {
|
||||
log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas)
|
||||
if env.gasPool.Gas() < params.TxGas {
|
||||
log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
|
||||
break
|
||||
}
|
||||
// Retrieve the next transaction and abort if all done
|
||||
@ -832,19 +883,19 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
|
||||
// during transaction acceptance is the transaction pool.
|
||||
//
|
||||
// We use the eip155 signer regardless of the current hf.
|
||||
from, _ := types.Sender(w.current.signer, tx)
|
||||
from, _ := types.Sender(env.signer, tx)
|
||||
// Check whether the tx is replay protected. If we're not in the EIP155 hf
|
||||
// phase, start ignoring the sender until we do.
|
||||
if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) {
|
||||
if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) {
|
||||
log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block)
|
||||
|
||||
txs.Pop()
|
||||
continue
|
||||
}
|
||||
// Start executing the transaction
|
||||
w.current.state.Prepare(tx.Hash(), w.current.tcount)
|
||||
env.state.Prepare(tx.Hash(), env.tcount)
|
||||
|
||||
logs, err := w.commitTransaction(tx, coinbase)
|
||||
logs, err := w.commitTransaction(env, tx)
|
||||
switch {
|
||||
case errors.Is(err, core.ErrGasLimitReached):
|
||||
// Pop the current out-of-gas transaction without shifting in the next from the account
|
||||
@ -864,7 +915,7 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
|
||||
case errors.Is(err, nil):
|
||||
// Everything ok, collect the logs and shift in the next transaction from the same account
|
||||
coalescedLogs = append(coalescedLogs, logs...)
|
||||
w.current.tcount++
|
||||
env.tcount++
|
||||
txs.Shift()
|
||||
|
||||
case errors.Is(err, core.ErrTxTypeNotSupported):
|
||||
@ -881,8 +932,8 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
|
||||
}
|
||||
|
||||
if !w.isRunning() && len(coalescedLogs) > 0 {
|
||||
// We don't push the pendingLogsEvent while we are mining. The reason is that
|
||||
// when we are mining, the worker will regenerate a mining block every 3 seconds.
|
||||
// We don't push the pendingLogsEvent while we are sealing. The reason is that
|
||||
// when we are sealing, the worker will regenerate a sealing block every 3 seconds.
|
||||
// In order to avoid pushing the repeated pendingLog, we disable the pending log pushing.
|
||||
|
||||
// make a copy, the state caches the logs and these logs get "upgraded" from pending to mined
|
||||
@ -903,24 +954,56 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
|
||||
return false
|
||||
}
|
||||
|
||||
// commitNewWork generates several new sealing tasks based on the parent block.
|
||||
func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) {
|
||||
// generateParams wraps various of settings for generating sealing task.
|
||||
type generateParams struct {
|
||||
timestamp uint64 // The timstamp for sealing task
|
||||
forceTime bool // Flag whether the given timestamp is immutable or not
|
||||
parentHash common.Hash // Parent block hash, empty means the latest chain head
|
||||
coinbase common.Address // The fee recipient address for including transaction
|
||||
random common.Hash // The randomness generated by beacon chain, empty before the merge
|
||||
noUncle bool // Flag whether the uncle block inclusion is allowed
|
||||
noExtra bool // Flag whether the extra field assignment is allowed
|
||||
}
|
||||
|
||||
// prepareWork constructs the sealing task according to the given parameters,
|
||||
// either based on the last chain head or specified parent. In this function
|
||||
// the pending transactions are not filled yet, only the empty task returned.
|
||||
func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
tstart := time.Now()
|
||||
// Find the parent block for sealing task
|
||||
parent := w.chain.CurrentBlock()
|
||||
|
||||
if parent.Time() >= uint64(timestamp) {
|
||||
timestamp = int64(parent.Time() + 1)
|
||||
if genParams.parentHash != (common.Hash{}) {
|
||||
parent = w.chain.GetBlockByHash(genParams.parentHash)
|
||||
}
|
||||
if parent == nil {
|
||||
return nil, fmt.Errorf("missing parent")
|
||||
}
|
||||
// Sanity check the timestamp correctness, recap the timestamp
|
||||
// to parent+1 if the mutation is allowed.
|
||||
timestamp := genParams.timestamp
|
||||
if parent.Time() >= timestamp {
|
||||
if genParams.forceTime {
|
||||
return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time(), timestamp)
|
||||
}
|
||||
timestamp = parent.Time() + 1
|
||||
}
|
||||
// Construct the sealing block header, set the extra field if it's allowed
|
||||
num := parent.Number()
|
||||
header := &types.Header{
|
||||
ParentHash: parent.Hash(),
|
||||
Number: num.Add(num, common.Big1),
|
||||
GasLimit: core.CalcGasLimit(parent.GasLimit(), w.config.GasCeil),
|
||||
Extra: w.extra,
|
||||
Time: uint64(timestamp),
|
||||
Time: timestamp,
|
||||
Coinbase: genParams.coinbase,
|
||||
}
|
||||
if !genParams.noExtra && len(w.extra) != 0 {
|
||||
header.Extra = w.extra
|
||||
}
|
||||
// Set the randomness field from the beacon chain if it's available.
|
||||
if genParams.random != (common.Hash{}) {
|
||||
header.MixDigest = genParams.random
|
||||
}
|
||||
// Set baseFee and GasLimit if we are on an EIP-1559 chain
|
||||
if w.chainConfig.IsLondon(header.Number) {
|
||||
@ -930,83 +1013,47 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
|
||||
header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil)
|
||||
}
|
||||
}
|
||||
// Only set the coinbase if our consensus engine is running (avoid spurious block rewards)
|
||||
if w.isRunning() {
|
||||
if w.coinbase == (common.Address{}) {
|
||||
log.Error("Refusing to mine without etherbase")
|
||||
return
|
||||
}
|
||||
header.Coinbase = w.coinbase
|
||||
}
|
||||
// Run the consensus preparation with the default or customized consensus engine.
|
||||
if err := w.engine.Prepare(w.chain, header); err != nil {
|
||||
log.Error("Failed to prepare header for mining", "err", err)
|
||||
return
|
||||
}
|
||||
// If we are care about TheDAO hard-fork check whether to override the extra-data or not
|
||||
if daoBlock := w.chainConfig.DAOForkBlock; daoBlock != nil {
|
||||
// Check whether the block is among the fork extra-override range
|
||||
limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
|
||||
if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 {
|
||||
// Depending whether we support or oppose the fork, override differently
|
||||
if w.chainConfig.DAOForkSupport {
|
||||
header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
|
||||
} else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
|
||||
header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data
|
||||
}
|
||||
}
|
||||
log.Error("Failed to prepare header for sealing", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
// Could potentially happen if starting to mine in an odd state.
|
||||
err := w.makeCurrent(parent, header)
|
||||
// Note genParams.coinbase can be different with header.Coinbase
|
||||
// since clique algorithm can modify the coinbase field in header.
|
||||
env, err := w.makeEnv(parent, header, genParams.coinbase)
|
||||
if err != nil {
|
||||
log.Error("Failed to create mining context", "err", err)
|
||||
return
|
||||
log.Error("Failed to create sealing context", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
// Create the current work task and check any fork transitions needed
|
||||
env := w.current
|
||||
if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 {
|
||||
misc.ApplyDAOHardFork(env.state)
|
||||
}
|
||||
// Accumulate the uncles for the current block
|
||||
uncles := make([]*types.Header, 0, 2)
|
||||
// Accumulate the uncles for the sealing work only if it's allowed.
|
||||
if !genParams.noUncle {
|
||||
commitUncles := func(blocks map[common.Hash]*types.Block) {
|
||||
// Clean up stale uncle blocks first
|
||||
for hash, uncle := range blocks {
|
||||
if uncle.NumberU64()+staleThreshold <= header.Number.Uint64() {
|
||||
delete(blocks, hash)
|
||||
}
|
||||
}
|
||||
for hash, uncle := range blocks {
|
||||
if len(uncles) == 2 {
|
||||
if len(env.uncles) == 2 {
|
||||
break
|
||||
}
|
||||
if err := w.commitUncle(env, uncle.Header()); err != nil {
|
||||
log.Trace("Possible uncle rejected", "hash", hash, "reason", err)
|
||||
} else {
|
||||
log.Debug("Committing new uncle to block", "hash", hash)
|
||||
uncles = append(uncles, uncle.Header())
|
||||
}
|
||||
}
|
||||
}
|
||||
// Prefer to locally generated uncle
|
||||
commitUncles(w.localUncles)
|
||||
commitUncles(w.remoteUncles)
|
||||
|
||||
// Create an empty block based on temporary copied state for
|
||||
// sealing in advance without waiting block execution finished.
|
||||
if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
|
||||
w.commit(uncles, nil, false, tstart)
|
||||
}
|
||||
return env, nil
|
||||
}
|
||||
|
||||
// fillTransactions retrieves the pending transactions from the txpool and fills them
|
||||
// into the given sealing block. The transaction selection and ordering strategy can
|
||||
// be customized with the plugin in the future.
|
||||
func (w *worker) fillTransactions(interrupt *int32, env *environment) {
|
||||
// Split the pending transactions into locals and remotes
|
||||
// Fill the block with all available pending transactions.
|
||||
pending := w.eth.TxPool().Pending(true)
|
||||
// Short circuit if there is no available pending transactions.
|
||||
// But if we disable empty precommit already, ignore it. Since
|
||||
// empty block is necessary to keep the liveness of the network.
|
||||
if len(pending) == 0 && atomic.LoadUint32(&w.noempty) == 0 {
|
||||
w.updateSnapshot()
|
||||
return
|
||||
}
|
||||
// Split the pending transactions into locals and remotes
|
||||
localTxs, remoteTxs := make(map[common.Address]types.Transactions), pending
|
||||
for _, account := range w.eth.TxPool().Locals() {
|
||||
if txs := remoteTxs[account]; len(txs) > 0 {
|
||||
@ -1015,57 +1062,139 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64)
|
||||
}
|
||||
}
|
||||
if len(localTxs) > 0 {
|
||||
txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs, header.BaseFee)
|
||||
if w.commitTransactions(txs, w.coinbase, interrupt) {
|
||||
txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
|
||||
if w.commitTransactions(env, txs, interrupt) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if len(remoteTxs) > 0 {
|
||||
txs := types.NewTransactionsByPriceAndNonce(w.current.signer, remoteTxs, header.BaseFee)
|
||||
if w.commitTransactions(txs, w.coinbase, interrupt) {
|
||||
txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
|
||||
if w.commitTransactions(env, txs, interrupt) {
|
||||
return
|
||||
}
|
||||
}
|
||||
w.commit(uncles, w.fullTaskHook, true, tstart)
|
||||
}
|
||||
|
||||
// generateWork generates a sealing block based on the given parameters.
|
||||
func (w *worker) generateWork(params *generateParams) (*types.Block, error) {
|
||||
work, err := w.prepareWork(params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer work.discard()
|
||||
|
||||
w.fillTransactions(nil, work)
|
||||
return w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, work.txs, work.unclelist(), work.receipts)
|
||||
}
|
||||
|
||||
// commitWork generates several new sealing tasks based on the parent block
|
||||
// and submit them to the sealer.
|
||||
func (w *worker) commitWork(interrupt *int32, noempty bool, timestamp int64) {
|
||||
start := time.Now()
|
||||
|
||||
// Set the coinbase if the worker is running or it's required
|
||||
var coinbase common.Address
|
||||
if w.isRunning() {
|
||||
if w.coinbase == (common.Address{}) {
|
||||
log.Error("Refusing to mine without etherbase")
|
||||
return
|
||||
}
|
||||
coinbase = w.coinbase // Use the preset address as the fee recipient
|
||||
}
|
||||
work, err := w.prepareWork(&generateParams{
|
||||
timestamp: uint64(timestamp),
|
||||
coinbase: coinbase,
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Create an empty block based on temporary copied state for
|
||||
// sealing in advance without waiting block execution finished.
|
||||
if !noempty && atomic.LoadUint32(&w.noempty) == 0 {
|
||||
w.commit(work.copy(), nil, false, start)
|
||||
}
|
||||
// Fill pending transactions from the txpool
|
||||
w.fillTransactions(interrupt, work)
|
||||
w.commit(work.copy(), w.fullTaskHook, true, start)
|
||||
|
||||
// Swap out the old work with the new one, terminating any leftover
|
||||
// prefetcher processes in the mean time and starting a new one.
|
||||
if w.current != nil {
|
||||
w.current.discard()
|
||||
}
|
||||
w.current = work
|
||||
}
|
||||
|
||||
// commit runs any post-transaction state modifications, assembles the final block
|
||||
// and commits new work if consensus engine is running.
|
||||
func (w *worker) commit(uncles []*types.Header, interval func(), update bool, start time.Time) error {
|
||||
// Deep copy receipts here to avoid interaction between different tasks.
|
||||
receipts := copyReceipts(w.current.receipts)
|
||||
s := w.current.state.Copy()
|
||||
block, err := w.engine.FinalizeAndAssemble(w.chain, w.current.header, s, w.current.txs, uncles, receipts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Note the assumption is held that the mutation is allowed to the passed env, do
|
||||
// the deep copy first.
|
||||
func (w *worker) commit(env *environment, interval func(), update bool, start time.Time) error {
|
||||
if w.isRunning() {
|
||||
if interval != nil {
|
||||
interval()
|
||||
}
|
||||
// If we're post merge, just ignore
|
||||
td, ttd := w.chain.GetTd(block.ParentHash(), block.NumberU64()-1), w.chain.Config().TerminalTotalDifficulty
|
||||
if td != nil && ttd != nil && td.Cmp(ttd) >= 0 {
|
||||
return nil
|
||||
// Create a local environment copy, avoid the data race with snapshot state.
|
||||
// https://github.com/ethereum/go-ethereum/issues/24299
|
||||
env := env.copy()
|
||||
block, err := w.engine.FinalizeAndAssemble(w.chain, env.header, env.state, env.txs, env.unclelist(), env.receipts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If we're post merge, just ignore
|
||||
if !w.isTTDReached(block.Header()) {
|
||||
select {
|
||||
case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}:
|
||||
case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}:
|
||||
w.unconfirmed.Shift(block.NumberU64() - 1)
|
||||
log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
|
||||
"uncles", len(uncles), "txs", w.current.tcount,
|
||||
"gas", block.GasUsed(), "fees", totalFees(block, receipts),
|
||||
log.Info("Commit new sealing work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()),
|
||||
"uncles", len(env.uncles), "txs", env.tcount,
|
||||
"gas", block.GasUsed(), "fees", totalFees(block, env.receipts),
|
||||
"elapsed", common.PrettyDuration(time.Since(start)))
|
||||
|
||||
case <-w.exitCh:
|
||||
log.Info("Worker has exited")
|
||||
}
|
||||
}
|
||||
}
|
||||
if update {
|
||||
w.updateSnapshot()
|
||||
w.updateSnapshot(env)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getSealingBlock generates the sealing block based on the given parameters.
|
||||
func (w *worker) getSealingBlock(parent common.Hash, timestamp uint64, coinbase common.Address, random common.Hash) (*types.Block, error) {
|
||||
req := &getWorkReq{
|
||||
params: &generateParams{
|
||||
timestamp: timestamp,
|
||||
forceTime: true,
|
||||
parentHash: parent,
|
||||
coinbase: coinbase,
|
||||
random: random,
|
||||
noUncle: true,
|
||||
noExtra: true,
|
||||
},
|
||||
result: make(chan *types.Block, 1),
|
||||
}
|
||||
select {
|
||||
case w.getWorkCh <- req:
|
||||
block := <-req.result
|
||||
if block == nil {
|
||||
return nil, req.err
|
||||
}
|
||||
return block, nil
|
||||
case <-w.exitCh:
|
||||
return nil, errors.New("miner closed")
|
||||
}
|
||||
}
|
||||
|
||||
// isTTDReached returns the indicator if the given block has reached the total
|
||||
// terminal difficulty for The Merge transition.
|
||||
func (w *worker) isTTDReached(header *types.Header) bool {
|
||||
td, ttd := w.chain.GetTd(header.ParentHash, header.Number.Uint64()-1), w.chain.Config().TerminalTotalDifficulty
|
||||
return td != nil && ttd != nil && td.Cmp(ttd) >= 0
|
||||
}
|
||||
|
||||
// copyReceipts makes a deep copy of the given receipts.
|
||||
func copyReceipts(receipts []*types.Receipt) []*types.Receipt {
|
||||
result := make([]*types.Receipt, len(receipts))
|
||||
|
@ -17,6 +17,7 @@
|
||||
package miner
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
@ -30,6 +31,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/consensus/ethash"
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||
"github.com/ethereum/go-ethereum/core/state"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
@ -166,6 +168,9 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
|
||||
|
||||
func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain }
|
||||
func (b *testWorkerBackend) TxPool() *core.TxPool { return b.txPool }
|
||||
func (b *testWorkerBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
|
||||
return nil, errors.New("not supported")
|
||||
}
|
||||
|
||||
func (b *testWorkerBackend) newRandomUncle() *types.Block {
|
||||
var parent *types.Block
|
||||
@ -197,7 +202,7 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction {
|
||||
func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) {
|
||||
backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks)
|
||||
backend.txPool.AddLocals(pendingTxs)
|
||||
w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, consensus.NewMerger(rawdb.NewMemoryDatabase()))
|
||||
w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false)
|
||||
w.setEtherbase(testBankAddress)
|
||||
return w, backend
|
||||
}
|
||||
@ -382,7 +387,7 @@ func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, en
|
||||
w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0)
|
||||
defer w.close()
|
||||
|
||||
var taskCh = make(chan struct{})
|
||||
var taskCh = make(chan struct{}, 3)
|
||||
|
||||
taskIndex := 0
|
||||
w.newTaskHook = func(task *task) {
|
||||
@ -521,3 +526,144 @@ func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine co
|
||||
t.Error("interval reset timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSealingWorkEthash(t *testing.T) {
|
||||
testGetSealingWork(t, ethashChainConfig, ethash.NewFaker(), false)
|
||||
}
|
||||
|
||||
func TestGetSealingWorkClique(t *testing.T) {
|
||||
testGetSealingWork(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase()), false)
|
||||
}
|
||||
|
||||
func TestGetSealingWorkPostMerge(t *testing.T) {
|
||||
local := new(params.ChainConfig)
|
||||
*local = *ethashChainConfig
|
||||
local.TerminalTotalDifficulty = big.NewInt(0)
|
||||
testGetSealingWork(t, local, ethash.NewFaker(), true)
|
||||
}
|
||||
|
||||
func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, postMerge bool) {
|
||||
defer engine.Close()
|
||||
|
||||
w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0)
|
||||
defer w.close()
|
||||
|
||||
w.setExtra([]byte{0x01, 0x02})
|
||||
w.postSideBlock(core.ChainSideEvent{Block: b.uncleBlock})
|
||||
|
||||
w.skipSealHook = func(task *task) bool {
|
||||
return true
|
||||
}
|
||||
w.fullTaskHook = func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
timestamp := uint64(time.Now().Unix())
|
||||
assertBlock := func(block *types.Block, number uint64, coinbase common.Address, random common.Hash) {
|
||||
if block.Time() != timestamp {
|
||||
// Sometime the timestamp will be mutated if the timestamp
|
||||
// is even smaller than parent block's. It's OK.
|
||||
t.Logf("Invalid timestamp, want %d, get %d", timestamp, block.Time())
|
||||
}
|
||||
if len(block.Uncles()) != 0 {
|
||||
t.Error("Unexpected uncle block")
|
||||
}
|
||||
_, isClique := engine.(*clique.Clique)
|
||||
if !isClique {
|
||||
if len(block.Extra()) != 0 {
|
||||
t.Error("Unexpected extra field")
|
||||
}
|
||||
if block.Coinbase() != coinbase {
|
||||
t.Errorf("Unexpected coinbase got %x want %x", block.Coinbase(), coinbase)
|
||||
}
|
||||
} else {
|
||||
if block.Coinbase() != (common.Address{}) {
|
||||
t.Error("Unexpected coinbase")
|
||||
}
|
||||
}
|
||||
if !isClique {
|
||||
if block.MixDigest() != random {
|
||||
t.Error("Unexpected mix digest")
|
||||
}
|
||||
}
|
||||
if block.Nonce() != 0 {
|
||||
t.Error("Unexpected block nonce")
|
||||
}
|
||||
if block.NumberU64() != number {
|
||||
t.Errorf("Mismatched block number, want %d got %d", number, block.NumberU64())
|
||||
}
|
||||
}
|
||||
var cases = []struct {
|
||||
parent common.Hash
|
||||
coinbase common.Address
|
||||
random common.Hash
|
||||
expectNumber uint64
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
b.chain.Genesis().Hash(),
|
||||
common.HexToAddress("0xdeadbeef"),
|
||||
common.HexToHash("0xcafebabe"),
|
||||
uint64(1),
|
||||
false,
|
||||
},
|
||||
{
|
||||
b.chain.CurrentBlock().Hash(),
|
||||
common.HexToAddress("0xdeadbeef"),
|
||||
common.HexToHash("0xcafebabe"),
|
||||
b.chain.CurrentBlock().NumberU64() + 1,
|
||||
false,
|
||||
},
|
||||
{
|
||||
b.chain.CurrentBlock().Hash(),
|
||||
common.Address{},
|
||||
common.HexToHash("0xcafebabe"),
|
||||
b.chain.CurrentBlock().NumberU64() + 1,
|
||||
false,
|
||||
},
|
||||
{
|
||||
b.chain.CurrentBlock().Hash(),
|
||||
common.Address{},
|
||||
common.Hash{},
|
||||
b.chain.CurrentBlock().NumberU64() + 1,
|
||||
false,
|
||||
},
|
||||
{
|
||||
common.HexToHash("0xdeadbeef"),
|
||||
common.HexToAddress("0xdeadbeef"),
|
||||
common.HexToHash("0xcafebabe"),
|
||||
0,
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
// This API should work even when the automatic sealing is not enabled
|
||||
for _, c := range cases {
|
||||
block, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random)
|
||||
if c.expectErr {
|
||||
if err == nil {
|
||||
t.Error("Expect error but get nil")
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
assertBlock(block, c.expectNumber, c.coinbase, c.random)
|
||||
}
|
||||
}
|
||||
|
||||
// This API should work even when the automatic sealing is enabled
|
||||
w.start()
|
||||
for _, c := range cases {
|
||||
block, err := w.getSealingBlock(c.parent, timestamp, c.coinbase, c.random)
|
||||
if c.expectErr {
|
||||
if err == nil {
|
||||
t.Error("Expect error but get nil")
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
assertBlock(block, c.expectNumber, c.coinbase, c.random)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -393,7 +393,7 @@ func TestLifecycleTerminationGuarantee(t *testing.T) {
|
||||
// on the given prefix
|
||||
func TestRegisterHandler_Successful(t *testing.T) {
|
||||
node := createNode(t, 7878, 7979)
|
||||
|
||||
defer node.Close()
|
||||
// create and mount handler
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("success"))
|
||||
|
@ -943,9 +943,8 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
|
||||
}
|
||||
|
||||
// If dialing, figure out the remote public key.
|
||||
var dialPubkey *ecdsa.PublicKey
|
||||
if dialDest != nil {
|
||||
dialPubkey = new(ecdsa.PublicKey)
|
||||
dialPubkey := new(ecdsa.PublicKey)
|
||||
if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil {
|
||||
err = errors.New("dial destination doesn't have a secp256k1 public key")
|
||||
srv.log.Trace("Setting up connection failed", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err)
|
||||
|
@ -267,7 +267,7 @@ var (
|
||||
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
|
||||
|
||||
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
|
||||
TestRules = TestChainConfig.Rules(new(big.Int))
|
||||
TestRules = TestChainConfig.Rules(new(big.Int), false)
|
||||
)
|
||||
|
||||
// TrustedCheckpoint represents a set of post-processed trie roots (CHT and
|
||||
@ -668,10 +668,11 @@ type Rules struct {
|
||||
IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool
|
||||
IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
|
||||
IsBerlin, IsLondon bool
|
||||
IsMerge bool
|
||||
}
|
||||
|
||||
// Rules ensures c's ChainID is not nil.
|
||||
func (c *ChainConfig) Rules(num *big.Int) Rules {
|
||||
func (c *ChainConfig) Rules(num *big.Int, isMerge bool) Rules {
|
||||
chainID := c.ChainID
|
||||
if chainID == nil {
|
||||
chainID = new(big.Int)
|
||||
@ -688,5 +689,6 @@ func (c *ChainConfig) Rules(num *big.Int) Rules {
|
||||
IsIstanbul: c.IsIstanbul(num),
|
||||
IsBerlin: c.IsBerlin(num),
|
||||
IsLondon: c.IsLondon(num),
|
||||
IsMerge: isMerge,
|
||||
}
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ import (
|
||||
const (
|
||||
VersionMajor = 1 // Major version component of the current release
|
||||
VersionMinor = 10 // Minor version component of the current release
|
||||
VersionPatch = 15 // Patch version component of the current release
|
||||
VersionPatch = 16 // Patch version component of the current release
|
||||
VersionMeta = "stable" // Version metadata to append to the version string
|
||||
)
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user