all: release go-ethereum v1.13.12 (#28961)

commit 02eb36afc2

Makefile (12 lines changed)
@@ -8,20 +8,25 @@ GOBIN = ./build/bin
 GO ?= latest
 GORUN = go run

+#? geth: Build geth
 geth:
 	$(GORUN) build/ci.go install ./cmd/geth
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."

+#? all: Build all packages and executables
 all:
 	$(GORUN) build/ci.go install

+#? test: Run the tests
 test: all
 	$(GORUN) build/ci.go test

+#? lint: Run certain pre-selected linters
 lint: ## Run linters.
 	$(GORUN) build/ci.go lint

+#? clean: Clean go cache, built executables, and the auto generated folder
 clean:
 	go clean -cache
 	rm -fr build/_workspace/pkg/ $(GOBIN)/*
@@ -29,6 +34,7 @@ clean:
 # The devtools target installs tools required for 'go generate'.
 # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.

+#? devtools: Install recommended developer tools
 devtools:
 	env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
 	env GOBIN= go install github.com/fjl/gencodec@latest
@@ -36,3 +42,9 @@ devtools:
 	env GOBIN= go install ./cmd/abigen
 	@type "solc" 2> /dev/null || echo 'Please install solc'
 	@type "protoc" 2> /dev/null || echo 'Please install protoc'
+
+#? help: Get more info on make commands.
+help: Makefile
+	@echo " Choose a command run in go-ethereum:"
+	@sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /'
+.PHONY: help
@@ -6,7 +6,7 @@ Golang execution layer implementation of the Ethereum protocol.
 https://pkg.go.dev/badge/github.com/ethereum/go-ethereum
 )](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
 [![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
-[![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum)
+[![Travis](https://app.travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://app.travis-ci.com/github/ethereum/go-ethereum)
 [![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv)

 Automated builds are available for stable releases and the unstable master branch. Binary
@@ -29,7 +29,7 @@ import (
 )

 // The ABI holds information about a contract's context and available
-// invokable methods. It will allow you to type check function calls and
+// invocable methods. It will allow you to type check function calls and
 // packs data accordingly.
 type ABI struct {
 	Constructor Method
@@ -65,7 +65,7 @@ func TestWaitDeployed(t *testing.T) {

 		// Create the transaction
 		head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough
-		gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
+		gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))

 		tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code))
 		tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey)
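Note on the change above (not part of the commit): under EIP-1559 a legacy-priced transaction is only attractive if its gas price clears the block base fee, so the test now bids a full gwei above the base fee instead of a single wei. A minimal sketch of the arithmetic, assuming params.GWei is 10^9 wei and using a made-up base fee:

	package main

	import (
		"fmt"
		"math/big"

		"github.com/ethereum/go-ethereum/params"
	)

	func main() {
		baseFee := big.NewInt(875_000_000) // hypothetical block base fee, in wei
		// Same pattern as the updated test: legacy gas price = base fee + 1 gwei tip.
		gasPrice := new(big.Int).Add(baseFee, big.NewInt(params.GWei))
		fmt.Println(gasPrice.String()) // 1875000000
	}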
@@ -241,7 +241,7 @@ func (hub *Hub) refreshWallets() {
 			card.Disconnect(pcsc.LeaveCard)
 			continue
 		}
-		// Card connected, start tracking in amongs the wallets
+		// Card connected, start tracking among the wallets
 		hub.wallets[reader] = wallet
 		events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
 	}
@@ -166,7 +166,7 @@ func (c *Conn) ReadEth() (any, error) {
 	case eth.TransactionsMsg:
 		msg = new(eth.TransactionsPacket)
 	case eth.NewPooledTransactionHashesMsg:
-		msg = new(eth.NewPooledTransactionHashesPacket68)
+		msg = new(eth.NewPooledTransactionHashesPacket)
 	case eth.GetPooledTransactionsMsg:
 		msg = new(eth.GetPooledTransactionsPacket)
 	case eth.PooledTransactionsMsg:
@@ -710,7 +710,7 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
 	}

 	// Send announcement.
-	ann := eth.NewPooledTransactionHashesPacket68{Types: txTypes, Sizes: sizes, Hashes: hashes}
+	ann := eth.NewPooledTransactionHashesPacket{Types: txTypes, Sizes: sizes, Hashes: hashes}
 	err = conn.Write(ethProto, eth.NewPooledTransactionHashesMsg, ann)
 	if err != nil {
 		t.Fatalf("failed to write to connection: %v", err)
@@ -728,7 +728,7 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
 				t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
 			}
 			return
-		case *eth.NewPooledTransactionHashesPacket68:
+		case *eth.NewPooledTransactionHashesPacket:
 			continue
 		case *eth.TransactionsPacket:
 			continue
@@ -796,12 +796,12 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
 		t2 = s.makeBlobTxs(2, 3, 0x2)
 	)
 	for _, test := range []struct {
-		ann  eth.NewPooledTransactionHashesPacket68
+		ann  eth.NewPooledTransactionHashesPacket
 		resp eth.PooledTransactionsResponse
 	}{
 		// Invalid tx size.
 		{
-			ann: eth.NewPooledTransactionHashesPacket68{
+			ann: eth.NewPooledTransactionHashesPacket{
 				Types:  []byte{types.BlobTxType, types.BlobTxType},
 				Sizes:  []uint32{uint32(t1[0].Size()), uint32(t1[1].Size() + 10)},
 				Hashes: []common.Hash{t1[0].Hash(), t1[1].Hash()},
@@ -810,7 +810,7 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
 		},
 		// Wrong tx type.
 		{
-			ann: eth.NewPooledTransactionHashesPacket68{
+			ann: eth.NewPooledTransactionHashesPacket{
 				Types:  []byte{types.DynamicFeeTxType, types.BlobTxType},
 				Sizes:  []uint32{uint32(t2[0].Size()), uint32(t2[1].Size())},
 				Hashes: []common.Hash{t2[0].Hash(), t2[1].Hash()},
@@ -70,7 +70,7 @@ func (s *Suite) sendTxs(txs []*types.Transaction) error {
 			for _, tx := range *msg {
 				got[tx.Hash()] = true
 			}
-		case *eth.NewPooledTransactionHashesPacket68:
+		case *eth.NewPooledTransactionHashesPacket:
 			for _, hash := range msg.Hashes {
 				got[hash] = true
 			}
@@ -146,7 +146,7 @@ func (s *Suite) sendInvalidTxs(txs []*types.Transaction) error {
 					return fmt.Errorf("received bad tx: %s", tx.Hash())
 				}
 			}
-		case *eth.NewPooledTransactionHashesPacket68:
+		case *eth.NewPooledTransactionHashesPacket:
 			for _, hash := range msg.Hashes {
 				if _, ok := invalids[hash]; ok {
 					return fmt.Errorf("received bad tx: %s", hash)
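The renames above drop the eth/68-specific "68" suffix now that this is the only announcement format the suite speaks. For orientation (a sketch, not part of the commit): the packet still carries parallel Types/Sizes/Hashes slices, filled one entry per announced transaction, which is exactly what the blob-violation tests above corrupt on purpose:

	package ethtest

	import (
		"github.com/ethereum/go-ethereum/core/types"
		"github.com/ethereum/go-ethereum/eth/protocols/eth"
	)

	// announce mirrors how the suite builds an announcement: one parallel
	// entry in Types, Sizes, and Hashes per transaction.
	func announce(txs []*types.Transaction) eth.NewPooledTransactionHashesPacket {
		var ann eth.NewPooledTransactionHashesPacket
		for _, tx := range txs {
			ann.Types = append(ann.Types, tx.Type())
			ann.Sizes = append(ann.Sizes, uint32(tx.Size()))
			ann.Hashes = append(ann.Hashes, tx.Hash())
		}
		return ann
	}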
cmd/era/main.go (new file, 324 lines)

// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
	"os"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/internal/era"
	"github.com/ethereum/go-ethereum/internal/ethapi"
	"github.com/ethereum/go-ethereum/internal/flags"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/urfave/cli/v2"
)

var app = flags.NewApp("go-ethereum era tool")

var (
	dirFlag = &cli.StringFlag{
		Name:  "dir",
		Usage: "directory storing all relevant era1 files",
		Value: "eras",
	}
	networkFlag = &cli.StringFlag{
		Name:  "network",
		Usage: "network name associated with era1 files",
		Value: "mainnet",
	}
	eraSizeFlag = &cli.IntFlag{
		Name:  "size",
		Usage: "number of blocks per era",
		Value: era.MaxEra1Size,
	}
	txsFlag = &cli.BoolFlag{
		Name:  "txs",
		Usage: "print full transaction values",
	}
)

var (
	blockCommand = &cli.Command{
		Name:      "block",
		Usage:     "get block data",
		ArgsUsage: "<number>",
		Action:    block,
		Flags: []cli.Flag{
			txsFlag,
		},
	}
	infoCommand = &cli.Command{
		Name:      "info",
		ArgsUsage: "<epoch>",
		Usage:     "get epoch information",
		Action:    info,
	}
	verifyCommand = &cli.Command{
		Name:      "verify",
		ArgsUsage: "<expected>",
		Usage:     "verifies each era1 against expected accumulator root",
		Action:    verify,
	}
)

func init() {
	app.Commands = []*cli.Command{
		blockCommand,
		infoCommand,
		verifyCommand,
	}
	app.Flags = []cli.Flag{
		dirFlag,
		networkFlag,
		eraSizeFlag,
	}
}

func main() {
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
}

// block prints the specified block from an era1 store.
func block(ctx *cli.Context) error {
	num, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
	if err != nil {
		return fmt.Errorf("invalid block number: %w", err)
	}
	e, err := open(ctx, num/uint64(ctx.Int(eraSizeFlag.Name)))
	if err != nil {
		return fmt.Errorf("error opening era1: %w", err)
	}
	defer e.Close()
	// Read block with number.
	block, err := e.GetBlockByNumber(num)
	if err != nil {
		return fmt.Errorf("error reading block %d: %w", num, err)
	}
	// Convert block to JSON and print.
	val := ethapi.RPCMarshalBlock(block, ctx.Bool(txsFlag.Name), ctx.Bool(txsFlag.Name), params.MainnetChainConfig)
	b, err := json.MarshalIndent(val, "", "  ")
	if err != nil {
		return fmt.Errorf("error marshaling json: %w", err)
	}
	fmt.Println(string(b))
	return nil
}

// info prints some high-level information about the era1 file.
func info(ctx *cli.Context) error {
	epoch, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
	if err != nil {
		return fmt.Errorf("invalid epoch number: %w", err)
	}
	e, err := open(ctx, epoch)
	if err != nil {
		return err
	}
	defer e.Close()
	acc, err := e.Accumulator()
	if err != nil {
		return fmt.Errorf("error reading accumulator: %w", err)
	}
	td, err := e.InitialTD()
	if err != nil {
		return fmt.Errorf("error reading total difficulty: %w", err)
	}
	info := struct {
		Accumulator     common.Hash `json:"accumulator"`
		TotalDifficulty *big.Int    `json:"totalDifficulty"`
		StartBlock      uint64      `json:"startBlock"`
		Count           uint64      `json:"count"`
	}{
		acc, td, e.Start(), e.Count(),
	}
	b, _ := json.MarshalIndent(info, "", "  ")
	fmt.Println(string(b))
	return nil
}

// open opens an era1 file at a certain epoch.
func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
	var (
		dir     = ctx.String(dirFlag.Name)
		network = ctx.String(networkFlag.Name)
	)
	entries, err := era.ReadDir(dir, network)
	if err != nil {
		return nil, fmt.Errorf("error reading era dir: %w", err)
	}
	if epoch >= uint64(len(entries)) {
		return nil, fmt.Errorf("epoch out-of-bounds: last %d, want %d", len(entries)-1, epoch)
	}
	return era.Open(path.Join(dir, entries[epoch]))
}

// verify checks each era1 file in a directory to ensure it is well-formed and
// that the accumulator matches the expected value.
func verify(ctx *cli.Context) error {
	if ctx.Args().Len() != 1 {
		return fmt.Errorf("missing accumulators file")
	}

	roots, err := readHashes(ctx.Args().First())
	if err != nil {
		return fmt.Errorf("unable to read expected roots file: %w", err)
	}

	var (
		dir      = ctx.String(dirFlag.Name)
		network  = ctx.String(networkFlag.Name)
		start    = time.Now()
		reported = time.Now()
	)

	entries, err := era.ReadDir(dir, network)
	if err != nil {
		return fmt.Errorf("error reading %s: %w", dir, err)
	}

	if len(entries) != len(roots) {
		return fmt.Errorf("number of era1 files should match the number of accumulator hashes")
	}

	// Verify each epoch matches the expected root.
	for i, want := range roots {
		// Wrap in function so defers don't stack.
		err := func() error {
			name := entries[i]
			e, err := era.Open(path.Join(dir, name))
			if err != nil {
				return fmt.Errorf("error opening era1 file %s: %w", name, err)
			}
			defer e.Close()
			// Read accumulator and check against expected.
			if got, err := e.Accumulator(); err != nil {
				return fmt.Errorf("error retrieving accumulator for %s: %w", name, err)
			} else if got != want {
				return fmt.Errorf("invalid root %s: got %s, want %s", name, got, want)
			}
			// Recompute accumulator.
			if err := checkAccumulator(e); err != nil {
				return fmt.Errorf("error verifying era1 file %s: %w", name, err)
			}
			// Give the user some feedback that something is happening.
			if time.Since(reported) >= 8*time.Second {
				fmt.Printf("Verifying Era1 files \t\t verified=%d,\t elapsed=%s\n", i, common.PrettyDuration(time.Since(start)))
				reported = time.Now()
			}
			return nil
		}()
		if err != nil {
			return err
		}
	}

	return nil
}

// checkAccumulator verifies the accumulator matches the data in the Era.
func checkAccumulator(e *era.Era) error {
	var (
		err    error
		want   common.Hash
		td     *big.Int
		tds    = make([]*big.Int, 0)
		hashes = make([]common.Hash, 0)
	)
	if want, err = e.Accumulator(); err != nil {
		return fmt.Errorf("error reading accumulator: %w", err)
	}
	if td, err = e.InitialTD(); err != nil {
		return fmt.Errorf("error reading total difficulty: %w", err)
	}
	it, err := era.NewIterator(e)
	if err != nil {
		return fmt.Errorf("error making era iterator: %w", err)
	}
	// To fully verify an era the following attributes must be checked:
	//   1) the block index is constructed correctly
	//   2) the tx root matches the value in the block
	//   3) the receipts root matches the value in the block
	//   4) the starting total difficulty value is correct
	//   5) the accumulator is correct by recomputing it locally, which verifies
	//      the blocks are all correct (via hash)
	//
	// The attributes 1), 2), and 3) are checked for each block. 4) and 5) require
	// accumulation across the entire set and are verified at the end.
	for it.Next() {
		// 1) next() walks the block index, so we're able to implicitly verify it.
		if it.Error() != nil {
			return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error())
		}
		block, receipts, err := it.BlockAndReceipts()
		if err != nil {
			return fmt.Errorf("error reading block %d: %w", it.Number(), err)
		}
		// 2) recompute tx root and verify against header.
		tr := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil))
		if tr != block.TxHash() {
			return fmt.Errorf("tx root in block %d mismatch: want %s, got %s", block.NumberU64(), block.TxHash(), tr)
		}
		// 3) recompute receipt root and check value against block.
		rr := types.DeriveSha(receipts, trie.NewStackTrie(nil))
		if rr != block.ReceiptHash() {
			return fmt.Errorf("receipt root in block %d mismatch: want %s, got %s", block.NumberU64(), block.ReceiptHash(), rr)
		}
		hashes = append(hashes, block.Hash())
		td.Add(td, block.Difficulty())
		tds = append(tds, new(big.Int).Set(td))
	}
	// 4+5) Verify accumulator and total difficulty.
	got, err := era.ComputeAccumulator(hashes, tds)
	if err != nil {
		return fmt.Errorf("error computing accumulator: %w", err)
	}
	if got != want {
		return fmt.Errorf("expected accumulator root does not match calculated: got %s, want %s", got, want)
	}
	return nil
}

// readHashes reads a file of newline-delimited hashes.
func readHashes(f string) ([]common.Hash, error) {
	b, err := os.ReadFile(f)
	if err != nil {
		return nil, fmt.Errorf("unable to open accumulators file")
	}
	s := strings.Split(string(b), "\n")
	// Remove empty last element, if present.
	if s[len(s)-1] == "" {
		s = s[:len(s)-1]
	}
	// Convert to hashes.
	r := make([]common.Hash, len(s))
	for i := range s {
		r[i] = common.HexToHash(s[i])
	}
	return r, nil
}
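The tool above is a thin CLI over the internal/era package, and the same calls can be driven directly from Go inside the repository (internal packages are not importable from outside the module). A minimal sketch, not part of the commit, assuming an "eras" directory of mainnet Era1 files:

	package main

	import (
		"fmt"
		"path"

		"github.com/ethereum/go-ethereum/internal/era"
	)

	func main() {
		// List era1 files for a network, as the block, info, and verify
		// commands all do via era.ReadDir.
		entries, err := era.ReadDir("eras", "mainnet")
		if err != nil || len(entries) == 0 {
			panic("no era1 files found")
		}
		e, err := era.Open(path.Join("eras", entries[0]))
		if err != nil {
			panic(err)
		}
		defer e.Close()

		acc, err := e.Accumulator()
		if err != nil {
			panic(err)
		}
		fmt.Printf("epoch 0: start=%d count=%d accumulator=%s\n", e.Start(), e.Count(), acc)
	}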
@@ -35,10 +35,12 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/internal/era"
 	"github.com/ethereum/go-ethereum/internal/flags"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/params"
 	"github.com/urfave/cli/v2"
 )

@@ -122,6 +124,33 @@ Optional second and third arguments control the first and
 last block to write. In this mode, the file will be appended
 if already existing. If the file ends with .gz, the output will
 be gzipped.`,
+	}
+	importHistoryCommand = &cli.Command{
+		Action:    importHistory,
+		Name:      "import-history",
+		Usage:     "Import an Era archive",
+		ArgsUsage: "<dir>",
+		Flags: flags.Merge([]cli.Flag{
+			utils.TxLookupLimitFlag,
+		},
+			utils.DatabaseFlags,
+			utils.NetworkFlags,
+		),
+		Description: `
+The import-history command will import blocks and their corresponding receipts
+from Era archives.
+`,
+	}
+	exportHistoryCommand = &cli.Command{
+		Action:    exportHistory,
+		Name:      "export-history",
+		Usage:     "Export blockchain history to Era archives",
+		ArgsUsage: "<dir> <first> <last>",
+		Flags:     flags.Merge(utils.DatabaseFlags),
+		Description: `
+The export-history command will export blocks and their corresponding receipts
+into Era archives. Eras are typically packaged in steps of 8192 blocks.
+`,
 	}
 	importPreimagesCommand = &cli.Command{
 		Action: importPreimages,
@@ -364,7 +393,97 @@ func exportChain(ctx *cli.Context) error {
 		}
 		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
 	}
+	if err != nil {
+		utils.Fatalf("Export error: %v\n", err)
+	}
+	fmt.Printf("Export done in %v\n", time.Since(start))
+	return nil
+}
+
+func importHistory(ctx *cli.Context) error {
+	if ctx.Args().Len() != 1 {
+		utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
+	}
+
+	stack, _ := makeConfigNode(ctx)
+	defer stack.Close()
+
+	chain, db := utils.MakeChain(ctx, stack, false)
+	defer db.Close()
+
+	var (
+		start   = time.Now()
+		dir     = ctx.Args().Get(0)
+		network string
+	)
+
+	// Determine network.
+	if utils.IsNetworkPreset(ctx) {
+		switch {
+		case ctx.Bool(utils.MainnetFlag.Name):
+			network = "mainnet"
+		case ctx.Bool(utils.SepoliaFlag.Name):
+			network = "sepolia"
+		case ctx.Bool(utils.GoerliFlag.Name):
+			network = "goerli"
+		}
+	} else {
+		// No network flag set, try to determine network based on files
+		// present in directory.
+		var networks []string
+		for _, n := range params.NetworkNames {
+			entries, err := era.ReadDir(dir, n)
+			if err != nil {
+				return fmt.Errorf("error reading %s: %w", dir, err)
+			}
+			if len(entries) > 0 {
+				networks = append(networks, n)
+			}
+		}
+		if len(networks) == 0 {
+			return fmt.Errorf("no era1 files found in %s", dir)
+		}
+		if len(networks) > 1 {
+			return fmt.Errorf("multiple networks found, use a network flag to specify desired network")
+		}
+		network = networks[0]
+	}
+
+	if err := utils.ImportHistory(chain, db, dir, network); err != nil {
+		return err
+	}
+	fmt.Printf("Import done in %v\n", time.Since(start))
+	return nil
+}
+
+// exportHistory exports chain history in Era archives at a specified
+// directory.
+func exportHistory(ctx *cli.Context) error {
+	if ctx.Args().Len() != 3 {
+		utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
+	}
+
+	stack, _ := makeConfigNode(ctx)
+	defer stack.Close()
+
+	chain, _ := utils.MakeChain(ctx, stack, true)
+	start := time.Now()
+
+	var (
+		dir         = ctx.Args().Get(0)
+		first, ferr = strconv.ParseInt(ctx.Args().Get(1), 10, 64)
+		last, lerr  = strconv.ParseInt(ctx.Args().Get(2), 10, 64)
+	)
+	if ferr != nil || lerr != nil {
+		utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
+	}
+	if first < 0 || last < 0 {
+		utils.Fatalf("Export error: block number must be greater than 0\n")
+	}
+	if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
+		utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
+	}
+	err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), uint64(era.MaxEra1Size))
 	if err != nil {
 		utils.Fatalf("Export error: %v\n", err)
 	}
@@ -208,6 +208,8 @@ func init() {
 		initCommand,
 		importCommand,
 		exportCommand,
+		importHistoryCommand,
+		exportHistoryCommand,
 		importPreimagesCommand,
 		removedbCommand,
 		dumpCommand,
cmd/utils/cmd.go (191 lines changed)

@@ -19,12 +19,15 @@ package utils

 import (
 	"bufio"
+	"bytes"
 	"compress/gzip"
+	"crypto/sha256"
 	"errors"
 	"fmt"
 	"io"
 	"os"
 	"os/signal"
+	"path"
 	"runtime"
 	"strings"
 	"syscall"
@@ -39,8 +42,10 @@ import (
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/internal/debug"
+	"github.com/ethereum/go-ethereum/internal/era"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/urfave/cli/v2"
 )
@@ -228,6 +233,105 @@ func ImportChain(chain *core.BlockChain, fn string) error {
 	return nil
 }

+func readList(filename string) ([]string, error) {
+	b, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return strings.Split(string(b), "\n"), nil
+}
+
+// ImportHistory imports Era1 files containing historical block information,
+// starting from genesis.
+func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, network string) error {
+	if chain.CurrentSnapBlock().Number.BitLen() != 0 {
+		return fmt.Errorf("history import only supported when starting from genesis")
+	}
+	entries, err := era.ReadDir(dir, network)
+	if err != nil {
+		return fmt.Errorf("error reading %s: %w", dir, err)
+	}
+	checksums, err := readList(path.Join(dir, "checksums.txt"))
+	if err != nil {
+		return fmt.Errorf("unable to read checksums.txt: %w", err)
+	}
+	if len(checksums) != len(entries) {
+		return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries", len(checksums), len(entries))
+	}
+	var (
+		start    = time.Now()
+		reported = time.Now()
+		imported = 0
+		forker   = core.NewForkChoice(chain, nil)
+		h        = sha256.New()
+		buf      = bytes.NewBuffer(nil)
+	)
+	for i, filename := range entries {
+		err := func() error {
+			f, err := os.Open(path.Join(dir, filename))
+			if err != nil {
+				return fmt.Errorf("unable to open era: %w", err)
+			}
+			defer f.Close()
+
+			// Validate checksum.
+			if _, err := io.Copy(h, f); err != nil {
+				return fmt.Errorf("unable to recalculate checksum: %w", err)
+			}
+			if have, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; have != want {
+				return fmt.Errorf("checksum mismatch: have %s, want %s", have, want)
+			}
+			h.Reset()
+			buf.Reset()
+
+			// Import all block data from Era1.
+			e, err := era.From(f)
+			if err != nil {
+				return fmt.Errorf("error opening era: %w", err)
+			}
+			it, err := era.NewIterator(e)
+			if err != nil {
+				return fmt.Errorf("error making era reader: %w", err)
+			}
+			for it.Next() {
+				block, err := it.Block()
+				if err != nil {
+					return fmt.Errorf("error reading block %d: %w", it.Number(), err)
+				}
+				if block.Number().BitLen() == 0 {
+					continue // skip genesis
+				}
+				receipts, err := it.Receipts()
+				if err != nil {
+					return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
+				}
+				if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start, forker); err != nil {
+					return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
+				} else if status != core.CanonStatTy {
+					return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)
+				}
+				if _, err := chain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{receipts}, 2^64-1); err != nil {
+					return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
+				}
+				imported += 1
+
+				// Give the user some feedback that something is happening.
+				if time.Since(reported) >= 8*time.Second {
+					log.Info("Importing Era files", "head", it.Number(), "imported", imported, "elapsed", common.PrettyDuration(time.Since(start)))
+					imported = 0
+					reported = time.Now()
+				}
+			}
+			return nil
+		}()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
 	head := chain.CurrentBlock()
 	for i, block := range blocks {
@@ -297,6 +401,93 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
 	return nil
 }

+// ExportHistory exports blockchain history into the specified directory,
+// following the Era format.
+func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) error {
+	log.Info("Exporting blockchain history", "dir", dir)
+	if head := bc.CurrentBlock().Number.Uint64(); head < last {
+		log.Warn("Last block beyond head, setting last = head", "head", head, "last", last)
+		last = head
+	}
+	network := "unknown"
+	if name, ok := params.NetworkNames[bc.Config().ChainID.String()]; ok {
+		network = name
+	}
+	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
+		return fmt.Errorf("error creating output directory: %w", err)
+	}
+	var (
+		start     = time.Now()
+		reported  = time.Now()
+		h         = sha256.New()
+		buf       = bytes.NewBuffer(nil)
+		checksums []string
+	)
+	for i := first; i <= last; i += step {
+		err := func() error {
+			filename := path.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
+			f, err := os.Create(filename)
+			if err != nil {
+				return fmt.Errorf("could not create era file: %w", err)
+			}
+			defer f.Close()
+
+			w := era.NewBuilder(f)
+			for j := uint64(0); j < step && j <= last-i; j++ {
+				var (
+					n     = i + j
+					block = bc.GetBlockByNumber(n)
+				)
+				if block == nil {
+					return fmt.Errorf("export failed on #%d: not found", n)
+				}
+				receipts := bc.GetReceiptsByHash(block.Hash())
+				if receipts == nil {
+					return fmt.Errorf("export failed on #%d: receipts not found", n)
+				}
+				td := bc.GetTd(block.Hash(), block.NumberU64())
+				if td == nil {
+					return fmt.Errorf("export failed on #%d: total difficulty not found", n)
+				}
+				if err := w.Add(block, receipts, td); err != nil {
+					return err
+				}
+			}
+			root, err := w.Finalize()
+			if err != nil {
+				return fmt.Errorf("export failed to finalize %d: %w", i/step, err)
+			}
+			// Set correct filename with root.
+			os.Rename(filename, path.Join(dir, era.Filename(network, int(i/step), root)))
+
+			// Compute checksum of entire Era1.
+			if _, err := f.Seek(0, io.SeekStart); err != nil {
+				return err
+			}
+			if _, err := io.Copy(h, f); err != nil {
+				return fmt.Errorf("unable to calculate checksum: %w", err)
+			}
+			checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex())
+			h.Reset()
+			buf.Reset()
+			return nil
+		}()
+		if err != nil {
+			return err
+		}
+		if time.Since(reported) >= 8*time.Second {
+			log.Info("Exporting blocks", "exported", i, "elapsed", common.PrettyDuration(time.Since(start)))
+			reported = time.Now()
+		}
+	}
+
+	os.WriteFile(path.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm)
+
+	log.Info("Exported blockchain to", "dir", dir)
+
+	return nil
+}
+
 // ImportPreimages imports a batch of exported hash preimages into the database.
 // It's a part of the deprecated functionality, should be removed in the future.
 func ImportPreimages(db ethdb.Database, fn string) error {
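Both helpers above fingerprint each Era1 file with a SHA-256 over the whole file and store the digests as newline-delimited hex in checksums.txt. A standalone sketch of that verification step, not part of the commit (the file name is a placeholder):

	package main

	import (
		"crypto/sha256"
		"fmt"
		"io"
		"os"

		"github.com/ethereum/go-ethereum/common"
	)

	func main() {
		f, err := os.Open("mainnet-00000-00000000.era1") // hypothetical era1 file
		if err != nil {
			panic(err)
		}
		defer f.Close()

		// Same pattern as ImportHistory: stream the file through sha256
		// and render the digest as a common.Hash hex string, then compare
		// it against the matching line in checksums.txt.
		h := sha256.New()
		if _, err := io.Copy(h, f); err != nil {
			panic(err)
		}
		fmt.Println(common.BytesToHash(h.Sum(nil)).Hex())
	}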
cmd/utils/history_test.go (new file, 184 lines)

// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package utils

import (
	"bytes"
	"crypto/sha256"
	"io"
	"math/big"
	"os"
	"path"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/internal/era"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

var (
	count uint64 = 128
	step  uint64 = 16
)

func TestHistoryImportAndExport(t *testing.T) {
	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		genesis = &core.Genesis{
			Config: params.TestChainConfig,
			Alloc:  core.GenesisAlloc{address: {Balance: big.NewInt(1000000000000000000)}},
		}
		signer = types.LatestSigner(genesis.Config)
	)

	// Generate chain.
	db, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), int(count), func(i int, g *core.BlockGen) {
		if i == 0 {
			return
		}
		tx, err := types.SignNewTx(key, signer, &types.DynamicFeeTx{
			ChainID:    genesis.Config.ChainID,
			Nonce:      uint64(i - 1),
			GasTipCap:  common.Big0,
			GasFeeCap:  g.PrevBlock(0).BaseFee(),
			Gas:        50000,
			To:         &common.Address{0xaa},
			Value:      big.NewInt(int64(i)),
			Data:       nil,
			AccessList: nil,
		})
		if err != nil {
			t.Fatalf("error creating tx: %v", err)
		}
		g.AddTx(tx)
	})

	// Initialize BlockChain.
	chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("unable to initialize chain: %v", err)
	}
	if _, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("error inserting chain: %v", err)
	}

	// Make temp directory for era files.
	dir, err := os.MkdirTemp("", "history-export-test")
	if err != nil {
		t.Fatalf("error creating temp test directory: %v", err)
	}
	defer os.RemoveAll(dir)

	// Export history to temp directory.
	if err := ExportHistory(chain, dir, 0, count, step); err != nil {
		t.Fatalf("error exporting history: %v", err)
	}

	// Read checksums.
	b, err := os.ReadFile(path.Join(dir, "checksums.txt"))
	if err != nil {
		t.Fatalf("failed to read checksums: %v", err)
	}
	checksums := strings.Split(string(b), "\n")

	// Verify each Era.
	entries, _ := era.ReadDir(dir, "mainnet")
	for i, filename := range entries {
		func() {
			f, err := os.Open(path.Join(dir, filename))
			if err != nil {
				t.Fatalf("error opening era file: %v", err)
			}
			var (
				h   = sha256.New()
				buf = bytes.NewBuffer(nil)
			)
			if _, err := io.Copy(h, f); err != nil {
				t.Fatalf("unable to recalculate checksum: %v", err)
			}
			if got, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; got != want {
				t.Fatalf("checksum %d does not match: got %s, want %s", i, got, want)
			}
			e, err := era.From(f)
			if err != nil {
				t.Fatalf("error opening era: %v", err)
			}
			defer e.Close()
			it, err := era.NewIterator(e)
			if err != nil {
				t.Fatalf("error making era reader: %v", err)
			}
			for j := 0; it.Next(); j++ {
				n := i*int(step) + j
				if it.Error() != nil {
					t.Fatalf("error reading block entry %d: %v", n, it.Error())
				}
				block, receipts, err := it.BlockAndReceipts()
				if err != nil {
					t.Fatalf("error reading block entry %d: %v", n, err)
				}
				want := chain.GetBlockByNumber(uint64(n))
				if want, got := uint64(n), block.NumberU64(); want != got {
					t.Fatalf("blocks out of order: want %d, got %d", want, got)
				}
				if want.Hash() != block.Hash() {
					t.Fatalf("block hash mismatch %d: want %s, got %s", n, want.Hash().Hex(), block.Hash().Hex())
				}
				if got := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); got != want.TxHash() {
					t.Fatalf("tx hash %d mismatch: want %s, got %s", n, want.TxHash(), got)
				}
				if got := types.CalcUncleHash(block.Uncles()); got != want.UncleHash() {
					t.Fatalf("uncle hash %d mismatch: want %s, got %s", n, want.UncleHash(), got)
				}
				if got := types.DeriveSha(receipts, trie.NewStackTrie(nil)); got != want.ReceiptHash() {
					t.Fatalf("receipt root %d mismatch: want %s, got %s", n, want.ReceiptHash(), got)
				}
			}
		}()
	}

	// Now import Era.
	freezer := t.TempDir()
	db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
	if err != nil {
		panic(err)
	}
	t.Cleanup(func() {
		db2.Close()
	})

	genesis.MustCommit(db2, trie.NewDatabase(db, trie.HashDefaults))
	imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("unable to initialize chain: %v", err)
	}
	if err := ImportHistory(imported, db2, dir, "mainnet"); err != nil {
		t.Fatalf("failed to import chain: %v", err)
	}
	if have, want := imported.CurrentHeader(), chain.CurrentHeader(); have.Hash() != want.Hash() {
		t.Fatalf("imported chain does not match expected, have (%d, %s) want (%d, %s)", have.Number, have.Hash(), want.Number, want.Hash())
	}
}
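One recurring primitive in the test above is types.DeriveSha, which recomputes a trie root from a flat list of items so it can be compared against the root committed in a block header. A tiny standalone sketch, not part of the commit, showing the empty-list sanity check:

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/core/types"
		"github.com/ethereum/go-ethereum/trie"
	)

	func main() {
		// An empty transaction list must hash to the well-known empty root.
		root := types.DeriveSha(types.Transactions{}, trie.NewStackTrie(nil))
		fmt.Println(root == types.EmptyTxsHash) // true
	}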
@@ -1673,7 +1673,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 	// The chain importer is starting and stopping trie prefetchers. If a bad
 	// block or other error is hit however, an early return may not properly
 	// terminate the background threads. This defer ensures that we clean up
-	// and dangling prefetcher, without defering each and holding on live refs.
+	// and dangling prefetcher, without deferring each and holding on live refs.
 	if activeState != nil {
 		activeState.StopPrefetcher()
 	}
@@ -2188,6 +2188,12 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
 		// rewind the canonical chain to a lower point.
 		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
 	}
+	// Reset the tx lookup cache to clear stale txlookups.
+	// This is done before writing any new chain data to avoid the
+	// weird scenario that canonical chain is changed while the
+	// stale lookups are still cached.
+	bc.txLookupCache.Purge()
+
 	// Insert the new chain(except the head block(reverse order)),
 	// taking care of the proper incremental order.
 	for i := len(newChain) - 1; i >= 1; i-- {
@@ -2202,11 +2208,13 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {

 	// Delete useless indexes right now which includes the non-canonical
 	// transaction indexes, canonical chain indexes which above the head.
-	indexesBatch := bc.db.NewBatch()
-	for _, tx := range types.HashDifference(deletedTxs, addedTxs) {
+	var (
+		indexesBatch = bc.db.NewBatch()
+		diffs        = types.HashDifference(deletedTxs, addedTxs)
+	)
+	for _, tx := range diffs {
 		rawdb.DeleteTxLookupEntry(indexesBatch, tx)
 	}
-
 	// Delete all hash markers that are not part of the new canonical chain.
 	// Because the reorg function does not handle new chain head, all hash
 	// markers greater than or equal to new chain head should be deleted.
@@ -410,6 +410,11 @@ func (bc *BlockChain) TrieDB() *trie.Database {
 	return bc.triedb
 }

+// HeaderChain returns the underlying header chain.
+func (bc *BlockChain) HeaderChain() *HeaderChain {
+	return bc.hc
+}
+
 // SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
 func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
 	return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
@@ -83,7 +83,7 @@ func (b *BlockGen) SetDifficulty(diff *big.Int) {
 	b.header.Difficulty = diff
 }

-// SetPos makes the header a PoS-header (0 difficulty)
+// SetPoS makes the header a PoS-header (0 difficulty)
 func (b *BlockGen) SetPoS() {
 	b.header.Difficulty = new(big.Int)
 }
@@ -74,8 +74,10 @@ func TestCreation(t *testing.T) {
 			{15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}},            // Last Arrow Glacier block
 			{15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}},          // First Gray Glacier block
 			{20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block
-			{20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}},          // First Shanghai block
-			{30000000, 2000000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}},          // Future Shanghai block
+			{20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // First Shanghai block
+			{30000000, 1710338134, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // Last Shanghai block
+			{40000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}},          // First Cancun block
+			{50000000, 2000000000, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}},          // Future Cancun block
 		},
 	},
 	// Goerli test cases
@@ -141,6 +143,7 @@ func TestValidation(t *testing.T) {
 	// Config that has not timestamp enabled
 	legacyConfig := *params.MainnetChainConfig
 	legacyConfig.ShanghaiTime = nil
+	legacyConfig.CancunTime = nil

 	tests := []struct {
 		config *params.ChainConfig
@@ -213,14 +216,10 @@ func TestValidation(t *testing.T) {
 		// at some future block 88888888, for itself, but past block for local. Local is incompatible.
 		//
 		// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
-		//
-		// TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
 		{&legacyConfig, 88888888, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale},

 		// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
 		// fork) at block 7279999, before Petersburg. Local is incompatible.
-		//
-		// TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
 		{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},

 		//------------------------------------
@@ -297,34 +296,25 @@ func TestValidation(t *testing.T) {
 		// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
 		// also Shanghai, but it's not yet aware of Cancun (e.g. non updated node before the fork).
 		// In this case we don't know if Cancun passed yet or not.
-		//
-		// TODO(karalabe): Enable this when Cancun is specced
-		//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
+		{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, nil},

 		// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
 		// also Shanghai, and it's also aware of Cancun (e.g. updated node before the fork). We
 		// don't know if Cancun passed yet (will pass) or not.
-		//
-		// TODO(karalabe): Enable this when Cancun is specced and update next timestamp
-		//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+		{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},

 		// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
 		// also Shanghai, and it's also aware of some random fork (e.g. misconfigured Cancun). As
 		// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
-		//
-		// TODO(karalabe): Enable this when Cancun is specced
-		//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
+		{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: math.MaxUint64}, nil},

 		// Local is mainnet exactly on Cancun, remote announces Shanghai + knowledge about Cancun. Remote
 		// is simply out of sync, accept.
-		//
-		// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
-		// {params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+		{params.MainnetChainConfig, 21000000, 1710338135, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},

 		// Local is mainnet Cancun, remote announces Shanghai + knowledge about Cancun. Remote
 		// is simply out of sync, accept.
-		// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
-		//{params.MainnetChainConfig, 21123456, 1678123456, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+		{params.MainnetChainConfig, 21123456, 1710338136, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},

 		// Local is mainnet Prague, remote announces Shanghai + knowledge about Cancun. Remote
 		// is definitely out of sync. It may or may not need the Prague update, we don't know yet.
@@ -333,9 +323,7 @@ func TestValidation(t *testing.T) {
 		//{params.MainnetChainConfig, 0, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},

 		// Local is mainnet Shanghai, remote announces Cancun. Local is out of sync, accept.
-		//
-		// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
+		{params.MainnetChainConfig, 21000000, 1700000000, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}, nil},
|
|
||||||
//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
|
|
||||||
|
|
||||||
// Local is mainnet Shanghai, remote announces Cancun, but is not aware of Prague. Local
|
// Local is mainnet Shanghai, remote announces Cancun, but is not aware of Prague. Local
|
||||||
// out of sync. Local also knows about a future fork, but that is uncertain yet.
|
// out of sync. Local also knows about a future fork, but that is uncertain yet.
|
||||||
@ -345,9 +333,7 @@ func TestValidation(t *testing.T) {
|
|||||||
|
|
||||||
// Local is mainnet Cancun. remote announces Shanghai but is not aware of further forks.
|
// Local is mainnet Cancun. remote announces Shanghai but is not aware of further forks.
|
||||||
// Remote needs software update.
|
// Remote needs software update.
|
||||||
//
|
{params.MainnetChainConfig, 21000000, 1710338135, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, ErrRemoteStale},
|
||||||
// TODO(karalabe): Enable this when Cancun is specced, update local head and time
|
|
||||||
//{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, ErrRemoteStale},
|
|
||||||
|
|
||||||
// Local is mainnet Shanghai, and isn't aware of more forks. Remote announces Shanghai +
|
// Local is mainnet Shanghai, and isn't aware of more forks. Remote announces Shanghai +
|
||||||
// 0xffffffff. Local needs software update, reject.
|
// 0xffffffff. Local needs software update, reject.
|
||||||
@ -355,24 +341,20 @@ func TestValidation(t *testing.T) {
|
|||||||
|
|
||||||
// Local is mainnet Shanghai, and is aware of Cancun. Remote announces Cancun +
|
// Local is mainnet Shanghai, and is aware of Cancun. Remote announces Cancun +
|
||||||
// 0xffffffff. Local needs software update, reject.
|
// 0xffffffff. Local needs software update, reject.
|
||||||
//
|
{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x9f3d2254, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
|
||||||
// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
|
|
||||||
//{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x00000000, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
|
|
||||||
|
|
||||||
// Local is mainnet Shanghai, remote is random Shanghai.
|
// Local is mainnet Shanghai, remote is random Shanghai.
|
||||||
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
|
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
// Local is mainnet Shanghai, far in the future. Remote announces Gopherium (non existing fork)
|
// Local is mainnet Cancun, far in the future. Remote announces Gopherium (non existing fork)
|
||||||
// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
|
// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
|
||||||
//
|
//
|
||||||
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
|
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
|
||||||
{params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xdce96c2d), Next: 8888888888}, ErrLocalIncompatibleOrStale},
|
{params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x9f3d2254), Next: 8888888888}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
// Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
|
// Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
|
||||||
// fork) at timestamp 1668000000, before Cancun. Local is incompatible.
|
// fork) at timestamp 1668000000, before Cancun. Local is incompatible.
|
||||||
//
|
{params.MainnetChainConfig, 20999999, 1699999999, ID{Hash: checksumToBytes(0x71147644), Next: 1700000000}, ErrLocalIncompatibleOrStale},
|
||||||
// TODO(karalabe): Enable this when Cancun is specced
|
|
||||||
//{params.MainnetChainConfig, 20999999, 1677999999, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, ErrLocalIncompatibleOrStale},
|
|
||||||
}
|
}
|
||||||
genesis := core.DefaultGenesisBlock().ToBlock()
|
genesis := core.DefaultGenesisBlock().ToBlock()
|
||||||
for i, tt := range tests {
|
for i, tt := range tests {
|
||||||
|
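The expected hashes in the vectors above (0xdce96c2d in the Shanghai cases, 0x9f3d2254 in the Cancun ones) are EIP-2124 fork identifiers: a CRC32 checksum seeded with the genesis hash and folded with each past fork's block number or timestamp, big-endian encoded. A minimal, self-contained sketch of that folding; checksumUpdate mirrors the helper the test references, while the genesis stand-in and sample fork numbers are illustrative only.

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"hash/crc32"
    )

    // checksumUpdate folds the next fork point (block number or timestamp)
    // into an existing EIP-2124 checksum.
    func checksumUpdate(hash uint32, fork uint64) uint32 {
    	var blob [8]byte
    	binary.BigEndian.PutUint64(blob[:], fork)
    	return crc32.Update(hash, crc32.IEEETable, blob[:])
    }

    func main() {
    	genesis := make([]byte, 32) // stand-in for the real genesis hash
    	sum := crc32.ChecksumIEEE(genesis)
    	for _, fork := range []uint64{1150000, 1920000, 2463000} { // illustrative forks
    		sum = checksumUpdate(sum, fork)
    	}
    	fmt.Printf("fork ID hash: %#08x\n", sum)
    }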
@@ -413,6 +413,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
 		return g.Config
 	case ghash == params.MainnetGenesisHash:
 		return params.MainnetChainConfig
+	case ghash == params.HoleskyGenesisHash:
+		return params.HoleskyChainConfig
 	case ghash == params.SepoliaGenesisHash:
 		return params.SepoliaChainConfig
 	case ghash == params.GoerliGenesisHash:
@@ -121,7 +121,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
 	// the trie nodes(and codes) belong to the active state will be filtered
 	// out. A very small part of stale tries will also be filtered because of
 	// the false-positive rate of bloom filter. But the assumption is held here
-	// that the false-positive is low enough(~0.05%). The probablity of the
+	// that the false-positive is low enough(~0.05%). The probability of the
 	// dangling node is the state root is super low. So the dangling nodes in
 	// theory will never ever be visited again.
 	var (
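The assumption flagged in this comment can be sanity-checked with the standard bloom filter estimate: with m bits, k hash functions and n inserted keys, the false-positive rate is (1 - e^(-kn/m))^k. A small sketch of that arithmetic; the sizing below is illustrative and not the pruner's actual configuration.

    package main

    import (
    	"fmt"
    	"math"
    )

    // bloomFalsePositive returns the expected false-positive rate of a bloom
    // filter with m bits and k hash functions after inserting n items.
    func bloomFalsePositive(m, k, n float64) float64 {
    	return math.Pow(1-math.Exp(-k*n/m), k)
    }

    func main() {
    	m := 2.0 * 1024 * 1024 * 1024 * 8 // a 2 GiB filter, in bits
    	// Four hashes and 600M keys land around 0.03%, the same order of
    	// magnitude as the ~0.05% assumed by the comment above.
    	fmt.Printf("false-positive rate: %.4f%%\n", 100*bloomFalsePositive(m, 4, 600e6))
    }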
@@ -43,7 +43,7 @@ var (
 	aggregatorMemoryLimit = uint64(4 * 1024 * 1024)
 
 	// aggregatorItemLimit is an approximate number of items that will end up
-	// in the agregator layer before it's flushed out to disk. A plain account
+	// in the aggregator layer before it's flushed out to disk. A plain account
 	// weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
 	// 0B (+hash). Slots are mostly set/unset in lockstep, so that average at
 	// 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
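The comment above is doing capacity arithmetic: if the average entry weighs about 47 bytes (roughly 15B of payload plus a 32B hash), the 4 MiB aggregator budget translates into just under ninety thousand items before a flush. A quick sketch of that estimate; only the 47B average is taken from the comment, the rest is plain division.

    package main

    import "fmt"

    func main() {
    	const (
    		aggregatorMemoryLimit = uint64(4 * 1024 * 1024) // 4 MiB, as above
    		avgEntrySize          = uint64(47)              // ~15B payload + 32B hash
    	)
    	// Approximate number of items the aggregator layer holds before flushing.
    	fmt.Println("approx item limit:", aggregatorMemoryLimit/avgEntrySize) // 89240
    }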
@@ -139,7 +139,7 @@ func TestDiskMerge(t *testing.T) {
 	// Retrieve all the data through the disk layer and validate it
 	base = snaps.Snapshot(diffRoot)
 	if _, ok := base.(*diskLayer); !ok {
-		t.Fatalf("update not flattend into the disk layer")
+		t.Fatalf("update not flattened into the disk layer")
 	}
 
 	// assertAccount ensures that an account matches the given blob.
@@ -362,7 +362,7 @@ func TestDiskPartialMerge(t *testing.T) {
 	// Retrieve all the data through the disk layer and validate it
 	base = snaps.Snapshot(diffRoot)
 	if _, ok := base.(*diskLayer); !ok {
-		t.Fatalf("test %d: update not flattend into the disk layer", i)
+		t.Fatalf("test %d: update not flattened into the disk layer", i)
 	}
 	assertAccount(accNoModNoCache, accNoModNoCache[:])
 	assertAccount(accNoModCache, accNoModCache[:])
@@ -93,7 +93,7 @@ type stateObject struct {
 
 // empty returns whether the account is considered empty.
 func (s *stateObject) empty() bool {
-	return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes())
+	return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes())
 }
 
 // newObject creates a state object.
@@ -408,7 +408,7 @@ func (s *stateObject) commit() (*trienode.NodeSet, error) {
 func (s *stateObject) AddBalance(amount *uint256.Int) {
 	// EIP161: We must check emptiness for the objects such that the account
 	// clearing (0,0,0 objects) can take effect.
-	if amount.Sign() == 0 {
+	if amount.IsZero() {
 		if s.empty() {
 			s.touch()
 		}
@@ -420,7 +420,7 @@ func (s *stateObject) AddBalance(amount *uint256.Int) {
 // SubBalance removes amount from s's balance.
 // It is used to remove funds from the origin account of a transfer.
 func (s *stateObject) SubBalance(amount *uint256.Int) {
-	if amount.Sign() == 0 {
+	if amount.IsZero() {
 		return
 	}
 	s.SetBalance(new(uint256.Int).Sub(s.Balance(), amount))
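The Sign() == 0 to IsZero() swaps above are behavior-preserving: a uint256.Int is never negative, so a zero sign and a zero value coincide, and IsZero states the intent directly. A minimal sketch against the holiman/uint256 package used here:

    package main

    import (
    	"fmt"

    	"github.com/holiman/uint256"
    )

    func main() {
    	amount := uint256.NewInt(0)
    	// For an unsigned integer the two checks agree; IsZero just avoids
    	// computing a sign that can only ever be 0 or 1.
    	fmt.Println(amount.Sign() == 0, amount.IsZero()) // true true
    }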
@@ -237,7 +237,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, s
 			id := trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root)
 			stTrie, err := trie.New(id, ndb)
 			if err != nil {
-				t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err)
+				t.Fatalf("failed to retrieve storage trie for path %x: %v", node.syncPath[1], err)
 			}
 			data, _, err := stTrie.GetNode(node.syncPath[1])
 			if err != nil {
@@ -130,6 +130,7 @@ func (indexer *txIndexer) loop(chain *BlockChain) {
 		stop     chan struct{} // Non-nil if background routine is active.
 		done     chan struct{} // Non-nil if background routine is active.
 		lastHead uint64        // The latest announced chain head (whose tx indexes are assumed created)
+		lastTail = rawdb.ReadTxIndexTail(indexer.db) // The oldest indexed block, nil means nothing indexed
 
 		headCh = make(chan ChainHeadEvent)
 		sub    = chain.SubscribeChainHeadEvent(headCh)
@@ -156,8 +157,9 @@ func (indexer *txIndexer) loop(chain *BlockChain) {
 		case <-done:
 			stop = nil
 			done = nil
+			lastTail = rawdb.ReadTxIndexTail(indexer.db)
 		case ch := <-indexer.progress:
-			ch <- indexer.report(lastHead)
+			ch <- indexer.report(lastHead, lastTail)
 		case ch := <-indexer.term:
 			if stop != nil {
 				close(stop)
@@ -173,11 +175,7 @@ func (indexer *txIndexer) loop(chain *BlockChain) {
 }
 
 // report returns the tx indexing progress.
-func (indexer *txIndexer) report(head uint64) TxIndexProgress {
-	var (
-		remaining uint64
-		tail      = rawdb.ReadTxIndexTail(indexer.db)
-	)
+func (indexer *txIndexer) report(head uint64, tail *uint64) TxIndexProgress {
 	total := indexer.limit
 	if indexer.limit == 0 || total > head {
 		total = head + 1 // genesis included
@@ -188,6 +186,7 @@ func (indexer *txIndexer) report(head uint64) TxIndexProgress {
 	}
 	// The value of indexed might be larger than total if some blocks need
 	// to be unindexed, avoiding a negative remaining.
+	var remaining uint64
 	if indexed < total {
 		remaining = total - indexed
 	}
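After this refactor the indexing tail is sampled once per completed run in the event loop and threaded into report, rather than each progress query re-reading it from the database. A self-contained sketch of the resulting arithmetic; deriving indexed as head - tail + 1 is an assumption about the elided middle of report, the rest follows the lines shown above.

    package main

    import "fmt"

    // progress derives indexed/remaining block counts from the chain head, the
    // oldest indexed block (nil means nothing indexed) and a user limit.
    func progress(head uint64, tail *uint64, limit uint64) (indexed, remaining uint64) {
    	total := limit
    	if limit == 0 || total > head {
    		total = head + 1 // genesis included
    	}
    	if tail != nil {
    		indexed = head - *tail + 1 // assumed derivation, see note above
    	}
    	if indexed < total {
    		remaining = total - indexed
    	}
    	return indexed, remaining
    }

    func main() {
    	tail := uint64(90)
    	fmt.Println(progress(100, &tail, 0)) // 11 90
    }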
@@ -85,7 +85,7 @@ func TestTxIndexer(t *testing.T) {
 		for number := *tail; number <= chainHead; number += 1 {
 			verifyIndexes(db, number, true)
 		}
-		progress := indexer.report(chainHead)
+		progress := indexer.report(chainHead, tail)
 		if !progress.Done() {
 			t.Fatalf("Expect fully indexed")
 		}
@@ -386,6 +386,8 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
 
 	if len(fails) > 0 {
 		log.Warn("Dropping invalidated blob transactions", "ids", fails)
+		dropInvalidMeter.Mark(int64(len(fails)))
+
 		for _, id := range fails {
 			if err := p.store.Delete(id); err != nil {
 				p.Close()
@@ -456,7 +458,7 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
 	tx := new(types.Transaction)
 	if err := rlp.DecodeBytes(blob, tx); err != nil {
 		// This path is impossible unless the disk data representation changes
-		// across restarts. For that ever unprobable case, recover gracefully
+		// across restarts. For that ever improbable case, recover gracefully
 		// by ignoring this data entry.
 		log.Error("Failed to decode blob pool entry", "id", id, "err", err)
 		return err
@@ -467,11 +469,17 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
 	}
 
 	meta := newBlobTxMeta(id, size, tx)
+	if _, exists := p.lookup[meta.hash]; exists {
+		// This path is only possible after a crash, where deleted items are not
+		// removed via the normal shutdown-startup procedure and thus may get
+		// partially resurrected.
+		log.Error("Rejecting duplicate blob pool entry", "id", id, "hash", tx.Hash())
+		return errors.New("duplicate blob entry")
+	}
 	sender, err := p.signer.Sender(tx)
 	if err != nil {
 		// This path is impossible unless the signature validity changes across
-		// restarts. For that ever unprobable case, recover gracefully by ignoring
+		// restarts. For that ever improbable case, recover gracefully by ignoring
 		// this data entry.
 		log.Error("Failed to recover blob tx sender", "id", id, "hash", tx.Hash(), "err", err)
 		return err
@@ -537,8 +545,10 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 
 	if gapped {
 		log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids)
+		dropDanglingMeter.Mark(int64(len(ids)))
 	} else {
 		log.Trace("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids)
+		dropFilledMeter.Mark(int64(len(ids)))
 	}
 	for _, id := range ids {
 		if err := p.store.Delete(id); err != nil {
@@ -569,6 +579,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 		txs = txs[1:]
 	}
 	log.Trace("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", len(txs))
+	dropOverlappedMeter.Mark(int64(len(ids)))
+
 	for _, id := range ids {
 		if err := p.store.Delete(id); err != nil {
 			log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -600,10 +612,30 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 			}
 			continue
 		}
-		// Sanity check that there's no double nonce. This case would be a coding
-		// error, but better know about it
+		// Sanity check that there's no double nonce. This case would generally
+		// be a coding error, so better know about it.
+		//
+		// Also, Billy behind the blobpool does not journal deletes. A process
+		// crash would result in previously deleted entities being resurrected.
+		// That could potentially cause a duplicate nonce to appear.
		if txs[i].nonce == txs[i-1].nonce {
-			log.Error("Duplicate nonce blob transaction", "from", addr, "nonce", txs[i].nonce)
+			id := p.lookup[txs[i].hash]
+
+			log.Error("Dropping repeat nonce blob transaction", "from", addr, "nonce", txs[i].nonce, "id", id)
+			dropRepeatedMeter.Mark(1)
+
+			p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
+			p.stored -= uint64(txs[i].size)
+			delete(p.lookup, txs[i].hash)
+
+			if err := p.store.Delete(id); err != nil {
+				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+			}
+			txs = append(txs[:i], txs[i+1:]...)
+			p.index[addr] = txs
+
+			i--
+			continue
 		}
 		// Otherwise if there's a nonce gap evict all later transactions
 		var (
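The expanded branch exists because Billy, the store backing the blob pool, does not journal deletes: a crash can resurrect an already-deleted entry, leaving two transactions with the same nonce in the nonce-sorted per-account list. Stripped of the pool bookkeeping, the recovery is a dedup-in-place over a sorted slice; a standalone sketch with a hypothetical blobTxMeta:

    package main

    import "fmt"

    type blobTxMeta struct {
    	nonce uint64
    	id    uint64
    }

    // dropRepeatedNonces keeps the first occurrence of each nonce in a
    // nonce-sorted slice and drops later repeats, like recheck above (which
    // additionally rolls back spent/stored/lookup and deletes the store entry).
    func dropRepeatedNonces(txs []blobTxMeta) []blobTxMeta {
    	for i := 1; i < len(txs); i++ {
    		if txs[i].nonce == txs[i-1].nonce {
    			txs = append(txs[:i], txs[i+1:]...)
    			i--
    		}
    	}
    	return txs
    }

    func main() {
    	txs := []blobTxMeta{{0, 1}, {1, 2}, {1, 3}, {2, 4}}
    	fmt.Println(dropRepeatedNonces(txs)) // [{0 1} {1 2} {2 4}]
    }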
@@ -621,6 +653,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 			txs = txs[:i]
 
 			log.Error("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids)
+			dropGappedMeter.Mark(int64(len(ids)))
+
 			for _, id := range ids {
 				if err := p.store.Delete(id); err != nil {
 					log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -665,6 +699,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 			p.index[addr] = txs
 		}
 		log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids)
+		dropOverdraftedMeter.Mark(int64(len(ids)))
+
 		for _, id := range ids {
 			if err := p.store.Delete(id); err != nil {
 				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -695,6 +731,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 		p.index[addr] = txs
 
 		log.Warn("Dropping overcapped blob transactions", "from", addr, "kept", len(txs), "drop", nonces, "ids", ids)
+		dropOvercappedMeter.Mark(int64(len(ids)))
+
 		for _, id := range ids {
 			if err := p.store.Delete(id); err != nil {
 				log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -711,7 +749,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 // offload removes a tracked blob transaction from the pool and moves it into the
 // limbo for tracking until finality.
 //
-// The method may log errors for various unexpcted scenarios but will not return
+// The method may log errors for various unexpected scenarios but will not return
 // any of it since there's no clear error case. Some errors may be due to coding
 // issues, others caused by signers mining MEV stuff or swapping transactions. In
 // all cases, the pool needs to continue operating.
@@ -952,7 +990,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
 		return err
 	}
 
-	// Update the indixes and metrics
+	// Update the indices and metrics
 	meta := newBlobTxMeta(id, p.store.Size(id), tx)
 	if _, ok := p.index[addr]; !ok {
 		if err := p.reserve(addr, true); err != nil {
@@ -1019,6 +1057,8 @@ func (p *BlobPool) SetGasTip(tip *big.Int) {
 			}
 			// Clear out the transactions from the data store
 			log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids)
+			dropUnderpricedMeter.Mark(int64(len(ids)))
+
 			for _, id := range ids {
 				if err := p.store.Delete(id); err != nil {
 					log.Error("Failed to delete dropped transaction", "id", id, "err", err)
@@ -1161,7 +1201,7 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
 }
 
 // Add inserts a set of blob transactions into the pool if they pass validation (both
-// consensus validity and pool restictions).
+// consensus validity and pool restrictions).
 func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
 	var (
 		adds = make([]*types.Transaction, 0, len(txs))
@@ -1181,7 +1221,7 @@ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error
 }
 
 // Add inserts a new blob transaction into the pool if it passes validation (both
-// consensus validity and pool restictions).
+// consensus validity and pool restrictions).
 func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	// The blob pool blocks on adding a transaction. This is because blob txs are
 	// only even pulled form the network, so this method will act as the overload
@@ -1198,6 +1238,22 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	// Ensure the transaction is valid from all perspectives
 	if err := p.validateTx(tx); err != nil {
 		log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
+		switch {
+		case errors.Is(err, txpool.ErrUnderpriced):
+			addUnderpricedMeter.Mark(1)
+		case errors.Is(err, core.ErrNonceTooLow):
+			addStaleMeter.Mark(1)
+		case errors.Is(err, core.ErrNonceTooHigh):
+			addGappedMeter.Mark(1)
+		case errors.Is(err, core.ErrInsufficientFunds):
+			addOverdraftedMeter.Mark(1)
+		case errors.Is(err, txpool.ErrAccountLimitExceeded):
+			addOvercappedMeter.Mark(1)
+		case errors.Is(err, txpool.ErrReplaceUnderpriced):
+			addNoreplaceMeter.Mark(1)
+		default:
+			addInvalidMeter.Mark(1)
+		}
 		return err
 	}
 	// If the address is not yet known, request exclusivity to track the account
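The classification switch above only works because validateTx returns sentinel errors wrapped with %w, which errors.Is can match through any number of wrapping layers. A minimal standalone illustration of the pattern, with a hypothetical sentinel rather than the pool's own:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errUnderpriced = errors.New("transaction underpriced")

    func validate(tip uint64) error {
    	if tip < 10 {
    		// Wrapping with %w keeps the sentinel matchable via errors.Is.
    		return fmt.Errorf("%w: have %d, want at least 10", errUnderpriced, tip)
    	}
    	return nil
    }

    func main() {
    	switch err := validate(3); {
    	case errors.Is(err, errUnderpriced):
    		fmt.Println("would mark addUnderpricedMeter")
    	case err != nil:
    		fmt.Println("would mark addInvalidMeter")
    	}
    }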
@@ -1205,6 +1261,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	from, _ := types.Sender(p.signer, tx) // already validated above
 	if _, ok := p.index[from]; !ok {
 		if err := p.reserve(from, true); err != nil {
+			addNonExclusiveMeter.Mark(1)
 			return err
 		}
 		defer func() {
@@ -1244,6 +1301,8 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	}
 	if len(p.index[from]) > offset {
 		// Transaction replaces a previously queued one
+		dropReplacedMeter.Mark(1)
+
 		prev := p.index[from][offset]
 		if err := p.store.Delete(prev.id); err != nil {
 			// Shitty situation, but try to recover gracefully instead of going boom
@@ -1322,6 +1381,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	}
 	p.updateStorageMetrics()
 
+	addValidMeter.Mark(1)
 	return nil
 }
 
@@ -1371,7 +1431,9 @@ func (p *BlobPool) drop() {
 		}
 	}
 	// Remove the transaction from the data store
-	log.Warn("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+	log.Debug("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+	dropOverflownMeter.Mark(1)
+
 	if err := p.store.Delete(drop.id); err != nil {
 		log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
 	}
@@ -305,7 +305,16 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
 // - 1. A transaction that cannot be decoded must be dropped
 // - 2. A transaction that cannot be recovered (bad signature) must be dropped
 // - 3. All transactions after a nonce gap must be dropped
-// - 4. All transactions after an underpriced one (including it) must be dropped
+// - 4. All transactions after an already included nonce must be dropped
+// - 5. All transactions after an underpriced one (including it) must be dropped
+// - 6. All transactions after an overdrafting sequence must be dropped
+// - 7. All transactions exceeding the per-account limit must be dropped
+//
+// Furthermore, some strange corner-cases can also occur after a crash, as Billy's
+// simplicity also allows it to resurrect past deleted entities:
+//
+// - 8. Fully duplicate transactions (matching hash) must be dropped
+// - 9. Duplicate nonces from the same account must be dropped
 func TestOpenDrops(t *testing.T) {
 	log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
 
@@ -338,7 +347,7 @@ func TestOpenDrops(t *testing.T) {
 	badsig, _ := store.Put(blob)
 
 	// Insert a sequence of transactions with a nonce gap in between to verify
-	// that anything gapped will get evicted (case 3)
+	// that anything gapped will get evicted (case 3).
 	var (
 		gapper, _ = crypto.GenerateKey()
 
@@ -357,7 +366,7 @@ func TestOpenDrops(t *testing.T) {
 		}
 	}
 	// Insert a sequence of transactions with a gapped starting nonce to verify
-	// that the entire set will get dropped.
+	// that the entire set will get dropped (case 3).
 	var (
 		dangler, _ = crypto.GenerateKey()
 		dangling   = make(map[uint64]struct{})
@@ -370,7 +379,7 @@ func TestOpenDrops(t *testing.T) {
 		dangling[id] = struct{}{}
 	}
 	// Insert a sequence of transactions with already passed nonces to veirfy
-	// that the entire set will get dropped.
+	// that the entire set will get dropped (case 4).
 	var (
 		filler, _ = crypto.GenerateKey()
 		filled    = make(map[uint64]struct{})
@@ -383,7 +392,7 @@ func TestOpenDrops(t *testing.T) {
 		filled[id] = struct{}{}
 	}
 	// Insert a sequence of transactions with partially passed nonces to veirfy
-	// that the included part of the set will get dropped
+	// that the included part of the set will get dropped (case 4).
 	var (
 		overlapper, _ = crypto.GenerateKey()
 		overlapped    = make(map[uint64]struct{})
@@ -400,7 +409,7 @@ func TestOpenDrops(t *testing.T) {
 		}
 	}
 	// Insert a sequence of transactions with an underpriced first to verify that
-	// the entire set will get dropped (case 4).
+	// the entire set will get dropped (case 5).
 	var (
 		underpayer, _ = crypto.GenerateKey()
 		underpaid     = make(map[uint64]struct{})
@@ -419,7 +428,7 @@ func TestOpenDrops(t *testing.T) {
 	}
 
 	// Insert a sequence of transactions with an underpriced in between to verify
-	// that it and anything newly gapped will get evicted (case 4).
+	// that it and anything newly gapped will get evicted (case 5).
 	var (
 		outpricer, _ = crypto.GenerateKey()
 		outpriced    = make(map[uint64]struct{})
@@ -441,7 +450,7 @@ func TestOpenDrops(t *testing.T) {
 		}
 	}
 	// Insert a sequence of transactions fully overdrafted to verify that the
-	// entire set will get invalidated.
+	// entire set will get invalidated (case 6).
 	var (
 		exceeder, _ = crypto.GenerateKey()
 		exceeded    = make(map[uint64]struct{})
@@ -459,7 +468,7 @@ func TestOpenDrops(t *testing.T) {
 		exceeded[id] = struct{}{}
 	}
 	// Insert a sequence of transactions partially overdrafted to verify that part
-	// of the set will get invalidated.
+	// of the set will get invalidated (case 6).
 	var (
 		overdrafter, _ = crypto.GenerateKey()
 		overdrafted    = make(map[uint64]struct{})
@@ -481,7 +490,7 @@ func TestOpenDrops(t *testing.T) {
 		}
 	}
 	// Insert a sequence of transactions overflowing the account cap to verify
-	// that part of the set will get invalidated.
+	// that part of the set will get invalidated (case 7).
 	var (
 		overcapper, _ = crypto.GenerateKey()
 		overcapped    = make(map[uint64]struct{})
@@ -496,6 +505,42 @@ func TestOpenDrops(t *testing.T) {
 			overcapped[id] = struct{}{}
 		}
 	}
+	// Insert a batch of duplicated transactions to verify that only one of each
+	// version will remain (case 8).
+	var (
+		duplicater, _ = crypto.GenerateKey()
+		duplicated    = make(map[uint64]struct{})
+	)
+	for _, nonce := range []uint64{0, 1, 2} {
+		blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, 1, 1, duplicater))
+
+		for i := 0; i < int(nonce)+1; i++ {
+			id, _ := store.Put(blob)
+			if i == 0 {
+				valids[id] = struct{}{}
+			} else {
+				duplicated[id] = struct{}{}
+			}
+		}
+	}
+	// Insert a batch of duplicated nonces to verify that only one of each will
+	// remain (case 9).
+	var (
+		repeater, _ = crypto.GenerateKey()
+		repeated    = make(map[uint64]struct{})
+	)
+	for _, nonce := range []uint64{0, 1, 2} {
+		for i := 0; i < int(nonce)+1; i++ {
+			blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, uint64(i)+1 /* unique hashes */, 1, repeater))
+
+			id, _ := store.Put(blob)
+			if i == 0 {
+				valids[id] = struct{}{}
+			} else {
+				repeated[id] = struct{}{}
+			}
+		}
+	}
 	store.Close()
 
 	// Create a blob pool out of the pre-seeded data
@@ -511,6 +556,8 @@ func TestOpenDrops(t *testing.T) {
 	statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), uint256.NewInt(1000000))
 	statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), uint256.NewInt(1000000))
 	statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000))
+	statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000))
 	statedb.Commit(0, true)
 
 	chain := &testBlockChain{
@@ -554,6 +601,10 @@ func TestOpenDrops(t *testing.T) {
 			t.Errorf("partially overdrafted transaction remained in storage: %d", tx.id)
 		} else if _, ok := overcapped[tx.id]; ok {
 			t.Errorf("overcapped transaction remained in storage: %d", tx.id)
+		} else if _, ok := duplicated[tx.id]; ok {
+			t.Errorf("duplicated transaction remained in storage: %d", tx.id)
+		} else if _, ok := repeated[tx.id]; ok {
+			t.Errorf("repeated nonce transaction remained in storage: %d", tx.id)
 		} else {
 			alive[tx.id] = struct{}{}
 		}
@@ -584,7 +635,7 @@ func TestOpenDrops(t *testing.T) {
 
 // Tests that transactions loaded from disk are indexed correctly.
 //
-// - 1. Transactions must be groupped by sender, sorted by nonce
+// - 1. Transactions must be grouped by sender, sorted by nonce
 // - 2. Eviction thresholds are calculated correctly for the sequences
 // - 3. Balance usage of an account is totals across all transactions
 func TestOpenIndex(t *testing.T) {
@@ -598,7 +649,7 @@ func TestOpenIndex(t *testing.T) {
 	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
 
 	// Insert a sequence of transactions with varying price points to check that
-	// the cumulative minimumw will be maintained.
+	// the cumulative minimum will be maintained.
 	var (
 		key, _ = crypto.GenerateKey()
 		addr   = crypto.PubkeyToAddress(key.PublicKey)
@@ -1197,7 +1248,7 @@ func TestAdd(t *testing.T) {
 		keys[acc], _ = crypto.GenerateKey()
 		addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)
 
-		// Seed the state database with this acocunt
+		// Seed the state database with this account
 		statedb.AddBalance(addrs[acc], new(uint256.Int).SetUint64(seed.balance))
 		statedb.SetNonce(addrs[acc], seed.nonce)
 
@@ -53,7 +53,7 @@ func newLimbo(datadir string) (*limbo, error) {
 		index:  make(map[common.Hash]uint64),
 		groups: make(map[uint64]map[uint64]common.Hash),
 	}
-	// Index all limboed blobs on disk and delete anything inprocessable
+	// Index all limboed blobs on disk and delete anything unprocessable
 	var fails []uint64
 	index := func(id uint64, size uint32, data []byte) {
 		if l.parseBlob(id, data) != nil {
@@ -89,7 +89,7 @@ func (l *limbo) parseBlob(id uint64, data []byte) error {
 	item := new(limboBlob)
 	if err := rlp.DecodeBytes(data, item); err != nil {
 		// This path is impossible unless the disk data representation changes
-		// across restarts. For that ever unprobable case, recover gracefully
+		// across restarts. For that ever improbable case, recover gracefully
 		// by ignoring this data entry.
 		log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
 		return err
@@ -172,7 +172,7 @@ func (l *limbo) pull(tx common.Hash) (*types.Transaction, error) {
 // update changes the block number under which a blob transaction is tracked. This
 // method should be used when a reorg changes a transaction's inclusion block.
 //
-// The method may log errors for various unexpcted scenarios but will not return
+// The method may log errors for various unexpected scenarios but will not return
 // any of it since there's no clear error case. Some errors may be due to coding
 // issues, others caused by signers mining MEV stuff or swapping transactions. In
 // all cases, the pool needs to continue operating.
|
@ -65,8 +65,8 @@ var (
|
|||||||
pooltipGauge = metrics.NewRegisteredGauge("blobpool/pooltip", nil)
|
pooltipGauge = metrics.NewRegisteredGauge("blobpool/pooltip", nil)
|
||||||
|
|
||||||
// addwait/time, resetwait/time and getwait/time track the rough health of
|
// addwait/time, resetwait/time and getwait/time track the rough health of
|
||||||
// the pool and whether or not it's capable of keeping up with the load from
|
// the pool and whether it's capable of keeping up with the load from the
|
||||||
// the network.
|
// network.
|
||||||
addwaitHist = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015))
|
addwaitHist = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||||
addtimeHist = metrics.NewRegisteredHistogram("blobpool/addtime", nil, metrics.NewExpDecaySample(1028, 0.015))
|
addtimeHist = metrics.NewRegisteredHistogram("blobpool/addtime", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||||
getwaitHist = metrics.NewRegisteredHistogram("blobpool/getwait", nil, metrics.NewExpDecaySample(1028, 0.015))
|
getwaitHist = metrics.NewRegisteredHistogram("blobpool/getwait", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||||
@ -75,4 +75,31 @@ var (
|
|||||||
pendtimeHist = metrics.NewRegisteredHistogram("blobpool/pendtime", nil, metrics.NewExpDecaySample(1028, 0.015))
|
pendtimeHist = metrics.NewRegisteredHistogram("blobpool/pendtime", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||||
resetwaitHist = metrics.NewRegisteredHistogram("blobpool/resetwait", nil, metrics.NewExpDecaySample(1028, 0.015))
|
resetwaitHist = metrics.NewRegisteredHistogram("blobpool/resetwait", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||||
resettimeHist = metrics.NewRegisteredHistogram("blobpool/resettime", nil, metrics.NewExpDecaySample(1028, 0.015))
|
resettimeHist = metrics.NewRegisteredHistogram("blobpool/resettime", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||||
|
|
||||||
|
// The below metrics track various cases where transactions are dropped out
|
||||||
|
// of the pool. Most are exceptional, some are chain progression and some
|
||||||
|
// threshold cappings.
|
||||||
|
dropInvalidMeter = metrics.NewRegisteredMeter("blobpool/drop/invalid", nil) // Invalid transaction, consensus change or bugfix, neutral-ish
|
||||||
|
dropDanglingMeter = metrics.NewRegisteredMeter("blobpool/drop/dangling", nil) // First nonce gapped, bad
|
||||||
|
dropFilledMeter = metrics.NewRegisteredMeter("blobpool/drop/filled", nil) // State full-overlap, chain progress, ok
|
||||||
|
dropOverlappedMeter = metrics.NewRegisteredMeter("blobpool/drop/overlapped", nil) // State partial-overlap, chain progress, ok
|
||||||
|
dropRepeatedMeter = metrics.NewRegisteredMeter("blobpool/drop/repeated", nil) // Repeated nonce, bad
|
||||||
|
dropGappedMeter = metrics.NewRegisteredMeter("blobpool/drop/gapped", nil) // Non-first nonce gapped, bad
|
||||||
|
dropOverdraftedMeter = metrics.NewRegisteredMeter("blobpool/drop/overdrafted", nil) // Balance exceeded, bad
|
||||||
|
dropOvercappedMeter = metrics.NewRegisteredMeter("blobpool/drop/overcapped", nil) // Per-account cap exceeded, bad
|
||||||
|
dropOverflownMeter = metrics.NewRegisteredMeter("blobpool/drop/overflown", nil) // Global disk cap exceeded, neutral-ish
|
||||||
|
dropUnderpricedMeter = metrics.NewRegisteredMeter("blobpool/drop/underpriced", nil) // Gas tip changed, neutral
|
||||||
|
dropReplacedMeter = metrics.NewRegisteredMeter("blobpool/drop/replaced", nil) // Transaction replaced, neutral
|
||||||
|
|
||||||
|
// The below metrics track various outcomes of transactions being added to
|
||||||
|
// the pool.
|
||||||
|
addInvalidMeter = metrics.NewRegisteredMeter("blobpool/add/invalid", nil) // Invalid transaction, reject, neutral
|
||||||
|
addUnderpricedMeter = metrics.NewRegisteredMeter("blobpool/add/underpriced", nil) // Gas tip too low, neutral
|
||||||
|
addStaleMeter = metrics.NewRegisteredMeter("blobpool/add/stale", nil) // Nonce already filled, reject, bad-ish
|
||||||
|
addGappedMeter = metrics.NewRegisteredMeter("blobpool/add/gapped", nil) // Nonce gapped, reject, bad-ish
|
||||||
|
addOverdraftedMeter = metrics.NewRegisteredMeter("blobpool/add/overdrafted", nil) // Balance exceeded, reject, neutral
|
||||||
|
addOvercappedMeter = metrics.NewRegisteredMeter("blobpool/add/overcapped", nil) // Per-account cap exceeded, reject, neutral
|
||||||
|
addNoreplaceMeter = metrics.NewRegisteredMeter("blobpool/add/noreplace", nil) // Replacement fees or tips too low, neutral
|
||||||
|
addNonExclusiveMeter = metrics.NewRegisteredMeter("blobpool/add/nonexclusive", nil) // Plain transaction from same account exists, reject, neutral
|
||||||
|
addValidMeter = metrics.NewRegisteredMeter("blobpool/add/valid", nil) // Valid transaction, add, neutral
|
||||||
)
|
)
|
||||||
|
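Each of these is a plain event meter registered under a hierarchical name; the call sites seen earlier simply mark it when the corresponding drop or add outcome fires, and whatever exporter is attached to the registry samples the counts and rates. A minimal usage sketch with the same metrics package (the meter name here is hypothetical):

    package main

    import "github.com/ethereum/go-ethereum/metrics"

    func main() {
    	// Package-level registration, as in the var block above.
    	meter := metrics.NewRegisteredMeter("example/drop/demo", nil)

    	// Call sites just mark event counts; registered exporters
    	// (ExpVar, InfluxDB, ...) periodically sample the meter.
    	meter.Mark(3)
    }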
@@ -44,12 +44,18 @@ type LazyTransaction struct {
 
 // Resolve retrieves the full transaction belonging to a lazy handle if it is still
 // maintained by the transaction pool.
+//
+// Note, the method will *not* cache the retrieved transaction if the original
+// pool has not cached it. The idea being, that if the tx was too big to insert
+// originally, silently saving it will cause more trouble down the line (and
+// indeed seems to have caused a memory bloat in the original implementation
+// which did just that).
 func (ltx *LazyTransaction) Resolve() *types.Transaction {
-	if ltx.Tx == nil {
-		ltx.Tx = ltx.Pool.Get(ltx.Hash)
+	if ltx.Tx != nil {
+		return ltx.Tx
 	}
-	return ltx.Tx
+	return ltx.Pool.Get(ltx.Hash)
 }
 
 // LazyResolver is a minimal interface needed for a transaction pool to satisfy
 // resolving lazy transactions. It's mostly a helper to avoid the entire sub-
@@ -69,7 +75,7 @@ type AddressReserver func(addr common.Address, reserve bool) error
 // production, this interface defines the common methods that allow the primary
 // transaction pool to manage the subpools.
 type SubPool interface {
-	// Filter is a selector used to decide whether a transaction whould be added
+	// Filter is a selector used to decide whether a transaction would be added
 	// to this particular subpool.
 	Filter(tx *types.Transaction) bool
 
@ -19,6 +19,7 @@ package types
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/big"
|
"math/big"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
@@ -320,6 +321,7 @@ func (tx *Transaction) Cost() *big.Int {

 // RawSignatureValues returns the V, R, S signature values of the transaction.
 // The return values should not be modified by the caller.
+// The return values may be nil or zero, if the transaction is unsigned.
 func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) {
 	return tx.inner.rawSignatureValues()
 }
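Given the newly documented contract, callers should treat nil or all-zero values as "unsigned" rather than assuming a signature is present. A minimal sketch of such a check (isSigned is a hypothetical helper, not a geth API):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// isSigned reports whether a transaction carries non-trivial signature
// values, per the documented contract: nil or zero V/R/S means unsigned.
func isSigned(tx *types.Transaction) bool {
	v, r, s := tx.RawSignatureValues()
	if v == nil || r == nil || s == nil {
		return false
	}
	return v.Sign() != 0 || r.Sign() != 0 || s.Sign() != 0
}

func main() {
	unsigned := types.NewTx(&types.LegacyTx{Gas: 21000, GasPrice: big.NewInt(1), Value: big.NewInt(0)})
	fmt.Println(isSigned(unsigned)) // false
}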
@@ -508,6 +510,9 @@ func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, e
 	if err != nil {
 		return nil, err
 	}
+	if r == nil || s == nil || v == nil {
+		return nil, fmt.Errorf("%w: r: %s, s: %s, v: %s", ErrInvalidSig, r, s, v)
+	}
 	cpy := tx.inner.copy()
 	cpy.setSignatureValues(signer.ChainID(), v, r, s)
 	return &Transaction{inner: cpy, time: tx.time}, nil
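With this guard, a Signer whose SignatureValues implementation returns nil values now surfaces ErrInvalidSig from SignNewTx/WithSignature instead of producing a transaction that panics later. A minimal sketch (badSigner is a hypothetical wrapper, mirroring the nilSigner test helper added further below):

package main

import (
	"errors"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

// badSigner embeds a real signer but reports nil signature values,
// emulating a faulty Signer implementation.
type badSigner struct{ types.Signer }

func (badSigner) SignatureValues(tx *types.Transaction, sig []byte) (r, s, v *big.Int, err error) {
	return nil, nil, nil, nil
}

func main() {
	key, _ := crypto.GenerateKey()
	inner := types.LatestSignerForChainID(big.NewInt(1))

	_, err := types.SignNewTx(key, badSigner{inner}, &types.LegacyTx{Gas: 21000, GasPrice: big.NewInt(1)})
	fmt.Println(errors.Is(err, types.ErrInvalidSig)) // true: rejected instead of panicking
}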
@@ -23,6 +23,7 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/crypto/kzg4844"
 	"github.com/holiman/uint256"
 )

@@ -47,6 +48,11 @@ type txJSON struct {
 	S       *hexutil.Big    `json:"s"`
 	YParity *hexutil.Uint64 `json:"yParity,omitempty"`

+	// Blob transaction sidecar encoding:
+	Blobs       []kzg4844.Blob       `json:"blobs,omitempty"`
+	Commitments []kzg4844.Commitment `json:"commitments,omitempty"`
+	Proofs      []kzg4844.Proof      `json:"proofs,omitempty"`
+
 	// Only used for encoding:
 	Hash common.Hash `json:"hash"`
 }
@@ -142,6 +148,11 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
 		enc.S = (*hexutil.Big)(itx.S.ToBig())
 		yparity := itx.V.Uint64()
 		enc.YParity = (*hexutil.Uint64)(&yparity)
+		if sidecar := itx.Sidecar; sidecar != nil {
+			enc.Blobs = itx.Sidecar.Blobs
+			enc.Commitments = itx.Sidecar.Commitments
+			enc.Proofs = itx.Sidecar.Proofs
+		}
 	}
 	return json.Marshal(&enc)
 }
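With the sidecar fields wired into txJSON, marshalling a blob transaction that still carries its sidecar now emits blobs, commitments and proofs alongside the signature fields. A small sketch (all values are zero-filled placeholders, not a valid blob/commitment/proof triple):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto/kzg4844"
	"github.com/holiman/uint256"
)

func main() {
	tx := types.NewTx(&types.BlobTx{
		ChainID:    uint256.NewInt(1),
		Gas:        21000,
		GasTipCap:  uint256.NewInt(1),
		GasFeeCap:  uint256.NewInt(1),
		BlobFeeCap: uint256.NewInt(1),
		BlobHashes: []common.Hash{{}},
		Sidecar: &types.BlobTxSidecar{
			Blobs:       []kzg4844.Blob{{}},
			Commitments: []kzg4844.Commitment{{}},
			Proofs:      []kzg4844.Proof{{}},
		},
	})
	out, _ := json.Marshal(tx)

	// Check that the sidecar fields made it into the JSON encoding.
	var fields map[string]json.RawMessage
	json.Unmarshal(out, &fields)
	_, blobs := fields["blobs"]
	_, commits := fields["commitments"]
	_, proofs := fields["proofs"]
	fmt.Println(blobs, commits, proofs) // true true true
}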
@@ -18,11 +18,13 @@ package types

 import (
 	"errors"
+	"fmt"
 	"math/big"
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rlp"
 )

@@ -41,7 +43,7 @@ func TestEIP155Signing(t *testing.T) {
 		t.Fatal(err)
 	}
 	if from != addr {
-		t.Errorf("exected from and address to be equal. Got %x want %x", from, addr)
+		t.Errorf("expected from and address to be equal. Got %x want %x", from, addr)
 	}
 }

@@ -136,3 +138,53 @@ func TestChainId(t *testing.T) {
 			t.Error("expected no error")
 		}
 	}
 }
+
+type nilSigner struct {
+	v, r, s *big.Int
+	Signer
+}
+
+func (ns *nilSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) {
+	return ns.v, ns.r, ns.s, nil
+}
+
+// TestNilSigner ensures a faulty Signer implementation does not result in nil signature values or panics.
+func TestNilSigner(t *testing.T) {
+	key, _ := crypto.GenerateKey()
+	innerSigner := LatestSignerForChainID(big.NewInt(1))
+	for i, signer := range []Signer{
+		&nilSigner{v: nil, r: nil, s: nil, Signer: innerSigner},
+		&nilSigner{v: big.NewInt(1), r: big.NewInt(1), s: nil, Signer: innerSigner},
+		&nilSigner{v: big.NewInt(1), r: nil, s: big.NewInt(1), Signer: innerSigner},
+		&nilSigner{v: nil, r: big.NewInt(1), s: big.NewInt(1), Signer: innerSigner},
+	} {
+		t.Run(fmt.Sprintf("signer_%d", i), func(t *testing.T) {
+			t.Run("legacy", func(t *testing.T) {
+				legacyTx := createTestLegacyTxInner()
+				_, err := SignNewTx(key, signer, legacyTx)
+				if !errors.Is(err, ErrInvalidSig) {
+					t.Fatal("expected signature values error, no nil result or panic")
+				}
+			})
+			// test Blob tx specifically, since the signature value types changed
+			t.Run("blobtx", func(t *testing.T) {
+				blobtx := createEmptyBlobTxInner(false)
+				_, err := SignNewTx(key, signer, blobtx)
+				if !errors.Is(err, ErrInvalidSig) {
+					t.Fatal("expected signature values error, no nil result or panic")
+				}
+			})
+		})
+	}
+}
+
+func createTestLegacyTxInner() *LegacyTx {
+	return &LegacyTx{
+		Nonce:    uint64(0),
+		To:       nil,
+		Value:    big.NewInt(0),
+		Gas:      params.TxGas,
+		GasPrice: big.NewInt(params.GWei),
+		Data:     nil,
+	}
+}
@@ -43,7 +43,7 @@ type BlobTx struct {
 	BlobHashes []common.Hash

 	// A blob transaction can optionally contain blobs. This field must be set when BlobTx
-	// is used to create a transaction for sigining.
+	// is used to create a transaction for signing.
 	Sidecar *BlobTxSidecar `rlp:"-"`

 	// Signature values
@@ -65,6 +65,12 @@ var (
 )

 func createEmptyBlobTx(key *ecdsa.PrivateKey, withSidecar bool) *Transaction {
+	blobtx := createEmptyBlobTxInner(withSidecar)
+	signer := NewCancunSigner(blobtx.ChainID.ToBig())
+	return MustSignNewTx(key, signer, blobtx)
+}
+
+func createEmptyBlobTxInner(withSidecar bool) *BlobTx {
 	sidecar := &BlobTxSidecar{
 		Blobs:       []kzg4844.Blob{emptyBlob},
 		Commitments: []kzg4844.Commitment{emptyBlobCommit},
@@ -85,6 +91,5 @@ func createEmptyBlobTx(key *ecdsa.PrivateKey, withSidecar bool) *Transaction {
 	if withSidecar {
 		blobtx.Sidecar = sidecar
 	}
-	signer := NewCancunSigner(blobtx.ChainID.ToBig())
-	return MustSignNewTx(key, signer, blobtx)
+	return blobtx
 }
@@ -223,7 +223,7 @@ func BenchmarkPrecompiledRipeMD(bench *testing.B) {
 	benchmarkPrecompiled("03", t, bench)
 }

-// Benchmarks the sample inputs from the identiy precompile.
+// Benchmarks the sample inputs from the identity precompile.
 func BenchmarkPrecompiledIdentity(bench *testing.B) {
 	t := precompiledTest{
 		Input: "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
@@ -182,7 +182,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
 		return nil, gas, ErrDepth
 	}
 	// Fail if we're trying to transfer more than the available balance
-	if value.Sign() != 0 && !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
+	if !value.IsZero() && !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
 		return nil, gas, ErrInsufficientBalance
 	}
 	snapshot := evm.StateDB.Snapshot()
@@ -190,7 +190,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
 	debug := evm.Config.Tracer != nil

 	if !evm.StateDB.Exist(addr) {
-		if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 {
+		if !isPrecompile && evm.chainRules.IsEIP158 && value.IsZero() {
 			// Calling a non existing account, don't do anything, but ping the tracer
 			if debug {
 				if evm.depth == 0 {
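IsZero is the idiomatic emptiness check on holiman/uint256 values, equivalent to Sign() == 0 for this unsigned type, which only ever has sign 0 or 1. A quick illustration:

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

func main() {
	zero := uint256.NewInt(0)
	one := uint256.NewInt(1)

	// For unsigned 256-bit integers Sign() is only ever 0 or 1,
	// so IsZero() expresses the intent directly.
	fmt.Println(zero.IsZero(), zero.Sign() == 0) // true true
	fmt.Println(one.IsZero(), one.Sign() == 0)   // false false
}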
@@ -347,9 +347,7 @@ func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
 }

 func opCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
-	l := new(uint256.Int)
-	l.SetUint64(uint64(len(scope.Contract.Code)))
-	scope.Stack.push(l)
+	scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Code))))
 	return nil, nil
 }

@@ -147,7 +147,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
 		debug = in.evm.Config.Tracer != nil
 	)
 	// Don't move this deferred function, it's placed before the capturestate-deferred method,
-	// so that it get's executed _after_: the capturestate needs the stacks before
+	// so that it gets executed _after_: the capturestate needs the stacks before
 	// they are returned to the pools
 	defer func() {
 		returnStack(stack)
@@ -22,7 +22,7 @@ import (
 	"github.com/stretchr/testify/require"
 )

-// TestJumpTableCopy tests that deep copy is necessery to prevent modify shared jump table
+// TestJumpTableCopy tests that deep copy is necessary to prevent modify shared jump table
 func TestJumpTableCopy(t *testing.T) {
 	tbl := newMergeInstructionSet()
 	require.Equal(t, uint64(0), tbl[SLOAD].constantGas)
@@ -27,7 +27,7 @@ import (
 // If z is equal to one the point is considered as in affine form.
 type PointG2 [3]fe2

-// Set copies valeus of one point to another.
+// Set copies values of one point to another.
 func (p *PointG2) Set(p2 *PointG2) *PointG2 {
 	p[0].set(&p2[0])
 	p[1].set(&p2[1])
@@ -166,7 +166,7 @@ type G2 struct {
 	p *twistPoint
 }

-// RandomG1 returns x and g₂ˣ where x is a random, non-zero number read from r.
+// RandomG2 returns x and g₂ˣ where x is a random, non-zero number read from r.
 func RandomG2(r io.Reader) (*big.Int, *G2, error) {
 	var k *big.Int
 	var err error
@@ -21,21 +21,60 @@ import (
 	"embed"
 	"errors"
 	"hash"
+	"reflect"
 	"sync/atomic"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
 )

 //go:embed trusted_setup.json
 var content embed.FS

+var (
+	blobT       = reflect.TypeOf(Blob{})
+	commitmentT = reflect.TypeOf(Commitment{})
+	proofT      = reflect.TypeOf(Proof{})
+)
+
 // Blob represents a 4844 data blob.
 type Blob [131072]byte

+// UnmarshalJSON parses a blob in hex syntax.
+func (b *Blob) UnmarshalJSON(input []byte) error {
+	return hexutil.UnmarshalFixedJSON(blobT, input, b[:])
+}
+
+// MarshalText returns the hex representation of b.
+func (b Blob) MarshalText() ([]byte, error) {
+	return hexutil.Bytes(b[:]).MarshalText()
+}
+
 // Commitment is a serialized commitment to a polynomial.
 type Commitment [48]byte

+// UnmarshalJSON parses a commitment in hex syntax.
+func (c *Commitment) UnmarshalJSON(input []byte) error {
+	return hexutil.UnmarshalFixedJSON(commitmentT, input, c[:])
+}
+
+// MarshalText returns the hex representation of c.
+func (c Commitment) MarshalText() ([]byte, error) {
+	return hexutil.Bytes(c[:]).MarshalText()
+}
+
 // Proof is a serialized commitment to the quotient polynomial.
 type Proof [48]byte

+// UnmarshalJSON parses a proof in hex syntax.
+func (p *Proof) UnmarshalJSON(input []byte) error {
+	return hexutil.UnmarshalFixedJSON(proofT, input, p[:])
+}
+
+// MarshalText returns the hex representation of p.
+func (p Proof) MarshalText() ([]byte, error) {
+	return hexutil.Bytes(p[:]).MarshalText()
+}
+
 // Point is a BLS field element.
 type Point [32]byte

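These methods let blob sidecar components round-trip through JSON as fixed-length hex strings; hexutil.UnmarshalFixedJSON rejects anything that is not exactly the declared byte length. A small sketch of the round trip:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

func main() {
	var c kzg4844.Commitment // zero-valued 48-byte commitment

	// MarshalText renders the commitment as a 0x-prefixed hex string.
	out, _ := json.Marshal(c)
	fmt.Println(len(out)) // 100: two quotes + "0x" + 96 hex characters

	// UnmarshalJSON enforces the exact 48-byte length.
	var back kzg4844.Commitment
	fmt.Println(json.Unmarshal(out, &back))                     // <nil>
	fmt.Println(json.Unmarshal([]byte(`"0x00"`), &back) != nil) // true: wrong length
}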
@@ -87,7 +87,7 @@ The blocks on the 'bad' chain were investigated, and Tim Beiko reached out to th

 ### Disclosure decision

-The geth-team have an official policy regarding [vulnerability disclosure](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities).
+The geth-team have an official policy regarding [vulnerability disclosure](https://geth.ethereum.org/docs/developers/geth-developer/disclosures).

 > The primary goal for the Geth team is the health of the Ethereum network as a whole, and the decision whether or not to publish details about a serious vulnerability boils down to minimizing the risk and/or impact of discovery and exploitation.

@@ -64,6 +64,7 @@ func (api *MinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
 	api.e.lock.Unlock()

 	api.e.txPool.SetGasTip((*big.Int)(&gasPrice))
+	api.e.Miner().SetGasTip((*big.Int)(&gasPrice))
 	return true
 }

@@ -173,8 +173,8 @@ func newConsensusAPIWithoutHeartbeat(eth *eth.Ethereum) *ConsensusAPI {
 // and return its payloadID.
 func (api *ConsensusAPI) ForkchoiceUpdatedV1(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
 	if payloadAttributes != nil {
-		if payloadAttributes.Withdrawals != nil {
-			return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
+		if payloadAttributes.Withdrawals != nil || payloadAttributes.BeaconRoot != nil {
+			return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("withdrawals and beacon root not supported in V1"))
 		}
 		if api.eth.BlockChain().Config().IsShanghai(api.eth.BlockChain().Config().LondonBlock, payloadAttributes.Timestamp) {
 			return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("forkChoiceUpdateV1 called post-shanghai"))
@@ -183,23 +183,31 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV1(update engine.ForkchoiceStateV1, pa
 	return api.forkchoiceUpdated(update, payloadAttributes, engine.PayloadV1, false)
 }

-// ForkchoiceUpdatedV2 is equivalent to V1 with the addition of withdrawals in the payload attributes.
+// ForkchoiceUpdatedV2 is equivalent to V1 with the addition of withdrawals in the payload
+// attributes. It supports both PayloadAttributesV1 and PayloadAttributesV2.
 func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
 	if params != nil {
+		switch api.eth.BlockChain().Config().LatestFork(params.Timestamp) {
+		case forks.Paris:
+			if params.Withdrawals != nil {
+				return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("withdrawals before shanghai"))
+			}
+		case forks.Shanghai:
 			if params.Withdrawals == nil {
 				return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing withdrawals"))
 			}
+		default:
+			return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV2 must only be called with paris and shanghai payloads"))
+		}
 		if params.BeaconRoot != nil {
 			return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("unexpected beacon root"))
 		}
-		if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Shanghai {
-			return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV2 must only be called for shanghai payloads"))
-		}
 	}
 	return api.forkchoiceUpdated(update, params, engine.PayloadV2, false)
 }

-// ForkchoiceUpdatedV3 is equivalent to V2 with the addition of parent beacon block root in the payload attributes.
+// ForkchoiceUpdatedV3 is equivalent to V2 with the addition of parent beacon block root
+// in the payload attributes. It supports only PayloadAttributesV3.
 func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
 	if params != nil {
 		// TODO(matt): according to https://github.com/ethereum/execution-apis/pull/498,
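The V2 handler now dispatches on the fork active at the attribute timestamp instead of a single Shanghai equality check, which both admits Paris-era attributes (without withdrawals) and gives a clearer error outside the supported range. A self-contained sketch of the same dispatch shape (the fork enum and attrs struct below are stand-ins, not the engine API types):

package main

import (
	"errors"
	"fmt"
)

type fork int

const (
	paris fork = iota
	shanghai
	cancun
)

type attrs struct {
	withdrawals []string // stand-in for the withdrawals list
	beaconRoot  *string  // stand-in for the parent beacon block root
}

// validateV2 mirrors the new ForkchoiceUpdatedV2 attribute checks.
func validateV2(f fork, a *attrs) error {
	if a == nil {
		return nil
	}
	switch f {
	case paris:
		if a.withdrawals != nil {
			return errors.New("withdrawals before shanghai")
		}
	case shanghai:
		if a.withdrawals == nil {
			return errors.New("missing withdrawals")
		}
	default:
		return errors.New("forkchoiceUpdatedV2 must only be called with paris and shanghai payloads")
	}
	if a.beaconRoot != nil {
		return errors.New("unexpected beacon root")
	}
	return nil
}

func main() {
	fmt.Println(validateV2(paris, &attrs{}))                         // <nil>
	fmt.Println(validateV2(shanghai, &attrs{}))                      // missing withdrawals
	fmt.Println(validateV2(cancun, &attrs{withdrawals: []string{}})) // unsupported fork
}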
@@ -50,7 +50,8 @@ func newBeaconBackfiller(dl *Downloader, success func()) backfiller {
 }

 // suspend cancels any background downloader threads and returns the last header
-// that has been successfully backfilled.
+// that has been successfully backfilled (potentially in a previous run), or the
+// genesis.
 func (b *beaconBackfiller) suspend() *types.Header {
 	// If no filling is running, don't waste cycles
 	b.lock.Lock()
@@ -611,6 +611,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
 			if err := d.lightchain.SetHead(origin); err != nil {
 				return err
 			}
+			log.Info("Truncated excess ancient chain segment", "oldhead", frozen-1, "newhead", origin)
 		}
 	}
 	// Initiate the sync using a concurrent header and content retrieval algorithm
@@ -440,9 +440,6 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
 func TestCanonicalSynchronisation68Full(t *testing.T)  { testCanonSync(t, eth.ETH68, FullSync) }
 func TestCanonicalSynchronisation68Snap(t *testing.T)  { testCanonSync(t, eth.ETH68, SnapSync) }
 func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) }
-func TestCanonicalSynchronisation67Full(t *testing.T)  { testCanonSync(t, eth.ETH67, FullSync) }
-func TestCanonicalSynchronisation67Snap(t *testing.T)  { testCanonSync(t, eth.ETH67, SnapSync) }
-func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) }

 func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -463,8 +460,6 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
 // until the cached blocks are retrieved.
 func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) }
 func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) }
-func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) }
-func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) }

 func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -546,9 +541,6 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
 func TestForkedSync68Full(t *testing.T)  { testForkedSync(t, eth.ETH68, FullSync) }
 func TestForkedSync68Snap(t *testing.T)  { testForkedSync(t, eth.ETH68, SnapSync) }
 func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) }
-func TestForkedSync67Full(t *testing.T)  { testForkedSync(t, eth.ETH67, FullSync) }
-func TestForkedSync67Snap(t *testing.T)  { testForkedSync(t, eth.ETH67, SnapSync) }
-func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) }

 func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -576,9 +568,6 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 func TestHeavyForkedSync68Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH68, FullSync) }
 func TestHeavyForkedSync68Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH68, SnapSync) }
 func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) }
-func TestHeavyForkedSync67Full(t *testing.T)  { testHeavyForkedSync(t, eth.ETH67, FullSync) }
-func TestHeavyForkedSync67Snap(t *testing.T)  { testHeavyForkedSync(t, eth.ETH67, SnapSync) }
-func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) }

 func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -608,9 +597,6 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 func TestBoundedForkedSync68Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH68, FullSync) }
 func TestBoundedForkedSync68Snap(t *testing.T)  { testBoundedForkedSync(t, eth.ETH68, SnapSync) }
 func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) }
-func TestBoundedForkedSync67Full(t *testing.T)  { testBoundedForkedSync(t, eth.ETH67, FullSync) }
-func TestBoundedForkedSync67Snap(t *testing.T)  { testBoundedForkedSync(t, eth.ETH67, SnapSync) }
-func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) }

 func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -645,15 +631,6 @@ func TestBoundedHeavyForkedSync68Snap(t *testing.T) {
 func TestBoundedHeavyForkedSync68Light(t *testing.T) {
 	testBoundedHeavyForkedSync(t, eth.ETH68, LightSync)
 }
-func TestBoundedHeavyForkedSync67Full(t *testing.T) {
-	testBoundedHeavyForkedSync(t, eth.ETH67, FullSync)
-}
-func TestBoundedHeavyForkedSync67Snap(t *testing.T) {
-	testBoundedHeavyForkedSync(t, eth.ETH67, SnapSync)
-}
-func TestBoundedHeavyForkedSync67Light(t *testing.T) {
-	testBoundedHeavyForkedSync(t, eth.ETH67, LightSync)
-}

 func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -681,9 +658,6 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
 func TestCancel68Full(t *testing.T)  { testCancel(t, eth.ETH68, FullSync) }
 func TestCancel68Snap(t *testing.T)  { testCancel(t, eth.ETH68, SnapSync) }
 func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) }
-func TestCancel67Full(t *testing.T)  { testCancel(t, eth.ETH67, FullSync) }
-func TestCancel67Snap(t *testing.T)  { testCancel(t, eth.ETH67, SnapSync) }
-func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) }

 func testCancel(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -711,9 +685,6 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) {
 func TestMultiSynchronisation68Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH68, FullSync) }
 func TestMultiSynchronisation68Snap(t *testing.T)  { testMultiSynchronisation(t, eth.ETH68, SnapSync) }
 func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) }
-func TestMultiSynchronisation67Full(t *testing.T)  { testMultiSynchronisation(t, eth.ETH67, FullSync) }
-func TestMultiSynchronisation67Snap(t *testing.T)  { testMultiSynchronisation(t, eth.ETH67, SnapSync) }
-func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) }

 func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -738,9 +709,6 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
 func TestMultiProtoSynchronisation68Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH68, FullSync) }
 func TestMultiProtoSynchronisation68Snap(t *testing.T)  { testMultiProtoSync(t, eth.ETH68, SnapSync) }
 func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) }
-func TestMultiProtoSynchronisation67Full(t *testing.T)  { testMultiProtoSync(t, eth.ETH67, FullSync) }
-func TestMultiProtoSynchronisation67Snap(t *testing.T)  { testMultiProtoSync(t, eth.ETH67, SnapSync) }
-func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) }

 func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -751,7 +719,6 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {

 	// Create peers of every type
 	tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
-	tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])

 	// Synchronise with the requested peer and make sure all blocks were retrieved
 	if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
@@ -760,7 +727,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
 	assertOwnChain(t, tester, len(chain.blocks))

 	// Check that no peers have been dropped off
-	for _, version := range []int{68, 67} {
+	for _, version := range []int{68} {
 		peer := fmt.Sprintf("peer %d", version)
 		if _, ok := tester.peers[peer]; !ok {
 			t.Errorf("%s dropped", peer)
@@ -773,9 +740,6 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
 func TestEmptyShortCircuit68Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH68, FullSync) }
 func TestEmptyShortCircuit68Snap(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH68, SnapSync) }
 func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) }
-func TestEmptyShortCircuit67Full(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH67, FullSync) }
-func TestEmptyShortCircuit67Snap(t *testing.T)  { testEmptyShortCircuit(t, eth.ETH67, SnapSync) }
-func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) }

 func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -824,9 +788,6 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
 func TestMissingHeaderAttack68Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH68, FullSync) }
 func TestMissingHeaderAttack68Snap(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH68, SnapSync) }
 func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) }
-func TestMissingHeaderAttack67Full(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH67, FullSync) }
-func TestMissingHeaderAttack67Snap(t *testing.T)  { testMissingHeaderAttack(t, eth.ETH67, SnapSync) }
-func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) }

 func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -853,9 +814,6 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
 func TestShiftedHeaderAttack68Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH68, FullSync) }
 func TestShiftedHeaderAttack68Snap(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) }
 func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) }
-func TestShiftedHeaderAttack67Full(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH67, FullSync) }
-func TestShiftedHeaderAttack67Snap(t *testing.T)  { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) }
-func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) }

 func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -889,15 +847,6 @@ func TestHighTDStarvationAttack68Snap(t *testing.T) {
 func TestHighTDStarvationAttack68Light(t *testing.T) {
 	testHighTDStarvationAttack(t, eth.ETH68, LightSync)
 }
-func TestHighTDStarvationAttack67Full(t *testing.T) {
-	testHighTDStarvationAttack(t, eth.ETH67, FullSync)
-}
-func TestHighTDStarvationAttack67Snap(t *testing.T) {
-	testHighTDStarvationAttack(t, eth.ETH67, SnapSync)
-}
-func TestHighTDStarvationAttack67Light(t *testing.T) {
-	testHighTDStarvationAttack(t, eth.ETH67, LightSync)
-}

 func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -912,7 +861,6 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {

 // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
 func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) }
-func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) }

 func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
 	// Define the disconnection requirement for individual hash fetch errors
@@ -963,9 +911,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
 func TestSyncProgress68Full(t *testing.T)  { testSyncProgress(t, eth.ETH68, FullSync) }
 func TestSyncProgress68Snap(t *testing.T)  { testSyncProgress(t, eth.ETH68, SnapSync) }
 func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) }
-func TestSyncProgress67Full(t *testing.T)  { testSyncProgress(t, eth.ETH67, FullSync) }
-func TestSyncProgress67Snap(t *testing.T)  { testSyncProgress(t, eth.ETH67, SnapSync) }
-func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) }

 func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -1043,9 +988,6 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
 func TestForkedSyncProgress68Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH68, FullSync) }
 func TestForkedSyncProgress68Snap(t *testing.T)  { testForkedSyncProgress(t, eth.ETH68, SnapSync) }
 func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) }
-func TestForkedSyncProgress67Full(t *testing.T)  { testForkedSyncProgress(t, eth.ETH67, FullSync) }
-func TestForkedSyncProgress67Snap(t *testing.T)  { testForkedSyncProgress(t, eth.ETH67, SnapSync) }
-func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) }

 func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -1117,9 +1059,6 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 func TestFailedSyncProgress68Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH68, FullSync) }
 func TestFailedSyncProgress68Snap(t *testing.T)  { testFailedSyncProgress(t, eth.ETH68, SnapSync) }
 func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) }
-func TestFailedSyncProgress67Full(t *testing.T)  { testFailedSyncProgress(t, eth.ETH67, FullSync) }
-func TestFailedSyncProgress67Snap(t *testing.T)  { testFailedSyncProgress(t, eth.ETH67, SnapSync) }
-func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) }

 func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -1186,9 +1125,6 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 func TestFakedSyncProgress68Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH68, FullSync) }
 func TestFakedSyncProgress68Snap(t *testing.T)  { testFakedSyncProgress(t, eth.ETH68, SnapSync) }
 func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) }
-func TestFakedSyncProgress67Full(t *testing.T)  { testFakedSyncProgress(t, eth.ETH67, FullSync) }
-func TestFakedSyncProgress67Snap(t *testing.T)  { testFakedSyncProgress(t, eth.ETH67, SnapSync) }
-func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) }

 func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
 	tester := newTester(t)
@@ -1332,8 +1268,6 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
 // being fast-synced from, avoiding potential cheap eclipse attacks.
 func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) }
 func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) }
-func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) }
-func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) }

 func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
 	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
@@ -161,7 +161,7 @@ type backfiller interface {
 	// on initial startup.
 	//
 	// The method should return the last block header that has been successfully
-	// backfilled, or nil if the backfiller was not resumed.
+	// backfilled (in the current or a previous run), falling back to the genesis.
 	suspend() *types.Header

 	// resume requests the backfiller to start running fill or snap sync based on
@@ -382,7 +382,11 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
 		done := make(chan struct{})
 		go func() {
 			defer close(done)
-			if filled := s.filler.suspend(); filled != nil {
+			filled := s.filler.suspend()
+			if filled == nil {
+				log.Error("Latest filled block is not available")
+				return
+			}
 			// If something was filled, try to delete stale sync helpers. If
 			// unsuccessful, warn the user, but not much else we can do (it's
 			// a programming error, just let users report an issue and don't
@@ -390,7 +394,6 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
 			if err := s.cleanStales(filled); err != nil {
 				log.Error("Failed to clean stale beacon headers", "err", err)
 			}
-			}
 		}()
 		// Wait for the suspend to finish, consuming head events in the meantime
 		// and dropping them on the floor.
@@ -1120,26 +1123,38 @@ func (s *skeleton) cleanStales(filled *types.Header) error {
 	number := filled.Number.Uint64()
 	log.Trace("Cleaning stale beacon headers", "filled", number, "hash", filled.Hash())

-	// If the filled header is below the linked subchain, something's
-	// corrupted internally. Report and error and refuse to do anything.
-	if number < s.progress.Subchains[0].Tail {
+	// If the filled header is below the linked subchain, something's corrupted
+	// internally. Report and error and refuse to do anything.
+	if number+1 < s.progress.Subchains[0].Tail {
 		return fmt.Errorf("filled header below beacon header tail: %d < %d", number, s.progress.Subchains[0].Tail)
 	}
-	// Subchain seems trimmable, push the tail forward up to the last
-	// filled header and delete everything before it - if available. In
-	// case we filled past the head, recreate the subchain with a new
-	// head to keep it consistent with the data on disk.
+	// If nothing in subchain is filled, don't bother to do cleanup.
+	if number+1 == s.progress.Subchains[0].Tail {
+		return nil
+	}
 	var (
-		start = s.progress.Subchains[0].Tail // start deleting from the first known header
-		end   = number                       // delete until the requested threshold
+		start uint64
+		end   uint64
 		batch = s.db.NewBatch()
 	)
-	s.progress.Subchains[0].Tail = number
+	if number < s.progress.Subchains[0].Head {
+		// The skeleton chain is partially consumed, set the new tail as filled+1.
+		tail := rawdb.ReadSkeletonHeader(s.db, number+1)
+		if tail.ParentHash != filled.Hash() {
+			return fmt.Errorf("filled header is discontinuous with subchain: %d %s, please file an issue", number, filled.Hash())
+		}
+		start, end = s.progress.Subchains[0].Tail, number+1 // remove headers in [tail, filled]
+		s.progress.Subchains[0].Tail = tail.Number.Uint64()
+		s.progress.Subchains[0].Next = tail.ParentHash
+	} else {
+		// The skeleton chain is fully consumed, set both head and tail as filled.
+		start, end = s.progress.Subchains[0].Tail, filled.Number.Uint64() // remove headers in [tail, filled)
+		s.progress.Subchains[0].Tail = filled.Number.Uint64()
 		s.progress.Subchains[0].Next = filled.ParentHash

-	if s.progress.Subchains[0].Head < number {
-		// If more headers were filled than available, push the entire
-		// subchain forward to keep tracking the node's block imports
+		// If more headers were filled than available, push the entire subchain
+		// forward to keep tracking the node's block imports.
+		if number > s.progress.Subchains[0].Head {
 			end = s.progress.Subchains[0].Head + 1 // delete the entire original range, including the head
 			s.progress.Subchains[0].Head = number  // assign a new head (tail is already assigned to this)
@@ -1148,6 +1163,7 @@ func (s *skeleton) cleanStales(filled *types.Header) error {
 			// disk to keep internal state consistent.
 			rawdb.WriteSkeletonHeader(batch, filled)
 		}
+	}
 	// Execute the trimming and the potential rewiring of the progress
 	s.saveSyncStatus(batch)
 	for n := start; n < end; n++ {
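The rewrite splits the trim into two cases: a partially consumed subchain keeps a tail at filled+1 (after checking hash continuity), while a fully consumed one collapses head and tail onto the filled header. A self-contained sketch of just the range arithmetic (subchain is a stand-in struct, not the geth type, and the hash-continuity check is omitted):

package main

import "fmt"

type subchain struct{ head, tail uint64 }

// trimRange mirrors the new cleanStales bookkeeping: it returns the
// [start, end) range of stale skeleton headers to delete and the
// updated subchain, given the last filled block number.
func trimRange(sc subchain, filled uint64) (start, end uint64, out subchain) {
	if filled+1 == sc.tail {
		return 0, 0, sc // nothing filled yet, nothing to clean
	}
	if filled < sc.head {
		// Partially consumed: new tail is filled+1, delete [tail, filled].
		return sc.tail, filled + 1, subchain{head: sc.head, tail: filled + 1}
	}
	// Fully consumed: head and tail both move to filled, delete [tail, filled).
	out = subchain{head: filled, tail: filled}
	start, end = sc.tail, filled
	if filled > sc.head {
		// Filled past the head: drop the entire original range, head included.
		end = sc.head + 1
	}
	return start, end, out
}

func main() {
	fmt.Println(trimRange(subchain{head: 100, tail: 50}, 60))  // 50 61 {100 61}
	fmt.Println(trimRange(subchain{head: 100, tail: 50}, 100)) // 50 100 {100 100}
	fmt.Println(trimRange(subchain{head: 100, tail: 50}, 120)) // 50 101 {120 120}
}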
@@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
 		// Create a peer set to feed headers through
 		peerset := newPeerSet()
 		for _, peer := range tt.peers {
-			peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id)))
+			peerset.Register(newPeerConnection(peer.id, eth.ETH68, peer, log.New("id", peer.id)))
 		}
 		// Create a peer dropper to track malicious peers
 		dropped := make(map[string]int)
@@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
 			skeleton.Sync(tt.newHead, nil, true)
 		}
 		if tt.newPeer != nil {
-			if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
+			if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH68, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
 				t.Errorf("test %d: failed to register new peer: %v", i, err)
 			}
 		}
@@ -227,8 +227,8 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks uint64, unresolvedL
 		if p < 0 || p > 100 {
 			return common.Big0, nil, nil, nil, fmt.Errorf("%w: %f", errInvalidPercentile, p)
 		}
-		if i > 0 && p < rewardPercentiles[i-1] {
-			return common.Big0, nil, nil, nil, fmt.Errorf("%w: #%d:%f > #%d:%f", errInvalidPercentile, i-1, rewardPercentiles[i-1], i, p)
+		if i > 0 && p <= rewardPercentiles[i-1] {
+			return common.Big0, nil, nil, nil, fmt.Errorf("%w: #%d:%f >= #%d:%f", errInvalidPercentile, i-1, rewardPercentiles[i-1], i, p)
 		}
 	}
 	var (
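Reward percentiles passed to eth_feeHistory must now be strictly increasing, not merely non-decreasing: a duplicate such as [25, 25, 50] is rejected alongside out-of-order lists. A self-contained sketch of the tightened check:

package main

import (
	"errors"
	"fmt"
)

var errInvalidPercentile = errors.New("invalid reward percentile")

// validatePercentiles mirrors the FeeHistory argument check: every value
// must lie in [0, 100] and be strictly greater than its predecessor.
func validatePercentiles(ps []float64) error {
	for i, p := range ps {
		if p < 0 || p > 100 {
			return fmt.Errorf("%w: %f", errInvalidPercentile, p)
		}
		if i > 0 && p <= ps[i-1] {
			return fmt.Errorf("%w: #%d:%f >= #%d:%f", errInvalidPercentile, i-1, ps[i-1], i, p)
		}
	}
	return nil
}

func main() {
	fmt.Println(validatePercentiles([]float64{25, 50, 75})) // <nil>
	fmt.Println(validatePercentiles([]float64{25, 25, 50})) // rejected: duplicate
}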
@@ -67,10 +67,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
 	case *eth.NewBlockPacket:
 		return h.handleBlockBroadcast(peer, packet.Block, packet.TD)

-	case *eth.NewPooledTransactionHashesPacket67:
-		return h.txFetcher.Notify(peer.ID(), nil, nil, *packet)
-
-	case *eth.NewPooledTransactionHashesPacket68:
+	case *eth.NewPooledTransactionHashesPacket:
 		return h.txFetcher.Notify(peer.ID(), packet.Types, packet.Sizes, packet.Hashes)

 	case *eth.TransactionsPacket:
|
@ -58,11 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
|
|||||||
h.blockBroadcasts.Send(packet.Block)
|
h.blockBroadcasts.Send(packet.Block)
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
case *eth.NewPooledTransactionHashesPacket67:
|
case *eth.NewPooledTransactionHashesPacket:
|
||||||
h.txAnnounces.Send(([]common.Hash)(*packet))
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case *eth.NewPooledTransactionHashesPacket68:
|
|
||||||
h.txAnnounces.Send(packet.Hashes)
|
h.txAnnounces.Send(packet.Hashes)
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
@@ -81,7 +77,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {

 // Tests that peers are correctly accepted (or rejected) based on the advertised
 // fork IDs in the protocol handshake.
-func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) }
 func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) }

 func testForkIDSplit(t *testing.T, protocol uint) {
@@ -236,7 +231,6 @@ func testForkIDSplit(t *testing.T, protocol uint) {
 }

 // Tests that received transactions are added to the local pool.
-func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) }
 func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) }

 func testRecvTransactions(t *testing.T, protocol uint) {
@@ -294,7 +288,6 @@ func testRecvTransactions(t *testing.T, protocol uint) {
 }

 // This test checks that pending transactions are sent.
-func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) }
 func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) }

 func testSendTransactions(t *testing.T, protocol uint) {
@@ -353,7 +346,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
 	seen := make(map[common.Hash]struct{})
 	for len(seen) < len(insert) {
 		switch protocol {
-		case 67, 68:
+		case 68:
 			select {
 			case hashes := <-anns:
 				for _, hash := range hashes {
@@ -379,7 +372,6 @@ func testSendTransactions(t *testing.T, protocol uint) {

 // Tests that transactions get propagated to all attached peers, either via direct
 // broadcasts or via announcements/retrievals.
-func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) }
 func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) }

 func testTransactionPropagation(t *testing.T, protocol uint) {
@@ -486,8 +478,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
 		defer sourcePipe.Close()
 		defer sinkPipe.Close()

-		sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
-		sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
+		sourcePeer := eth.NewPeer(eth.ETH68, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
+		sinkPeer := eth.NewPeer(eth.ETH68, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
 		defer sourcePeer.Close()
 		defer sinkPeer.Close()

@@ -539,7 +531,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {

 // Tests that a propagated malformed block (uncles or transactions don't match
 // with the hashes in the header) gets discarded and not broadcast forward.
-func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) }
 func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) }

 func testBroadcastMalformedBlock(t *testing.T, protocol uint) {
@@ -163,17 +163,10 @@ func (p *Peer) announceTransactions() {
 			if len(pending) > 0 {
 				done = make(chan struct{})
 				go func() {
-					if p.version >= ETH68 {
-						if err := p.sendPooledTransactionHashes68(pending, pendingTypes, pendingSizes); err != nil {
-							fail <- err
-							return
-						}
-					} else {
-						if err := p.sendPooledTransactionHashes66(pending); err != nil {
-							fail <- err
-							return
-						}
+					if err := p.sendPooledTransactionHashes(pending, pendingTypes, pendingSizes); err != nil {
+						fail <- err
+						return
 					}
 					close(done)
 					p.Log().Trace("Sent transaction announcements", "count", len(pending))
 				}()
@@ -93,10 +93,6 @@ type TxPool interface {
 func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
 	protocols := make([]p2p.Protocol, 0, len(ProtocolVersions))
 	for _, version := range ProtocolVersions {
-		// Blob transactions require eth/68 announcements, disable everything else
-		if version <= ETH67 && backend.Chain().Config().CancunTime != nil {
-			continue
-		}
 		version := version // Closure

 		protocols = append(protocols, p2p.Protocol{
@@ -166,26 +162,11 @@ type Decoder interface {
 	Time() time.Time
 }

-var eth67 = map[uint64]msgHandler{
-	NewBlockHashesMsg:             handleNewBlockhashes,
-	NewBlockMsg:                   handleNewBlock,
-	TransactionsMsg:               handleTransactions,
-	NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67,
-	GetBlockHeadersMsg:            handleGetBlockHeaders,
-	BlockHeadersMsg:               handleBlockHeaders,
-	GetBlockBodiesMsg:             handleGetBlockBodies,
-	BlockBodiesMsg:                handleBlockBodies,
-	GetReceiptsMsg:                handleGetReceipts,
-	ReceiptsMsg:                   handleReceipts,
-	GetPooledTransactionsMsg:      handleGetPooledTransactions,
-	PooledTransactionsMsg:         handlePooledTransactions,
-}
-
 var eth68 = map[uint64]msgHandler{
 	NewBlockHashesMsg:             handleNewBlockhashes,
 	NewBlockMsg:                   handleNewBlock,
 	TransactionsMsg:               handleTransactions,
-	NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68,
+	NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes,
 	GetBlockHeadersMsg:            handleGetBlockHeaders,
 	BlockHeadersMsg:               handleBlockHeaders,
 	GetBlockBodiesMsg:             handleGetBlockBodies,
@@ -209,10 +190,8 @@ func handleMessage(backend Backend, peer *Peer) error {
 	}
 	defer msg.Discard()

-	var handlers = eth67
-	if peer.Version() >= ETH68 {
-		handlers = eth68
-	}
+	var handlers = eth68
+
 	// Track the amount of time it takes to serve the request and run the handler
 	if metrics.Enabled {
 		h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
@@ -150,7 +150,6 @@ func (b *testBackend) Handle(*Peer, Packet) error {
 }

 // Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) }
 func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) }

 func testGetBlockHeaders(t *testing.T, protocol uint) {
@@ -336,7 +335,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 }

 // Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) }
 func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) }

 func testGetBlockBodies(t *testing.T, protocol uint) {
@@ -431,7 +429,6 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
 }

 // Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) }
 func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) }

 func testGetBlockReceipts(t *testing.T, protocol uint) {
@@ -383,30 +383,13 @@ func handleReceipts(backend Backend, msg Decoder, peer *Peer) error {
 	}, metadata)
 }

-func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error {
+func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer) error {
 	// New transaction announcement arrived, make sure we have
 	// a valid and fresh chain to handle them
 	if !backend.AcceptTxs() {
 		return nil
 	}
-	ann := new(NewPooledTransactionHashesPacket67)
-	if err := msg.Decode(ann); err != nil {
-		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
-	}
-	// Schedule all the unknown hashes for retrieval
-	for _, hash := range *ann {
-		peer.markTransaction(hash)
-	}
-	return backend.Handle(peer, ann)
-}
-
-func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer) error {
-	// New transaction announcement arrived, make sure we have
-	// a valid and fresh chain to handle them
-	if !backend.AcceptTxs() {
-		return nil
-	}
-	ann := new(NewPooledTransactionHashesPacket68)
+	ann := new(NewPooledTransactionHashesPacket)
 	if err := msg.Decode(ann); err != nil {
 		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 	}
@@ -27,7 +27,6 @@ import (
 )

 // Tests that handshake failures are detected and reported correctly.
-func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) }
 func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) }

 func testHandshake(t *testing.T, protocol uint) {
@@ -210,29 +210,17 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
 	}
 }

-// sendPooledTransactionHashes66 sends transaction hashes to the peer and includes
-// them in its transaction hash set for future reference.
-//
-// This method is a helper used by the async transaction announcer. Don't call it
-// directly as the queueing (memory) and transmission (bandwidth) costs should
-// not be managed directly.
-func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error {
-	// Mark all the transactions as known, but ensure we don't overflow our limits
-	p.knownTxs.Add(hashes...)
-	return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes))
-}
-
-// sendPooledTransactionHashes68 sends transaction hashes (tagged with their type
+// sendPooledTransactionHashes sends transaction hashes (tagged with their type
 // and size) to the peer and includes them in its transaction hash set for future
 // reference.
 //
 // This method is a helper used by the async transaction announcer. Don't call it
 // directly as the queueing (memory) and transmission (bandwidth) costs should
 // not be managed directly.
-func (p *Peer) sendPooledTransactionHashes68(hashes []common.Hash, types []byte, sizes []uint32) error {
+func (p *Peer) sendPooledTransactionHashes(hashes []common.Hash, types []byte, sizes []uint32) error {
 	// Mark all the transactions as known, but ensure we don't overflow our limits
 	p.knownTxs.Add(hashes...)
-	return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket68{Types: types, Sizes: sizes, Hashes: hashes})
+	return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket{Types: types, Sizes: sizes, Hashes: hashes})
 }

 // AsyncSendPooledTransactionHashes queues a list of transactions hashes to eventually
@@ -30,7 +30,6 @@ import (

 // Constants to match up protocol versions and messages
 const (
-	ETH67 = 67
 	ETH68 = 68
 )

@@ -40,11 +39,11 @@ const ProtocolName = "eth"

 // ProtocolVersions are the supported versions of the `eth` protocol (first
 // is primary).
-var ProtocolVersions = []uint{ETH68, ETH67}
+var ProtocolVersions = []uint{ETH68}

 // protocolLengths are the number of implemented message corresponding to
 // different protocol versions.
-var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 17}
+var protocolLengths = map[uint]uint64{ETH68: 17}

 // maxMessageSize is the maximum cap on the size of a protocol message.
 const maxMessageSize = 10 * 1024 * 1024
@@ -283,11 +282,8 @@ type ReceiptsRLPPacket struct {
 	ReceiptsRLPResponse
 }

-// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67.
-type NewPooledTransactionHashesPacket67 []common.Hash
-
-// NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer.
-type NewPooledTransactionHashesPacket68 struct {
+// NewPooledTransactionHashesPacket represents a transaction announcement packet on eth/68 and newer.
+type NewPooledTransactionHashesPacket struct {
 	Types  []byte
 	Sizes  []uint32
 	Hashes []common.Hash
@@ -346,10 +342,8 @@ func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg }
 func (*NewBlockPacket) Name() string { return "NewBlock" }
 func (*NewBlockPacket) Kind() byte   { return NewBlockMsg }

-func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" }
-func (*NewPooledTransactionHashesPacket67) Kind() byte   { return NewPooledTransactionHashesMsg }
-func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" }
-func (*NewPooledTransactionHashesPacket68) Kind() byte   { return NewPooledTransactionHashesMsg }
+func (*NewPooledTransactionHashesPacket) Name() string { return "NewPooledTransactionHashes" }
+func (*NewPooledTransactionHashesPacket) Kind() byte   { return NewPooledTransactionHashesMsg }

 func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" }
 func (*GetPooledTransactionsRequest) Kind() byte   { return GetPooledTransactionsMsg }
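Since eth/68 (EIP-5793), an announcement carries the type and size of each transaction alongside its hash, one entry per hash in lockstep, which is what the consolidated packet above encodes. A hedged sketch of filling it from a set of pooled transactions (the helper is ours, not part of the protocol package; `eth` refers to eth/protocols/eth):

import (
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// buildAnnouncement packs type/size/hash triples for an eth/68 announcement.
// The three slices must stay index-aligned.
func buildAnnouncement(txs []*types.Transaction) eth.NewPooledTransactionHashesPacket {
	var p eth.NewPooledTransactionHashesPacket
	for _, tx := range txs {
		p.Types = append(p.Types, tx.Type())
		p.Sizes = append(p.Sizes, uint32(tx.Size()))
		p.Hashes = append(p.Hashes, tx.Hash())
	}
	return p
}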
@@ -28,7 +28,6 @@ import (
 )

 // Tests that snap sync is disabled after a successful sync cycle.
-func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) }
 func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) }

 // Tests that snap sync gets disabled as soon as a real block is successfully
@@ -219,7 +219,7 @@
 		return this.finalize(result);
 	},

-	// finalize recreates a call object using the final desired field oder for json
+	// finalize recreates a call object using the final desired field order for json
 	// serialization. This is a nicety feature to pass meaningfully ordered results
 	// to users who don't interpret it, just display it.
 	finalize: function(call) {
@@ -124,9 +124,9 @@ func TestMemCopying(t *testing.T) {
 		{0, 100, 0, "", 0},      // No need to pad (0 size)
 		{100, 50, 100, "", 100}, // Should pad 100-150
 		{100, 50, 5, "", 5},     // Wanted range fully within memory
-		{100, -50, 0, "offset or size must not be negative", 0},                        // Errror
-		{0, 1, 1024*1024 + 1, "reached limit for padding memory slice: 1048578", 0},    // Errror
-		{10, 0, 1024*1024 + 100, "reached limit for padding memory slice: 1048666", 0}, // Errror
+		{100, -50, 0, "offset or size must not be negative", 0},                        // Error
+		{0, 1, 1024*1024 + 1, "reached limit for padding memory slice: 1048578", 0},    // Error
+		{10, 0, 1024*1024 + 100, "reached limit for padding memory slice: 1048666", 0}, // Error

 	} {
 		mem := vm.NewMemory()
@@ -52,7 +52,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {

 	// create a signed transaction to send
 	head, _ := client.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
-	gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
+	gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
 	addr := crypto.PubkeyToAddress(key.PublicKey)
 	chainid, _ := client.ChainID(context.Background())
 	nonce, err := client.PendingNonceAt(context.Background(), addr)
@@ -62,7 +62,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
 	tx := types.NewTx(&types.DynamicFeeTx{
 		ChainID:   chainid,
 		Nonce:     nonce,
-		GasTipCap: big.NewInt(1),
+		GasTipCap: big.NewInt(params.GWei),
 		GasFeeCap: gasPrice,
 		Gas:       21000,
 		To:        &addr,
@@ -17,6 +17,8 @@
 package simulated

 import (
+	"math/big"
+
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/node"
 )
@@ -37,3 +39,17 @@ func WithCallGasLimit(gaslimit uint64) func(nodeConf *node.Config, ethConf *ethc
 		ethConf.RPCGasCap = gaslimit
 	}
 }
+
+// WithMinerMinTip configures the simulated backend to require a specific minimum
+// gas tip for a transaction to be included.
+//
+// 0 is not possible as a live Geth node would reject that due to DoS protection,
+// so the simulated backend will replicate that behavior for consistency.
+func WithMinerMinTip(tip *big.Int) func(nodeConf *node.Config, ethConf *ethconfig.Config) {
+	if tip == nil || tip.Cmp(new(big.Int)) <= 0 {
+		panic("invalid miner minimum tip")
+	}
+	return func(nodeConf *node.Config, ethConf *ethconfig.Config) {
+		ethConf.Miner.GasPrice = tip
+	}
+}
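A hedged usage sketch for the new option, assuming the NewBackend(alloc, options...) constructor of this release's ethclient/simulated package; the funded address and amounts are invented:

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Fund one test account; the address and balance are arbitrary.
	alloc := core.GenesisAlloc{
		common.HexToAddress("0x0000000000000000000000000000000000000001"): {Balance: big.NewInt(params.Ether)},
	}
	// Require a 1 gwei tip for inclusion, matching live-node defaults; this
	// pairs with the newTx change above that now tips params.GWei.
	sim := simulated.NewBackend(alloc, simulated.WithMinerMinTip(big.NewInt(params.GWei)))
	defer sim.Close()
}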
go.mod (5 changes)
@@ -22,8 +22,9 @@ require (
 	github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127
 	github.com/ethereum/c-kzg-4844 v0.4.0
 	github.com/fatih/color v1.13.0
+	github.com/ferranbt/fastssz v0.1.2
 	github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e
-	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
+	github.com/fjl/memsize v0.0.2
 	github.com/fsnotify/fsnotify v1.6.0
 	github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
 	github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46
@@ -114,10 +115,12 @@ require (
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/kilic/bls12-381 v0.1.0 // indirect
 	github.com/klauspost/compress v1.15.15 // indirect
+	github.com/klauspost/cpuid/v2 v2.0.9 // indirect
 	github.com/kr/pretty v0.3.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/minio/sha256-simd v1.0.0 // indirect
 	github.com/mitchellh/mapstructure v1.4.1 // indirect
 	github.com/mitchellh/pointerstructure v1.2.0 // indirect
 	github.com/mmcloughlin/addchain v0.4.0 // indirect
go.sum (15 changes)
@@ -187,10 +187,12 @@ github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk=
+github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs=
 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY=
 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
-github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
-github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
+github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
 github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -221,7 +223,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
-github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
 github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
 github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
@@ -400,6 +401,9 @@ github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
 github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
 github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -447,6 +451,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
 github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
 github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
 github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
+github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
@@ -524,6 +530,7 @@ github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 h1:cZC+usqsYgHtlBaGulVnZ1hfKAi8iWtujBnRLQE698c=
 github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
+github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48 h1:cSo6/vk8YpvkLbk9v3FO97cakNmUoxwi2KMP8hd5WIw=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -777,8 +784,6 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
 golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -40,7 +40,7 @@ func MustLoadChecksums(file string) *ChecksumDB {
 	if err != nil {
 		log.Fatal("can't load checksum file: " + err.Error())
 	}
-	return &ChecksumDB{strings.Split(string(content), "\n")}
+	return &ChecksumDB{strings.Split(strings.ReplaceAll(string(content), "\r\n", "\n"), "\n")}
 }

 // Verify checks whether the given file is valid according to the checksum database.
internal/era/accumulator.go (new file, 90 lines)
@@ -0,0 +1,90 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package era
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	ssz "github.com/ferranbt/fastssz"
+)
+
+// ComputeAccumulator calculates the SSZ hash tree root of the Era1
+// accumulator of header records.
+func ComputeAccumulator(hashes []common.Hash, tds []*big.Int) (common.Hash, error) {
+	if len(hashes) != len(tds) {
+		return common.Hash{}, fmt.Errorf("must have equal number hashes as td values")
+	}
+	if len(hashes) > MaxEra1Size {
+		return common.Hash{}, fmt.Errorf("too many records: have %d, max %d", len(hashes), MaxEra1Size)
+	}
+	hh := ssz.NewHasher()
+	for i := range hashes {
+		rec := headerRecord{hashes[i], tds[i]}
+		root, err := rec.HashTreeRoot()
+		if err != nil {
+			return common.Hash{}, err
+		}
+		hh.Append(root[:])
+	}
+	hh.MerkleizeWithMixin(0, uint64(len(hashes)), uint64(MaxEra1Size))
+	return hh.HashRoot()
+}
+
+// headerRecord is an individual record for a historical header.
+//
+// See https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#the-header-accumulator
+// for more information.
+type headerRecord struct {
+	Hash            common.Hash
+	TotalDifficulty *big.Int
+}
+
+// GetTree completes the ssz.HashRoot interface, but is unused.
+func (h *headerRecord) GetTree() (*ssz.Node, error) {
+	return nil, nil
+}
+
+// HashTreeRoot ssz hashes the headerRecord object.
+func (h *headerRecord) HashTreeRoot() ([32]byte, error) {
+	return ssz.HashWithDefaultHasher(h)
+}
+
+// HashTreeRootWith ssz hashes the headerRecord object with a hasher.
+func (h *headerRecord) HashTreeRootWith(hh ssz.HashWalker) (err error) {
+	hh.PutBytes(h.Hash[:])
+	td := bigToBytes32(h.TotalDifficulty)
+	hh.PutBytes(td[:])
+	hh.Merkleize(0)
+	return
+}
+
+// bigToBytes32 converts a big.Int into a little-endian 32-byte array.
+func bigToBytes32(n *big.Int) (b [32]byte) {
+	n.FillBytes(b[:])
+	reverseOrder(b[:])
+	return
+}
+
+// reverseOrder reverses the byte order of a slice.
+func reverseOrder(b []byte) []byte {
+	for i := 0; i < 16; i++ {
+		b[i], b[32-i-1] = b[32-i-1], b[i]
+	}
+	return b
+}
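A short sketch of calling ComputeAccumulator on hand-made records (the values are arbitrary, and internal/era is only importable from within the go-ethereum module itself):

// Hashes and total difficulties pair up index-by-index, one record per block.
hashes := []common.Hash{common.HexToHash("0x01"), common.HexToHash("0x02")}
tds := []*big.Int{big.NewInt(1), big.NewInt(3)}
root, err := era.ComputeAccumulator(hashes, tds)
if err != nil {
	panic(err)
}
fmt.Println("accumulator root:", root)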
internal/era/builder.go (new file, 224 lines)
@@ -0,0 +1,224 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+package era
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/internal/era/e2store"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/golang/snappy"
+)
+
+// Builder is used to create Era1 archives of block data.
+//
+// Era1 files are themselves e2store files. For more information on this format,
+// see https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md.
+//
+// The overall structure of an Era1 file follows closely the structure of an Era file
+// which contains consensus Layer data (and as a byproduct, EL data after the merge).
+//
+// The structure can be summarized through this definition:
+//
+//	era1 := Version | block-tuple* | other-entries* | Accumulator | BlockIndex
+//	block-tuple := CompressedHeader | CompressedBody | CompressedReceipts | TotalDifficulty
+//
+// Each basic element is its own entry:
+//
+//	Version            = { type: [0x65, 0x32], data: nil }
+//	CompressedHeader   = { type: [0x03, 0x00], data: snappyFramed(rlp(header)) }
+//	CompressedBody     = { type: [0x04, 0x00], data: snappyFramed(rlp(body)) }
+//	CompressedReceipts = { type: [0x05, 0x00], data: snappyFramed(rlp(receipts)) }
+//	TotalDifficulty    = { type: [0x06, 0x00], data: uint256(header.total_difficulty) }
+//	AccumulatorRoot    = { type: [0x07, 0x00], data: accumulator-root }
+//	BlockIndex         = { type: [0x32, 0x66], data: block-index }
+//
+// Accumulator is computed by constructing an SSZ list of header-records of length at most
+// 8192 and then calculating the hash_tree_root of that list.
+//
+//	header-record := { block-hash: Bytes32, total-difficulty: Uint256 }
+//	accumulator   := hash_tree_root([]header-record, 8192)
+//
+// BlockIndex stores relative offsets to each compressed block entry. The
+// format is:
+//
+//	block-index := starting-number | index | index | index ... | count
+//
+// starting-number is the first block number in the archive. Every index is
+// defined relative to the beginning of the record. The total number of block
+// entries in the file is recorded with count.
+//
+// Due to the accumulator size limit of 8192, the maximum number of blocks in
+// an Era1 batch is also 8192.
+type Builder struct {
+	w        *e2store.Writer
+	startNum *uint64
+	startTd  *big.Int
+	indexes  []uint64
+	hashes   []common.Hash
+	tds      []*big.Int
+	written  int
+
+	buf    *bytes.Buffer
+	snappy *snappy.Writer
+}
+
+// NewBuilder returns a new Builder instance.
+func NewBuilder(w io.Writer) *Builder {
+	buf := bytes.NewBuffer(nil)
+	return &Builder{
+		w:      e2store.NewWriter(w),
+		buf:    buf,
+		snappy: snappy.NewBufferedWriter(buf),
+	}
+}
+
+// Add writes a compressed block entry and compressed receipts entry to the
+// underlying e2store file.
+func (b *Builder) Add(block *types.Block, receipts types.Receipts, td *big.Int) error {
+	eh, err := rlp.EncodeToBytes(block.Header())
+	if err != nil {
+		return err
+	}
+	eb, err := rlp.EncodeToBytes(block.Body())
+	if err != nil {
+		return err
+	}
+	er, err := rlp.EncodeToBytes(receipts)
+	if err != nil {
+		return err
+	}
+	return b.AddRLP(eh, eb, er, block.NumberU64(), block.Hash(), td, block.Difficulty())
+}
+
+// AddRLP writes a compressed block entry and compressed receipts entry to the
+// underlying e2store file.
+func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash common.Hash, td, difficulty *big.Int) error {
+	// Write Era1 version entry before first block.
+	if b.startNum == nil {
+		n, err := b.w.Write(TypeVersion, nil)
+		if err != nil {
+			return err
+		}
+		startNum := number
+		b.startNum = &startNum
+		b.startTd = new(big.Int).Sub(td, difficulty)
+		b.written += n
+	}
+	if len(b.indexes) >= MaxEra1Size {
+		return fmt.Errorf("exceeds maximum batch size of %d", MaxEra1Size)
+	}
+
+	b.indexes = append(b.indexes, uint64(b.written))
+	b.hashes = append(b.hashes, hash)
+	b.tds = append(b.tds, td)
+
+	// Write block data.
+	if err := b.snappyWrite(TypeCompressedHeader, header); err != nil {
+		return err
+	}
+	if err := b.snappyWrite(TypeCompressedBody, body); err != nil {
+		return err
+	}
+	if err := b.snappyWrite(TypeCompressedReceipts, receipts); err != nil {
+		return err
+	}
+
+	// Also write total difficulty, but don't snappy encode.
+	btd := bigToBytes32(td)
+	n, err := b.w.Write(TypeTotalDifficulty, btd[:])
+	b.written += n
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Finalize computes the accumulator and block index values, then writes the
+// corresponding e2store entries.
+func (b *Builder) Finalize() (common.Hash, error) {
+	if b.startNum == nil {
+		return common.Hash{}, fmt.Errorf("finalize called on empty builder")
+	}
+	// Compute accumulator root and write entry.
+	root, err := ComputeAccumulator(b.hashes, b.tds)
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("error calculating accumulator root: %w", err)
+	}
+	n, err := b.w.Write(TypeAccumulator, root[:])
+	b.written += n
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("error writing accumulator: %w", err)
+	}
+	// Get beginning of index entry to calculate block relative offset.
+	base := int64(b.written)
+
+	// Construct block index. Detailed format described in Builder
+	// documentation, but it is essentially encoded as:
+	// "start | index | index | ... | count"
+	var (
+		count = len(b.indexes)
+		index = make([]byte, 16+count*8)
+	)
+	binary.LittleEndian.PutUint64(index, *b.startNum)
+	// Each offset is relative from the position it is encoded in the
+	// index. This means that even if the same block was to be included in
+	// the index twice (this would be invalid anyways), the relative offset
+	// would be different. The idea with this is that after reading a
+	// relative offset, the corresponding block can be quickly read by
+	// performing a seek relative to the current position.
+	for i, offset := range b.indexes {
+		relative := int64(offset) - base
+		binary.LittleEndian.PutUint64(index[8+i*8:], uint64(relative))
+	}
+	binary.LittleEndian.PutUint64(index[8+count*8:], uint64(count))
+
+	// Finally, write the block index entry.
+	if _, err := b.w.Write(TypeBlockIndex, index); err != nil {
+		return common.Hash{}, fmt.Errorf("unable to write block index: %w", err)
+	}
+
+	return root, nil
+}
+
+// snappyWrite is a small helper to take care of snappy encoding and writing an e2store entry.
+func (b *Builder) snappyWrite(typ uint16, in []byte) error {
+	var (
+		buf = b.buf
+		s   = b.snappy
+	)
+	buf.Reset()
+	s.Reset(buf)
+	if _, err := b.snappy.Write(in); err != nil {
+		return fmt.Errorf("error snappy encoding: %w", err)
+	}
+	if err := s.Flush(); err != nil {
+		return fmt.Errorf("error flushing snappy encoding: %w", err)
+	}
+	n, err := b.w.Write(typ, b.buf.Bytes())
+	b.written += n
+	if err != nil {
+		return fmt.Errorf("error writing e2store entry: %w", err)
+	}
+	return nil
+}
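The intended call order for the Builder above is: Add each block in ascending order, then a single Finalize to emit the accumulator root and the block index. A hedged sketch (the helper and its inputs are ours; like internal/era generally, this only compiles from inside the go-ethereum module):

import (
	"io"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/internal/era"
)

// writeEra1 streams one batch of blocks into an Era1 archive and returns
// the accumulator root written by Finalize.
func writeEra1(w io.Writer, blocks []*types.Block, receipts []types.Receipts, tds []*big.Int) (common.Hash, error) {
	b := era.NewBuilder(w)
	for i, block := range blocks {
		if err := b.Add(block, receipts[i], tds[i]); err != nil {
			return common.Hash{}, err
		}
	}
	return b.Finalize()
}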
internal/era/e2store/e2store.go (new file, 220 lines)
@@ -0,0 +1,220 @@
|
// Copyright 2023 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package e2store
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
headerSize = 8
|
||||||
|
valueSizeLimit = 1024 * 1024 * 50
|
||||||
|
)
|
||||||
|
|
||||||
|
// Entry is a variable-length-data record in an e2store.
|
||||||
|
type Entry struct {
|
||||||
|
Type uint16
|
||||||
|
Value []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writer writes entries using e2store encoding.
|
||||||
|
// For more information on this format, see:
|
||||||
|
// https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md
|
||||||
|
type Writer struct {
|
||||||
|
w io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWriter returns a new Writer that writes to w.
|
||||||
|
func NewWriter(w io.Writer) *Writer {
|
||||||
|
return &Writer{w}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes a single e2store entry to w.
|
||||||
|
// An entry is encoded in a type-length-value format. The first 8 bytes of the
|
||||||
|
// record store the type (2 bytes), the length (4 bytes), and some reserved
|
||||||
|
// data (2 bytes). The remaining bytes store b.
|
||||||
|
func (w *Writer) Write(typ uint16, b []byte) (int, error) {
|
||||||
|
buf := make([]byte, headerSize)
|
||||||
|
binary.LittleEndian.PutUint16(buf, typ)
|
||||||
|
binary.LittleEndian.PutUint32(buf[2:], uint32(len(b)))
|
||||||
|
|
||||||
|
// Write header.
|
||||||
|
if n, err := w.w.Write(buf); err != nil {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
// Write value, return combined write size.
|
||||||
|
n, err := w.w.Write(b)
|
||||||
|
return n + headerSize, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Reader reads entries from an e2store-encoded file.
|
||||||
|
// For more information on this format, see
|
||||||
|
// https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md
|
||||||
|
type Reader struct {
|
||||||
|
r io.ReaderAt
|
||||||
|
offset int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReader returns a new Reader that reads from r.
|
||||||
|
func NewReader(r io.ReaderAt) *Reader {
|
||||||
|
return &Reader{r, 0}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads one Entry from r.
|
||||||
|
func (r *Reader) Read() (*Entry, error) {
|
||||||
|
var e Entry
|
||||||
|
n, err := r.ReadAt(&e, r.offset)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
r.offset += int64(n)
|
||||||
|
return &e, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadAt reads one Entry from r at the specified offset.
|
||||||
|
func (r *Reader) ReadAt(entry *Entry, off int64) (int, error) {
|
||||||
|
typ, length, err := r.ReadMetadataAt(off)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
entry.Type = typ
|
||||||
|
|
||||||
|
// Check length bounds.
|
||||||
|
if length > valueSizeLimit {
|
||||||
|
return headerSize, fmt.Errorf("item larger than item size limit %d: have %d", valueSizeLimit, length)
|
||||||
|
}
|
||||||
|
if length == 0 {
|
||||||
|
return headerSize, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read value.
|
||||||
|
val := make([]byte, length)
|
||||||
|
if n, err := r.r.ReadAt(val, off+headerSize); err != nil {
|
||||||
|
n += headerSize
|
||||||
|
// An entry with a non-zero length should not return EOF when
|
||||||
|
// reading the value.
|
||||||
|
if err == io.EOF {
|
||||||
|
return n, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
entry.Value = val
|
||||||
|
return int(headerSize + length), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReaderAt returns an io.Reader delivering value data for the entry at
|
||||||
|
// the specified offset. If the entry type does not match the expected type, an
|
||||||
|
// error is returned.
|
||||||
|
func (r *Reader) ReaderAt(expectedType uint16, off int64) (io.Reader, int, error) {
|
||||||
|
// problem = need to return length+headerSize not just value length via section reader
|
||||||
|
typ, length, err := r.ReadMetadataAt(off)
|
||||||
|
if err != nil {
|
||||||
|
return nil, headerSize, err
|
||||||
|
}
|
||||||
|
if typ != expectedType {
|
||||||
|
return nil, headerSize, fmt.Errorf("wrong type, want %d have %d", expectedType, typ)
|
||||||
|
}
|
||||||
|
if length > valueSizeLimit {
|
||||||
|
return nil, headerSize, fmt.Errorf("item larger than item size limit %d: have %d", valueSizeLimit, length)
|
||||||
|
}
|
||||||
|
return io.NewSectionReader(r.r, off+headerSize, int64(length)), headerSize + int(length), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LengthAt reads the header at off and returns the total length of the entry,
|
||||||
|
// including header.
|
||||||
|
func (r *Reader) LengthAt(off int64) (int64, error) {
|
||||||
|
_, length, err := r.ReadMetadataAt(off)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return int64(length) + headerSize, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadMetadataAt reads the header metadata at the given offset.
func (r *Reader) ReadMetadataAt(off int64) (typ uint16, length uint32, err error) {
	b := make([]byte, headerSize)
	if n, err := r.r.ReadAt(b, off); err != nil {
		if err == io.EOF && n > 0 {
			return 0, 0, io.ErrUnexpectedEOF
		}
		return 0, 0, err
	}
	typ = binary.LittleEndian.Uint16(b)
	length = binary.LittleEndian.Uint32(b[2:])

	// Check reserved bytes of header.
	if b[6] != 0 || b[7] != 0 {
		return 0, 0, fmt.Errorf("reserved bytes are non-zero")
	}

	return typ, length, nil
}

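For reference, every e2store entry begins with the 8-byte header ReadMetadataAt decodes: a little-endian uint16 type, a little-endian uint32 length, and two reserved bytes that must be zero. The standalone sketch below (not part of the package; the constants mirror the test vectors in e2store_test.go further down) shows the encoding side of that layout:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeHeader mirrors the layout ReadMetadataAt expects: type (LE uint16),
// length (LE uint32), then two reserved zero bytes.
func encodeHeader(typ uint16, length uint32) []byte {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint16(b[0:], typ)
	binary.LittleEndian.PutUint32(b[2:], length)
	// b[6] and b[7] stay zero; ReadMetadataAt rejects non-zero reserved bytes.
	return b
}

func main() {
	// Prints 2a00020000000000: the header of the "beef" entry in the
	// TestEncode vectors below (type 42, value length 2).
	fmt.Printf("%x\n", encodeHeader(42, 2))
}
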
// Find returns the first entry with the matching type.
func (r *Reader) Find(want uint16) (*Entry, error) {
	var (
		off    int64
		typ    uint16
		length uint32
		err    error
	)
	for {
		typ, length, err = r.ReadMetadataAt(off)
		if err == io.EOF {
			return nil, io.EOF
		} else if err != nil {
			return nil, err
		}
		if typ == want {
			var e Entry
			if _, err := r.ReadAt(&e, off); err != nil {
				return nil, err
			}
			return &e, nil
		}
		off += int64(headerSize + length)
	}
}

// FindAll returns all entries with the matching type.
func (r *Reader) FindAll(want uint16) ([]*Entry, error) {
	var (
		off     int64
		typ     uint16
		length  uint32
		entries []*Entry
		err     error
	)
	for {
		typ, length, err = r.ReadMetadataAt(off)
		if err == io.EOF {
			return entries, nil
		} else if err != nil {
			return entries, err
		}
		if typ == want {
			e := new(Entry)
			if _, err := r.ReadAt(e, off); err != nil {
				return entries, err
			}
			entries = append(entries, e)
		}
		off += int64(headerSize + length)
	}
}
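Together with the Writer defined earlier in this file, the Reader gives a simple append-and-scan interface. A minimal round-trip sketch — illustrative only, since internal/ packages cannot be imported from outside the go-ethereum module:

package main

import (
	"bytes"
	"fmt"

	// internal/ package: this import only compiles from within go-ethereum.
	"github.com/ethereum/go-ethereum/internal/era/e2store"
)

func main() {
	var buf bytes.Buffer
	w := e2store.NewWriter(&buf)
	// Each Write prepends the 8-byte header to the value.
	w.Write(42, []byte("beef"))
	w.Write(9, []byte("abcd"))

	r := e2store.NewReader(bytes.NewReader(buf.Bytes()))
	// Find walks entry headers until it sees the requested type.
	e, err := r.Find(9)
	if err != nil {
		panic(err)
	}
	fmt.Printf("type=%d value=%q\n", e.Type, e.Value)
}
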

internal/era/e2store/e2store_test.go (new file, 150 lines)
@@ -0,0 +1,150 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package e2store

import (
	"bytes"
	"fmt"
	"io"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

func TestEncode(t *testing.T) {
	for _, test := range []struct {
		entries []Entry
		want    string
		name    string
	}{
		{
			name:    "emptyEntry",
			entries: []Entry{{0xffff, nil}},
			want:    "ffff000000000000",
		},
		{
			name:    "beef",
			entries: []Entry{{42, common.Hex2Bytes("beef")}},
			want:    "2a00020000000000beef",
		},
		{
			name: "twoEntries",
			entries: []Entry{
				{42, common.Hex2Bytes("beef")},
				{9, common.Hex2Bytes("abcdabcd")},
			},
			want: "2a00020000000000beef0900040000000000abcdabcd",
		},
	} {
		tt := test
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			var (
				b = bytes.NewBuffer(nil)
				w = NewWriter(b)
			)
			for _, e := range tt.entries {
				if _, err := w.Write(e.Type, e.Value); err != nil {
					t.Fatalf("encoding error: %v", err)
				}
			}
			if want, have := common.FromHex(tt.want), b.Bytes(); !bytes.Equal(want, have) {
				t.Fatalf("encoding mismatch (want %x, have %x)", want, have)
			}
			r := NewReader(bytes.NewReader(b.Bytes()))
			for _, want := range tt.entries {
				have, err := r.Read()
				if err != nil {
					t.Fatalf("decoding error: %v", err)
				}
				if have.Type != want.Type {
					t.Fatalf("decoded entry type mismatch (want %v, got %v)", want.Type, have.Type)
				}
				if !bytes.Equal(have.Value, want.Value) {
					t.Fatalf("decoded entry does not match (want %#x, got %#x)", want.Value, have.Value)
				}
			}
		})
	}
}

func TestDecode(t *testing.T) {
	for i, tt := range []struct {
		have string
		err  error
	}{
		{ // basic valid decoding
			have: "ffff000000000000",
		},
		{ // basic invalid decoding
			have: "ffff000000000001",
			err:  fmt.Errorf("reserved bytes are non-zero"),
		},
		{ // no more entries to read, returns EOF
			have: "",
			err:  io.EOF,
		},
		{ // malformed type
			have: "bad",
			err:  io.ErrUnexpectedEOF,
		},
		{ // malformed length
			have: "badbeef",
			err:  io.ErrUnexpectedEOF,
		},
		{ // specified length longer than actual value
			have: "beef010000000000",
			err:  io.ErrUnexpectedEOF,
		},
	} {
		r := NewReader(bytes.NewReader(common.FromHex(tt.have)))
		if tt.err != nil {
			_, err := r.Read()
			if err == nil {
				t.Fatalf("test %d, expected error %v, got none", i, tt.err)
			} else if err.Error() != tt.err.Error() {
				t.Fatalf("test %d, expected error %v, got %v", i, tt.err, err)
			}
			continue
		}
	}
}

func FuzzCodec(f *testing.F) {
	f.Fuzz(func(t *testing.T, input []byte) {
		r := NewReader(bytes.NewReader(input))
		entry, err := r.Read()
		if err != nil {
			return
		}
		var (
			b = bytes.NewBuffer(nil)
			w = NewWriter(b)
		)
		w.Write(entry.Type, entry.Value)
		output := b.Bytes()
		// Only care about the input that was actually consumed.
		input = input[:r.offset]
		if !bytes.Equal(input, output) {
			t.Fatalf("decode-encode mismatch, input %#x output %#x", input, output)
		}
	})
}

internal/era/era.go (new file, 283 lines)
@@ -0,0 +1,283 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package era

import (
	"encoding/binary"
	"fmt"
	"io"
	"math/big"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/internal/era/e2store"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/golang/snappy"
)

var (
	TypeVersion            uint16 = 0x3265
	TypeCompressedHeader   uint16 = 0x03
	TypeCompressedBody     uint16 = 0x04
	TypeCompressedReceipts uint16 = 0x05
	TypeTotalDifficulty    uint16 = 0x06
	TypeAccumulator        uint16 = 0x07
	TypeBlockIndex         uint16 = 0x3266

	MaxEra1Size = 8192
)

// Filename returns a recognizable Era1-formatted file name for the specified
// epoch and network.
func Filename(network string, epoch int, root common.Hash) string {
	return fmt.Sprintf("%s-%05d-%s.era1", network, epoch, root.Hex()[2:10])
}
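As a concrete illustration of the naming scheme (both values mirror the TestEraFilename cases in era_test.go below):

// The epoch is zero-padded to five digits; the root contributes the first
// eight hex characters of root.Hex() once the "0x" prefix is dropped.
Filename("mainnet", 1, common.Hash{1}) // "mainnet-00001-01000000.era1"
Filename("goerli", 99999, common.HexToHash("0xdeadbeef00000000000000000000000000000000000000000000000000000000")) // "goerli-99999-deadbeef.era1"
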
// ReadDir reads all the era1 files in a directory for a given network.
// Format: <network>-<epoch>-<hexroot>.era1
func ReadDir(dir, network string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, fmt.Errorf("error reading directory %s: %w", dir, err)
	}
	var (
		next = uint64(0)
		eras []string
	)
	for _, entry := range entries {
		if path.Ext(entry.Name()) != ".era1" {
			continue
		}
		parts := strings.Split(entry.Name(), "-")
		if len(parts) != 3 || parts[0] != network {
			// invalid era1 filename, skip
			continue
		}
		if epoch, err := strconv.ParseUint(parts[1], 10, 64); err != nil {
			return nil, fmt.Errorf("malformed era1 filename: %s", entry.Name())
		} else if epoch != next {
			return nil, fmt.Errorf("missing epoch %d", next)
		}
		next += 1
		eras = append(eras, entry.Name())
	}
	return eras, nil
}

type ReadAtSeekCloser interface {
	io.ReaderAt
	io.Seeker
	io.Closer
}

// Era reads an Era1 file.
type Era struct {
	f   ReadAtSeekCloser // backing era1 file
	s   *e2store.Reader  // e2store reader over f
	m   metadata         // start, count, length info
	mu  *sync.Mutex      // lock for buf
	buf [8]byte          // buffer reading entry offsets
}

// From returns an Era backed by f.
func From(f ReadAtSeekCloser) (*Era, error) {
	m, err := readMetadata(f)
	if err != nil {
		return nil, err
	}
	return &Era{
		f:  f,
		s:  e2store.NewReader(f),
		m:  m,
		mu: new(sync.Mutex),
	}, nil
}

// Open returns an Era backed by the given filename.
func Open(filename string) (*Era, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	return From(f)
}

func (e *Era) Close() error {
	return e.f.Close()
}

func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
	if e.m.start > num || e.m.start+e.m.count <= num {
		return nil, fmt.Errorf("out-of-bounds")
	}
	off, err := e.readOffset(num)
	if err != nil {
		return nil, err
	}
	r, n, err := newSnappyReader(e.s, TypeCompressedHeader, off)
	if err != nil {
		return nil, err
	}
	var header types.Header
	if err := rlp.Decode(r, &header); err != nil {
		return nil, err
	}
	off += n
	r, _, err = newSnappyReader(e.s, TypeCompressedBody, off)
	if err != nil {
		return nil, err
	}
	var body types.Body
	if err := rlp.Decode(r, &body); err != nil {
		return nil, err
	}
	return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil
}

// Accumulator reads the accumulator entry in the Era1 file.
func (e *Era) Accumulator() (common.Hash, error) {
	entry, err := e.s.Find(TypeAccumulator)
	if err != nil {
		return common.Hash{}, err
	}
	return common.BytesToHash(entry.Value), nil
}

// InitialTD returns the initial total difficulty before the difficulty of the
// first block of the Era1 is applied.
func (e *Era) InitialTD() (*big.Int, error) {
	var (
		r      io.Reader
		header types.Header
		rawTd  []byte
		n      int64
		off    int64
		err    error
	)

	// Read first header.
	if off, err = e.readOffset(e.m.start); err != nil {
		return nil, err
	}
	if r, n, err = newSnappyReader(e.s, TypeCompressedHeader, off); err != nil {
		return nil, err
	}
	if err := rlp.Decode(r, &header); err != nil {
		return nil, err
	}
	off += n

	// Skip over the next two records.
	for i := 0; i < 2; i++ {
		length, err := e.s.LengthAt(off)
		if err != nil {
			return nil, err
		}
		off += length
	}

	// Read total difficulty after first block.
	if r, _, err = e.s.ReaderAt(TypeTotalDifficulty, off); err != nil {
		return nil, err
	}
	rawTd, err = io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	td := new(big.Int).SetBytes(reverseOrder(rawTd))
	return td.Sub(td, header.Difficulty), nil
}
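InitialTD (and the iterator file below) depend on reverseOrder, which is defined elsewhere in the era package, in a part of this diff not shown in this excerpt. It is needed because Era1 stores total difficulty little-endian while big.Int.SetBytes expects big-endian bytes; a minimal equivalent, assuming a simple in-place byte reversal, would be:

// reverseOrder flips a byte slice in place and returns it, converting
// between the little-endian TD encoding on disk and big-endian big.Int bytes.
func reverseOrder(b []byte) []byte {
	for i := 0; i < len(b)/2; i++ {
		b[i], b[len(b)-1-i] = b[len(b)-1-i], b[i]
	}
	return b
}
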
// Start returns the listed start block.
func (e *Era) Start() uint64 {
	return e.m.start
}

// Count returns the total number of blocks in the Era1.
func (e *Era) Count() uint64 {
	return e.m.count
}

// readOffset reads a specific block's offset from the block index. The value n
// is the absolute block number desired.
func (e *Era) readOffset(n uint64) (int64, error) {
	var (
		blockIndexRecordOffset = e.m.length - 24 - int64(e.m.count)*8 // skips start, count, and header
		firstIndex             = blockIndexRecordOffset + 16          // first index, after the header and start number
		indexOffset            = int64(n-e.m.start) * 8               // desired index * size of indexes
		offOffset              = firstIndex + indexOffset             // offset of the block's offset
	)
	e.mu.Lock()
	defer e.mu.Unlock()
	clearBuffer(e.buf[:])
	if _, err := e.f.ReadAt(e.buf[:], offOffset); err != nil {
		return 0, err
	}
	// Since the block offset is relative to the start of the block index record,
	// we need to add the record offset to it to get the block's absolute offset.
	return blockIndexRecordOffset + int64(binary.LittleEndian.Uint64(e.buf[:])), nil
}
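To make the index arithmetic concrete, here is the same computation as readOffset with the receiver fields pulled out into parameters (a hypothetical helper, not part of the package; any numbers fed to it are illustrative):

// offsetOfBlock returns the file position holding block n's relative offset,
// mirroring readOffset above.
func offsetOfBlock(length, count, start, n int64) int64 {
	blockIndexRecordOffset := length - 24 - count*8 // 24 = count (8) + start (8) + e2store header (8)
	firstIndex := blockIndexRecordOffset + 16       // first offset, after the header and start number
	return firstIndex + (n-start)*8                 // one 8-byte offset entry per block
}
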
// newSnappyReader returns a snappy.Reader for the e2store entry value at off.
func newSnappyReader(e *e2store.Reader, expectedType uint16, off int64) (io.Reader, int64, error) {
	r, n, err := e.ReaderAt(expectedType, off)
	if err != nil {
		return nil, 0, err
	}
	return snappy.NewReader(r), int64(n), err
}

// clearBuffer zeroes out the buffer.
func clearBuffer(buf []byte) {
	for i := 0; i < len(buf); i++ {
		buf[i] = 0
	}
}

// metadata wraps the metadata in the block index.
type metadata struct {
	start  uint64
	count  uint64
	length int64
}

// readMetadata reads the metadata stored in an Era1 file's block index.
func readMetadata(f ReadAtSeekCloser) (m metadata, err error) {
	// Determine the length of the reader.
	if m.length, err = f.Seek(0, io.SeekEnd); err != nil {
		return
	}
	b := make([]byte, 16)
	// Read count. It's the last 8 bytes of the file.
	if _, err = f.ReadAt(b[:8], m.length-8); err != nil {
		return
	}
	m.count = binary.LittleEndian.Uint64(b)
	// Read start. It's at the offset -sizeof(m.count) -
	// count*sizeof(indexEntry) - sizeof(m.start).
	if _, err = f.ReadAt(b[8:], m.length-16-int64(m.count*8)); err != nil {
		return
	}
	m.start = binary.LittleEndian.Uint64(b[8:])
	return
}
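Spelled out, the tail of an Era1 file that readMetadata consumes looks like this (a sketch inferred from the reads above, not a separate specification):

// ... | e2store header (8) | start block (8) | offset 0 (8) | ... | offset count-1 (8) | count (8) |
//                                                                                       ^ file end
//
// count is read from length-8; start from length-16-count*8; each offset is
// little-endian and relative to the start of the block index record.
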

internal/era/era_test.go (new file, 142 lines)
@@ -0,0 +1,142 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package era

import (
	"bytes"
	"io"
	"math/big"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

type testchain struct {
	headers  [][]byte
	bodies   [][]byte
	receipts [][]byte
	tds      []*big.Int
}

func TestEra1Builder(t *testing.T) {
	// Create a temp file to build the Era1 into.
	f, err := os.CreateTemp("", "era1-test")
	if err != nil {
		t.Fatalf("error creating temp file: %v", err)
	}
	defer f.Close()

	var (
		builder = NewBuilder(f)
		chain   = testchain{}
	)
	for i := 0; i < 128; i++ {
		chain.headers = append(chain.headers, []byte{byte('h'), byte(i)})
		chain.bodies = append(chain.bodies, []byte{byte('b'), byte(i)})
		chain.receipts = append(chain.receipts, []byte{byte('r'), byte(i)})
		chain.tds = append(chain.tds, big.NewInt(int64(i)))
	}

	// Write blocks to Era1.
	for i := 0; i < len(chain.headers); i++ {
		var (
			header   = chain.headers[i]
			body     = chain.bodies[i]
			receipts = chain.receipts[i]
			hash     = common.Hash{byte(i)}
			td       = chain.tds[i]
		)
		if err = builder.AddRLP(header, body, receipts, uint64(i), hash, td, big.NewInt(1)); err != nil {
			t.Fatalf("error adding entry: %v", err)
		}
	}

	// Finalize Era1.
	if _, err := builder.Finalize(); err != nil {
		t.Fatalf("error finalizing era1: %v", err)
	}

	// Verify Era1 contents.
	e, err := Open(f.Name())
	if err != nil {
		t.Fatalf("failed to open era: %v", err)
	}
	it, err := NewRawIterator(e)
	if err != nil {
		t.Fatalf("failed to make iterator: %s", err)
	}
	for i := uint64(0); i < uint64(len(chain.headers)); i++ {
		if !it.Next() {
			t.Fatalf("expected more entries")
		}
		if it.Error() != nil {
			t.Fatalf("unexpected error %v", it.Error())
		}
		// Check headers.
		header, err := io.ReadAll(it.Header)
		if err != nil {
			t.Fatalf("error reading header: %v", err)
		}
		if !bytes.Equal(header, chain.headers[i]) {
			t.Fatalf("mismatched header: want %s, got %s", chain.headers[i], header)
		}
		// Check bodies.
		body, err := io.ReadAll(it.Body)
		if err != nil {
			t.Fatalf("error reading body: %v", err)
		}
		if !bytes.Equal(body, chain.bodies[i]) {
			t.Fatalf("mismatched body: want %s, got %s", chain.bodies[i], body)
		}
		// Check receipts.
		receipts, err := io.ReadAll(it.Receipts)
		if err != nil {
			t.Fatalf("error reading receipts: %v", err)
		}
		if !bytes.Equal(receipts, chain.receipts[i]) {
			t.Fatalf("mismatched receipts: want %s, got %s", chain.receipts[i], receipts)
		}

		// Check total difficulty.
		rawTd, err := io.ReadAll(it.TotalDifficulty)
		if err != nil {
			t.Fatalf("error reading td: %v", err)
		}
		td := new(big.Int).SetBytes(reverseOrder(rawTd))
		if td.Cmp(chain.tds[i]) != 0 {
			t.Fatalf("mismatched tds: want %s, got %s", chain.tds[i], td)
		}
	}
}

func TestEraFilename(t *testing.T) {
	for i, tt := range []struct {
		network  string
		epoch    int
		root     common.Hash
		expected string
	}{
		{"mainnet", 1, common.Hash{1}, "mainnet-00001-01000000.era1"},
		{"goerli", 99999, common.HexToHash("0xdeadbeef00000000000000000000000000000000000000000000000000000000"), "goerli-99999-deadbeef.era1"},
	} {
		got := Filename(tt.network, tt.epoch, tt.root)
		if tt.expected != got {
			t.Errorf("test %d: invalid filename: want %s, got %s", i, tt.expected, got)
		}
	}
}

internal/era/iterator.go (new file, 197 lines)
@@ -0,0 +1,197 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package era

import (
	"fmt"
	"io"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

// Iterator wraps RawIterator and returns decoded Era1 entries.
type Iterator struct {
	inner *RawIterator
}

// NewIterator returns a new Iterator instance. Next must be immediately
// called on new iterators to load the first item.
func NewIterator(e *Era) (*Iterator, error) {
	inner, err := NewRawIterator(e)
	if err != nil {
		return nil, err
	}
	return &Iterator{inner}, nil
}

// Next moves the iterator to the next block entry. It returns false when all
// items have been read or an error has halted its progress. Block, Receipts,
// and BlockAndReceipts should no longer be called after false is returned.
func (it *Iterator) Next() bool {
	return it.inner.Next()
}

// Number returns the number of the block the iterator currently points to.
func (it *Iterator) Number() uint64 {
	return it.inner.next - 1
}

// Error returns the error status of the iterator. It should be called before
// reading from any of the iterator's values.
func (it *Iterator) Error() error {
	return it.inner.Error()
}

// Block returns the block for the iterator's current position.
func (it *Iterator) Block() (*types.Block, error) {
	if it.inner.Header == nil || it.inner.Body == nil {
		return nil, fmt.Errorf("header and body must be non-nil")
	}
	var (
		header types.Header
		body   types.Body
	)
	if err := rlp.Decode(it.inner.Header, &header); err != nil {
		return nil, err
	}
	if err := rlp.Decode(it.inner.Body, &body); err != nil {
		return nil, err
	}
	return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil
}

// Receipts returns the receipts for the iterator's current position.
func (it *Iterator) Receipts() (types.Receipts, error) {
	if it.inner.Receipts == nil {
		return nil, fmt.Errorf("receipts must be non-nil")
	}
	var receipts types.Receipts
	err := rlp.Decode(it.inner.Receipts, &receipts)
	return receipts, err
}

// BlockAndReceipts returns the block and receipts for the iterator's current
// position.
func (it *Iterator) BlockAndReceipts() (*types.Block, types.Receipts, error) {
	b, err := it.Block()
	if err != nil {
		return nil, nil, err
	}
	r, err := it.Receipts()
	if err != nil {
		return nil, nil, err
	}
	return b, r, nil
}

// TotalDifficulty returns the total difficulty for the iterator's current
// position.
func (it *Iterator) TotalDifficulty() (*big.Int, error) {
	td, err := io.ReadAll(it.inner.TotalDifficulty)
	if err != nil {
		return nil, err
	}
	return new(big.Int).SetBytes(reverseOrder(td)), nil
}

// RawIterator reads raw RLP-encoded Era1 entries.
type RawIterator struct {
	e    *Era   // backing Era1
	next uint64 // next block to read
	err  error  // last error

	Header          io.Reader
	Body            io.Reader
	Receipts        io.Reader
	TotalDifficulty io.Reader
}

// NewRawIterator returns a new RawIterator instance. Next must be immediately
// called on new iterators to load the first item.
func NewRawIterator(e *Era) (*RawIterator, error) {
	return &RawIterator{
		e:    e,
		next: e.m.start,
	}, nil
}

// Next moves the iterator to the next block entry. It returns false when all
// items have been read or an error has halted its progress. Header, Body,
// Receipts, and TotalDifficulty are set to nil when false is returned or an
// error is found, and should therefore no longer be read from.
func (it *RawIterator) Next() bool {
	// Clear old errors.
	it.err = nil
	if it.e.m.start+it.e.m.count <= it.next {
		it.clear()
		return false
	}
	off, err := it.e.readOffset(it.next)
	if err != nil {
		// Error here means block index is corrupted, so don't
		// continue.
		it.clear()
		it.err = err
		return false
	}
	var n int64
	if it.Header, n, it.err = newSnappyReader(it.e.s, TypeCompressedHeader, off); it.err != nil {
		it.clear()
		return true
	}
	off += n
	if it.Body, n, it.err = newSnappyReader(it.e.s, TypeCompressedBody, off); it.err != nil {
		it.clear()
		return true
	}
	off += n
	if it.Receipts, n, it.err = newSnappyReader(it.e.s, TypeCompressedReceipts, off); it.err != nil {
		it.clear()
		return true
	}
	off += n
	if it.TotalDifficulty, _, it.err = it.e.s.ReaderAt(TypeTotalDifficulty, off); it.err != nil {
		it.clear()
		return true
	}
	it.next += 1
	return true
}

// Number returns the number of the block the iterator currently points to.
func (it *RawIterator) Number() uint64 {
	return it.next - 1
}

// Error returns the error status of the iterator. It should be called before
// reading from any of the iterator's values.
func (it *RawIterator) Error() error {
	if it.err == io.EOF {
		return nil
	}
	return it.err
}

// clear sets all the outputs to nil.
func (it *RawIterator) clear() {
	it.Header = nil
	it.Body = nil
	it.Receipts = nil
	it.TotalDifficulty = nil
}
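Putting the new era pieces together, draining an Era1 file end-to-end looks roughly like the sketch below. It assumes it runs inside the go-ethereum module (internal/era is not importable from outside) with fmt and the era package imported; dumpEra is a hypothetical helper name.

func dumpEra(filename string) error {
	e, err := era.Open(filename)
	if err != nil {
		return err
	}
	defer e.Close()

	it, err := era.NewIterator(e)
	if err != nil {
		return err
	}
	// Next must be called before the first read; Error reports whatever
	// stopped the iteration early.
	for it.Next() {
		block, receipts, err := it.BlockAndReceipts()
		if err != nil {
			return err
		}
		fmt.Println("block", block.Number(), "receipts", len(receipts))
	}
	return it.Error()
}
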
@@ -655,7 +655,7 @@ func (s *BlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) {
 	return (*hexutil.Big)(b), state.Error()
 }
 
-// Result structs for GetProof
+// AccountResult structs for GetProof
 type AccountResult struct {
 	Address      common.Address `json:"address"`
 	AccountProof []string       `json:"accountProof"`
@@ -1812,13 +1812,14 @@ func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionArgs) (common.Hash, error) {
 // on a given unsigned transaction, and returns it to the caller for further
 // processing (signing + broadcast).
 func (s *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
+	args.blobSidecarAllowed = true
+
 	// Set some sanity defaults and terminate on failure
 	if err := args.setDefaults(ctx, s.b); err != nil {
 		return nil, err
 	}
 	// Assemble the transaction and obtain rlp
 	tx := args.toTransaction()
-	// TODO(s1na): fill in blob proofs, commitments
 	data, err := tx.MarshalBinary()
 	if err != nil {
 		return nil, err
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/ecdsa"
+	"crypto/sha256"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -45,6 +46,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/crypto/kzg4844"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/internal/blocktest"
@@ -1079,6 +1081,195 @@ func TestSendBlobTransaction(t *testing.T) {
 	}
 }
 
+func TestFillBlobTransaction(t *testing.T) {
+	t.Parallel()
+	// Initialize test accounts
+	var (
+		key, _  = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+		to      = crypto.PubkeyToAddress(key.PublicKey)
+		genesis = &core.Genesis{
+			Config: params.MergedTestChainConfig,
+			Alloc:  core.GenesisAlloc{},
+		}
+		emptyBlob                      = kzg4844.Blob{}
+		emptyBlobCommit, _             = kzg4844.BlobToCommitment(emptyBlob)
+		emptyBlobProof, _              = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
+		emptyBlobHash      common.Hash = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit)
+	)
+	b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
+		b.SetPoS()
+	})
+	api := NewTransactionAPI(b, nil)
+	type result struct {
+		Hashes  []common.Hash
+		Sidecar *types.BlobTxSidecar
+	}
+	suite := []struct {
+		name string
+		args TransactionArgs
+		err  string
+		want *result
+	}{
+		{
+			name: "TestInvalidParamsCombination1",
+			args: TransactionArgs{
+				From:   &b.acc.Address,
+				To:     &to,
+				Value:  (*hexutil.Big)(big.NewInt(1)),
+				Blobs:  []kzg4844.Blob{{}},
+				Proofs: []kzg4844.Proof{{}},
+			},
+			err: `blob proofs provided while commitments were not`,
+		},
+		{
+			name: "TestInvalidParamsCombination2",
+			args: TransactionArgs{
+				From:        &b.acc.Address,
+				To:          &to,
+				Value:       (*hexutil.Big)(big.NewInt(1)),
+				Blobs:       []kzg4844.Blob{{}},
+				Commitments: []kzg4844.Commitment{{}},
+			},
+			err: `blob commitments provided while proofs were not`,
+		},
+		{
+			name: "TestInvalidParamsCount1",
+			args: TransactionArgs{
+				From:        &b.acc.Address,
+				To:          &to,
+				Value:       (*hexutil.Big)(big.NewInt(1)),
+				Blobs:       []kzg4844.Blob{{}},
+				Commitments: []kzg4844.Commitment{{}, {}},
+				Proofs:      []kzg4844.Proof{{}, {}},
+			},
+			err: `number of blobs and commitments mismatch (have=2, want=1)`,
+		},
+		{
+			name: "TestInvalidParamsCount2",
+			args: TransactionArgs{
+				From:        &b.acc.Address,
+				To:          &to,
+				Value:       (*hexutil.Big)(big.NewInt(1)),
+				Blobs:       []kzg4844.Blob{{}, {}},
+				Commitments: []kzg4844.Commitment{{}, {}},
+				Proofs:      []kzg4844.Proof{{}},
+			},
+			err: `number of blobs and proofs mismatch (have=1, want=2)`,
+		},
+		{
+			name: "TestInvalidProofVerification",
+			args: TransactionArgs{
+				From:        &b.acc.Address,
+				To:          &to,
+				Value:       (*hexutil.Big)(big.NewInt(1)),
+				Blobs:       []kzg4844.Blob{{}, {}},
+				Commitments: []kzg4844.Commitment{{}, {}},
+				Proofs:      []kzg4844.Proof{{}, {}},
+			},
+			err: `failed to verify blob proof: short buffer`,
+		},
+		{
+			name: "TestGenerateBlobHashes",
+			args: TransactionArgs{
+				From:        &b.acc.Address,
+				To:          &to,
+				Value:       (*hexutil.Big)(big.NewInt(1)),
+				Blobs:       []kzg4844.Blob{emptyBlob},
+				Commitments: []kzg4844.Commitment{emptyBlobCommit},
+				Proofs:      []kzg4844.Proof{emptyBlobProof},
+			},
+			want: &result{
+				Hashes: []common.Hash{emptyBlobHash},
+				Sidecar: &types.BlobTxSidecar{
+					Blobs:       []kzg4844.Blob{emptyBlob},
+					Commitments: []kzg4844.Commitment{emptyBlobCommit},
+					Proofs:      []kzg4844.Proof{emptyBlobProof},
+				},
+			},
+		},
+		{
+			name: "TestValidBlobHashes",
+			args: TransactionArgs{
+				From:        &b.acc.Address,
+				To:          &to,
+				Value:       (*hexutil.Big)(big.NewInt(1)),
+				BlobHashes:  []common.Hash{emptyBlobHash},
+				Blobs:       []kzg4844.Blob{emptyBlob},
+				Commitments: []kzg4844.Commitment{emptyBlobCommit},
+				Proofs:      []kzg4844.Proof{emptyBlobProof},
+			},
+			want: &result{
+				Hashes: []common.Hash{emptyBlobHash},
+				Sidecar: &types.BlobTxSidecar{
+					Blobs:       []kzg4844.Blob{emptyBlob},
+					Commitments: []kzg4844.Commitment{emptyBlobCommit},
+					Proofs:      []kzg4844.Proof{emptyBlobProof},
+				},
+			},
+		},
+		{
+			name: "TestInvalidBlobHashes",
+			args: TransactionArgs{
+				From:        &b.acc.Address,
+				To:          &to,
+				Value:       (*hexutil.Big)(big.NewInt(1)),
+				BlobHashes:  []common.Hash{{0x01, 0x22}},
+				Blobs:       []kzg4844.Blob{emptyBlob},
+				Commitments: []kzg4844.Commitment{emptyBlobCommit},
+				Proofs:      []kzg4844.Proof{emptyBlobProof},
+			},
+			err: fmt.Sprintf("blob hash verification failed (have=%s, want=%s)", common.Hash{0x01, 0x22}, emptyBlobHash),
+		},
+		{
+			name: "TestGenerateBlobProofs",
+			args: TransactionArgs{
+				From:  &b.acc.Address,
+				To:    &to,
+				Value: (*hexutil.Big)(big.NewInt(1)),
+				Blobs: []kzg4844.Blob{emptyBlob},
+			},
+			want: &result{
+				Hashes: []common.Hash{emptyBlobHash},
+				Sidecar: &types.BlobTxSidecar{
+					Blobs:       []kzg4844.Blob{emptyBlob},
+					Commitments: []kzg4844.Commitment{emptyBlobCommit},
+					Proofs:      []kzg4844.Proof{emptyBlobProof},
+				},
+			},
+		},
+	}
+	for _, tc := range suite {
+		t.Run(tc.name, func(t *testing.T) {
+			res, err := api.FillTransaction(context.Background(), tc.args)
+			if len(tc.err) > 0 {
+				if err == nil {
+					t.Fatalf("missing error. want: %s", tc.err)
+				} else if err != nil && err.Error() != tc.err {
+					t.Fatalf("error mismatch. want: %s, have: %s", tc.err, err.Error())
+				}
+				return
+			}
+			if err != nil && len(tc.err) == 0 {
+				t.Fatalf("expected no error. have: %s", err)
+			}
+			if res == nil {
+				t.Fatal("result missing")
+			}
+			want, err := json.Marshal(tc.want)
+			if err != nil {
+				t.Fatalf("failed to encode expected: %v", err)
+			}
+			have, err := json.Marshal(result{Hashes: res.Tx.BlobHashes(), Sidecar: res.Tx.BlobTxSidecar()})
+			if err != nil {
+				t.Fatalf("failed to encode computed sidecar: %v", err)
+			}
+			if !bytes.Equal(have, want) {
+				t.Errorf("blob sidecar mismatch. Have: %s, want: %s", have, want)
+			}
+		})
+	}
+}
+
 func argsFromTransaction(tx *types.Transaction, from common.Address) TransactionArgs {
 	var (
 		gas        = tx.Gas()
@@ -19,6 +19,7 @@ package ethapi
 import (
 	"bytes"
 	"context"
+	"crypto/sha256"
 	"errors"
 	"fmt"
 	"math/big"
@@ -29,11 +30,17 @@ import (
 	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto/kzg4844"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/holiman/uint256"
 )
 
+var (
+	maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob
+)
+
 // TransactionArgs represents the arguments to construct a new transaction
 // or a message call.
 type TransactionArgs struct {
@@ -56,9 +63,17 @@ type TransactionArgs struct {
 	AccessList *types.AccessList `json:"accessList,omitempty"`
 	ChainID    *hexutil.Big      `json:"chainId,omitempty"`
 
-	// Introduced by EIP-4844.
+	// For BlobTxType
 	BlobFeeCap *hexutil.Big  `json:"maxFeePerBlobGas"`
 	BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
 
+	// For BlobTxType transactions with blob sidecar
+	Blobs       []kzg4844.Blob       `json:"blobs"`
+	Commitments []kzg4844.Commitment `json:"commitments"`
+	Proofs      []kzg4844.Proof      `json:"proofs"`
+
+	// This configures whether blobs are allowed to be passed.
+	blobSidecarAllowed bool
 }
 
 // from retrieves the transaction sender address.
@@ -82,9 +97,13 @@ func (args *TransactionArgs) data() []byte {
 
 // setDefaults fills in default values for unspecified tx fields.
 func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
+	if err := args.setBlobTxSidecar(ctx, b); err != nil {
+		return err
+	}
 	if err := args.setFeeDefaults(ctx, b); err != nil {
 		return err
 	}
 
 	if args.Value == nil {
 		args.Value = new(hexutil.Big)
 	}
@@ -98,15 +117,25 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
 	if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) {
 		return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`)
 	}
-	if args.BlobHashes != nil && args.To == nil {
-		return errors.New(`blob transactions cannot have the form of a create transaction`)
-	}
+
+	// BlobTx fields
 	if args.BlobHashes != nil && len(args.BlobHashes) == 0 {
 		return errors.New(`need at least 1 blob for a blob transaction`)
 	}
-	if args.To == nil && len(args.data()) == 0 {
-		return errors.New(`contract creation without any data provided`)
+	if args.BlobHashes != nil && len(args.BlobHashes) > maxBlobsPerTransaction {
+		return fmt.Errorf(`too many blobs in transaction (have=%d, max=%d)`, len(args.BlobHashes), maxBlobsPerTransaction)
 	}
+
+	// create check
+	if args.To == nil {
+		if args.BlobHashes != nil {
+			return errors.New(`missing "to" in blob transaction`)
+		}
+		if len(args.data()) == 0 {
+			return errors.New(`contract creation without any data provided`)
+		}
+	}
 
 	// Estimate the gas usage if necessary.
 	if args.Gas == nil {
 		// These fields are immutable during the estimation, safe to
@@ -130,6 +159,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
 		args.Gas = &estimated
 		log.Trace("Estimate gas usage automatically", "gas", args.Gas)
 	}
+
 	// If chain id is provided, ensure it matches the local chain id. Otherwise, set the local
 	// chain id as the default.
 	want := b.ChainConfig().ChainID
@@ -165,10 +195,12 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) error {
 		}
 		return nil // No need to set anything, user already set MaxFeePerGas and MaxPriorityFeePerGas
 	}
 
+	// Sanity check the EIP-4844 fee parameters.
 	if args.BlobFeeCap != nil && args.BlobFeeCap.ToInt().Sign() == 0 {
 		return errors.New("maxFeePerBlobGas must be non-zero")
 	}
 
+	// Sanity check the non-EIP-1559 fee parameters.
 	head := b.CurrentHeader()
 	isLondon := b.ChainConfig().IsLondon(head.Number)
@@ -250,6 +282,81 @@ func (args *TransactionArgs) setLondonFeeDefaults(ctx context.Context, head *types.Header, b Backend) error {
 	return nil
 }
 
+// setBlobTxSidecar adds the blob tx sidecar to the transaction arguments,
+// generating or verifying the commitments, proofs, and versioned hashes as
+// needed.
+func (args *TransactionArgs) setBlobTxSidecar(ctx context.Context, b Backend) error {
+	// No blobs, we're done.
+	if args.Blobs == nil {
+		return nil
+	}
+
+	// Passing blobs is not allowed in all contexts, only in specific methods.
+	if !args.blobSidecarAllowed {
+		return errors.New(`"blobs" is not supported for this RPC method`)
+	}
+
+	n := len(args.Blobs)
+	// Assume user provides either only blobs (w/o hashes), or
+	// blobs together with commitments and proofs.
+	if args.Commitments == nil && args.Proofs != nil {
+		return errors.New(`blob proofs provided while commitments were not`)
+	} else if args.Commitments != nil && args.Proofs == nil {
+		return errors.New(`blob commitments provided while proofs were not`)
+	}
+
+	// len(blobs) == len(commitments) == len(proofs) == len(hashes)
+	if args.Commitments != nil && len(args.Commitments) != n {
+		return fmt.Errorf("number of blobs and commitments mismatch (have=%d, want=%d)", len(args.Commitments), n)
+	}
+	if args.Proofs != nil && len(args.Proofs) != n {
+		return fmt.Errorf("number of blobs and proofs mismatch (have=%d, want=%d)", len(args.Proofs), n)
+	}
+	if args.BlobHashes != nil && len(args.BlobHashes) != n {
+		return fmt.Errorf("number of blobs and hashes mismatch (have=%d, want=%d)", len(args.BlobHashes), n)
+	}
+
+	if args.Commitments == nil {
+		// Generate commitment and proof.
+		commitments := make([]kzg4844.Commitment, n)
+		proofs := make([]kzg4844.Proof, n)
+		for i, b := range args.Blobs {
+			c, err := kzg4844.BlobToCommitment(b)
+			if err != nil {
+				return fmt.Errorf("blobs[%d]: error computing commitment: %v", i, err)
+			}
+			commitments[i] = c
+			p, err := kzg4844.ComputeBlobProof(b, c)
+			if err != nil {
+				return fmt.Errorf("blobs[%d]: error computing proof: %v", i, err)
+			}
+			proofs[i] = p
+		}
+		args.Commitments = commitments
+		args.Proofs = proofs
+	} else {
+		for i, b := range args.Blobs {
+			if err := kzg4844.VerifyBlobProof(b, args.Commitments[i], args.Proofs[i]); err != nil {
+				return fmt.Errorf("failed to verify blob proof: %v", err)
+			}
+		}
+	}
+
+	hashes := make([]common.Hash, n)
+	hasher := sha256.New()
+	for i, c := range args.Commitments {
+		hashes[i] = kzg4844.CalcBlobHashV1(hasher, &c)
+	}
+	if args.BlobHashes != nil {
+		for i, h := range hashes {
+			if h != args.BlobHashes[i] {
+				return fmt.Errorf("blob hash verification failed (have=%s, want=%s)", args.BlobHashes[i], h)
+			}
+		}
+	} else {
+		args.BlobHashes = hashes
+	}
+	return nil
+}
+
 // ToMessage converts the transaction arguments to the Message type used by the
 // core evm. This method is used in calls and traces that do not require a real
 // live transaction.
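The upshot for eth_fillTransaction is that a caller may supply only Blobs and let setBlobTxSidecar derive the commitments, proofs, and versioned hashes, exactly as the TestGenerateBlobProofs case above exercises. A sketch of the argument shape (sender and recipient are hypothetical placeholder addresses):

args := TransactionArgs{
	From:  &sender,    // hypothetical sender address
	To:    &recipient, // blob transactions must carry a recipient
	Value: (*hexutil.Big)(big.NewInt(1)),
	Blobs: []kzg4844.Blob{{}}, // only blobs supplied by the caller
}
// setDefaults calls setBlobTxSidecar, which fills in args.Commitments,
// args.Proofs, and args.BlobHashes (or verifies them when provided).
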
@@ -363,6 +470,14 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
 			BlobHashes: args.BlobHashes,
 			BlobFeeCap: uint256.MustFromBig((*big.Int)(args.BlobFeeCap)),
 		}
+		if args.Blobs != nil {
+			data.(*types.BlobTx).Sidecar = &types.BlobTxSidecar{
+				Blobs:       args.Blobs,
+				Commitments: args.Commitments,
+				Proofs:      args.Proofs,
+			}
+		}
+
 	case args.MaxFeePerGas != nil:
 		al := types.AccessList{}
 		if args.AccessList != nil {
@@ -379,6 +494,7 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
 			Data:       args.data(),
 			AccessList: al,
 		}
+
 	case args.AccessList != nil:
 		data = &types.AccessListTx{
 			To:         args.To,
@@ -390,6 +506,7 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
 			Data:       args.data(),
 			AccessList: *args.AccessList,
 		}
+
 	default:
 		data = &types.LegacyTx{
 			To:       args.To,
@@ -403,12 +520,6 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
 	return types.NewTx(data)
 }
 
-// ToTransaction converts the arguments to a transaction.
-// This assumes that setDefaults has been called.
-func (args *TransactionArgs) ToTransaction() *types.Transaction {
-	return args.toTransaction()
-}
-
 // IsEIP4844 returns an indicator if the args contains EIP4844 fields.
 func (args *TransactionArgs) IsEIP4844() bool {
 	return args.BlobHashes != nil || args.BlobFeeCap != nil
@@ -257,6 +257,7 @@ type BigFlag struct {
 	HasBeenSet bool
 
 	Value        *big.Int
+	defaultValue *big.Int
 
 	Aliases []string
 	EnvVars []string
@@ -269,6 +270,10 @@ func (f *BigFlag) IsSet() bool { return f.HasBeenSet }
 func (f *BigFlag) String() string { return cli.FlagStringer(f) }
 
 func (f *BigFlag) Apply(set *flag.FlagSet) error {
+	// Set the default value so that the environment won't be able to overwrite it.
+	if f.Value != nil {
+		f.defaultValue = new(big.Int).Set(f.Value)
+	}
 	for _, envVar := range f.EnvVars {
 		envVar = strings.TrimSpace(envVar)
 		if value, found := syscall.Getenv(envVar); found {
@@ -283,7 +288,6 @@ func (f *BigFlag) Apply(set *flag.FlagSet) error {
 		f.Value = new(big.Int)
 		set.Var((*bigValue)(f.Value), f.Name, f.Usage)
 	})
-
 	return nil
 }
 
@@ -310,7 +314,7 @@ func (f *BigFlag) GetDefaultText() string {
 	if f.DefaultText != "" {
 		return f.DefaultText
 	}
-	return f.GetValue()
+	return f.defaultValue.String()
 }
 
 // bigValue turns *big.Int into a flag.Value
@@ -115,7 +115,7 @@ func doMigrateFlags(ctx *cli.Context) {
 	for _, parent := range ctx.Lineage()[1:] {
 		if parent.IsSet(name) {
 			// When iterating across the lineage, we will be served both
-			// the 'canon' and alias formats of all commmands. In most cases,
+			// the 'canon' and alias formats of all commands. In most cases,
 			// it's fine to set it in the ctx multiple times (one for each
 			// name), however, the Slice-flags are not fine.
 			// The slice-flags accumulate, so if we set it once as
@@ -2031,7 +2031,7 @@ var fromAscii = function(str) {
 *
 * @method transformToFullName
 * @param {Object} json-abi
-* @return {String} full fnction/event name
+* @return {String} full function/event name
 */
var transformToFullName = function (json) {
    if (json.name.indexOf('(') !== -1) {
|
|||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns true if object is Objet, otherwise false
|
* Returns true if object is Object, otherwise false
|
||||||
*
|
*
|
||||||
* @method isObject
|
* @method isObject
|
||||||
* @param {Object}
|
* @param {Object}
|
||||||
@ -2757,7 +2757,7 @@ var Batch = function (web3) {
|
|||||||
* Should be called to add create new request to batch request
|
* Should be called to add create new request to batch request
|
||||||
*
|
*
|
||||||
* @method add
|
* @method add
|
||||||
* @param {Object} jsonrpc requet object
|
* @param {Object} jsonrpc request object
|
||||||
*/
|
*/
|
||||||
Batch.prototype.add = function (request) {
|
Batch.prototype.add = function (request) {
|
||||||
this.requests.push(request);
|
this.requests.push(request);
|
||||||
@ -4559,7 +4559,7 @@ Iban.createIndirect = function (options) {
|
|||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Thos method should be used to check if given string is valid iban object
|
* This method should be used to check if given string is valid iban object
|
||||||
*
|
*
|
||||||
* @method isValid
|
* @method isValid
|
||||||
* @param {String} iban string
|
* @param {String} iban string
|
||||||
@ -6708,7 +6708,7 @@ var exchangeAbi = require('../contracts/SmartExchange.json');
|
|||||||
* @method transfer
|
* @method transfer
|
||||||
* @param {String} from
|
* @param {String} from
|
||||||
* @param {String} to iban
|
* @param {String} to iban
|
||||||
* @param {Value} value to be tranfered
|
* @param {Value} value to be transferred
|
||||||
* @param {Function} callback, callback
|
* @param {Function} callback, callback
|
||||||
*/
|
*/
|
||||||
var transfer = function (eth, from, to, value, callback) {
|
var transfer = function (eth, from, to, value, callback) {
|
||||||
@ -6738,7 +6738,7 @@ var transfer = function (eth, from, to, value, callback) {
|
|||||||
* @method transferToAddress
|
* @method transferToAddress
|
||||||
* @param {String} from
|
* @param {String} from
|
||||||
* @param {String} to
|
* @param {String} to
|
||||||
* @param {Value} value to be tranfered
|
* @param {Value} value to be transferred
|
||||||
* @param {Function} callback, callback
|
* @param {Function} callback, callback
|
||||||
*/
|
*/
|
||||||
var transferToAddress = function (eth, from, to, value, callback) {
|
var transferToAddress = function (eth, from, to, value, callback) {
|
||||||
@ -7092,7 +7092,7 @@ module.exports = transfer;
|
|||||||
/**
|
/**
|
||||||
* Initializes a newly created cipher.
|
* Initializes a newly created cipher.
|
||||||
*
|
*
|
||||||
* @param {number} xformMode Either the encryption or decryption transormation mode constant.
|
* @param {number} xformMode Either the encryption or decryption transformation mode constant.
|
||||||
* @param {WordArray} key The key.
|
* @param {WordArray} key The key.
|
||||||
* @param {Object} cfg (Optional) The configuration options to use for this operation.
|
* @param {Object} cfg (Optional) The configuration options to use for this operation.
|
||||||
*
|
*
|
||||||
@ -9446,7 +9446,7 @@ module.exports = transfer;
|
|||||||
var M_offset_14 = M[offset + 14];
|
var M_offset_14 = M[offset + 14];
|
||||||
var M_offset_15 = M[offset + 15];
|
var M_offset_15 = M[offset + 15];
|
||||||
|
|
||||||
// Working varialbes
|
// Working variables
|
||||||
var a = H[0];
|
var a = H[0];
|
||||||
var b = H[1];
|
var b = H[1];
|
||||||
var c = H[2];
|
var c = H[2];
|
||||||
|

@@ -192,7 +192,7 @@ func (h *GlogHandler) Handle(_ context.Context, r slog.Record) error {
 		frame, _ := fs.Next()
 
 		for _, rule := range h.patterns {
-			if rule.pattern.MatchString(fmt.Sprintf("%+s", frame.File)) {
+			if rule.pattern.MatchString(fmt.Sprintf("+%s", frame.File)) {
 				h.siteCache[r.PC], lvl, ok = rule.level, rule.level, true
 			}
 		}
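
For context on the hunk above: GlogHandler routes each record through vmodule-style patterns matched against the call site's file path; the change only reorders the fmt verb. A rough, self-contained illustration of that matching step (the pattern and file names here are made up, not geth's actual rules):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical vmodule rule: files under eth/ log at a more verbose level.
	rule := regexp.MustCompile(`eth/[^/]+\.go$`)
	for _, file := range []string{"eth/downloader.go", "rpc/server.go"} {
		if rule.MatchString(fmt.Sprintf("+%s", file)) {
			fmt.Println(file, "=> verbose level")
		} else {
			fmt.Println(file, "=> default level")
		}
	}
}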

@@ -8,7 +8,7 @@ type CounterSnapshot interface {
 	Count() int64
 }
 
-// Counters hold an int64 value that can be incremented and decremented.
+// Counter holds an int64 value that can be incremented and decremented.
 type Counter interface {
 	Clear()
 	Dec(int64)

@@ -2,12 +2,12 @@ package metrics
 
 import "sync/atomic"
 
-// gaugeSnapshot contains a readonly int64.
+// GaugeSnapshot contains a readonly int64.
 type GaugeSnapshot interface {
 	Value() int64
 }
 
-// Gauges hold an int64 value that can be set arbitrarily.
+// Gauge holds an int64 value that can be set arbitrarily.
 type Gauge interface {
 	Snapshot() GaugeSnapshot
 	Update(int64)
@@ -74,7 +74,7 @@ func (g *StandardGauge) Update(v int64) {
 	g.value.Store(v)
 }
 
-// Update updates the gauge's value if v is larger then the current valie.
+// Update updates the gauge's value if v is larger than the current value.
 func (g *StandardGauge) UpdateIfGt(v int64) {
 	for {
 		exist := g.value.Load()

@@ -48,7 +48,7 @@ type gaugeFloat64Snapshot float64
 // Value returns the value at the time the snapshot was taken.
 func (g gaugeFloat64Snapshot) Value() float64 { return float64(g) }
 
-// NilGauge is a no-op Gauge.
+// NilGaugeFloat64 is a no-op Gauge.
 type NilGaugeFloat64 struct{}
 
 func (NilGaugeFloat64) Snapshot() GaugeFloat64Snapshot { return NilGaugeFloat64{} }

@@ -9,7 +9,7 @@ type GaugeInfoSnapshot interface {
 	Value() GaugeInfoValue
 }
 
-// GaugeInfos hold a GaugeInfoValue value that can be set arbitrarily.
+// GaugeInfo holds a GaugeInfoValue value that can be set arbitrarily.
 type GaugeInfo interface {
 	Update(GaugeInfoValue)
 	Snapshot() GaugeInfoSnapshot

@@ -1,6 +1,6 @@
 package metrics
 
-// Healthchecks hold an error value describing an arbitrary up/down status.
+// Healthcheck holds an error value describing an arbitrary up/down status.
 type Healthcheck interface {
 	Check()
 	Error() error

@@ -4,7 +4,7 @@ type HistogramSnapshot interface {
 	SampleSnapshot
 }
 
-// Histograms calculate distribution statistics from a series of int64 values.
+// Histogram calculates distribution statistics from a series of int64 values.
 type Histogram interface {
 	Clear()
 	Update(int64)
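
The metrics hunks above are doc-comment fixes only (each comment now names its interface in the singular); the API itself is unchanged. For readers unfamiliar with it, a small sketch of the counter contract those comments describe, written against a toy in-memory type rather than the geth registry:

package main

import (
	"fmt"
	"sync/atomic"
)

// counter mirrors the Counter contract above: an int64 that can be
// incremented, decremented, and read back.
type counter struct{ v atomic.Int64 }

func (c *counter) Inc(i int64)  { c.v.Add(i) }
func (c *counter) Dec(i int64)  { c.v.Add(-i) }
func (c *counter) Count() int64 { return c.v.Load() }

func main() {
	var c counter
	c.Inc(3)
	c.Dec(1)
	fmt.Println("count:", c.Count()) // 2
}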

@@ -25,7 +25,7 @@ type v2Reporter struct {
 	write api.WriteAPI
 }
 
-// InfluxDBWithTags starts a InfluxDB reporter which will post the from the given metrics.Registry at each d interval with the specified tags
+// InfluxDBV2WithTags starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval with the specified tags
 func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, token string, bucket string, organization string, namespace string, tags map[string]string) {
 	rep := &v2Reporter{
 		reg: r,
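
The corrected comment now matches the exported name. Calling it looks roughly like this; the reporter runs its own posting loop, so callers typically start it in a goroutine (endpoint, token, and names below are placeholders, not real credentials):

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/metrics/influxdb"
)

func main() {
	// Report the default registry every 10s; all connection details are placeholders.
	go influxdb.InfluxDBV2WithTags(
		metrics.DefaultRegistry,
		10*time.Second,
		"http://localhost:8086", // endpoint (placeholder)
		"example-token",         // token (placeholder)
		"geth",                  // bucket
		"example-org",           // organization
		"geth.",                 // namespace prefix
		map[string]string{"host": "node-1"},
	)
	time.Sleep(30 * time.Second) // keep the process alive while reporting
}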

@@ -197,6 +197,11 @@ func (miner *Miner) SetExtra(extra []byte) error {
 	return nil
 }
 
+func (miner *Miner) SetGasTip(tip *big.Int) error {
+	miner.worker.setGasTip(tip)
+	return nil
+}
+
 // SetRecommitInterval sets the interval for sealing work resubmitting.
 func (miner *Miner) SetRecommitInterval(interval time.Duration) {
 	miner.worker.setRecommitInterval(interval)
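
SetGasTip threads a new minimum-tip setting from the Miner facade down to the worker (see the worker hunks further below). A toy version of that delegation, with the worker guarding its field behind the shared mutex; the 1 gwei figure is just an example value:

package main

import (
	"fmt"
	"math/big"
	"sync"
)

// Toy versions of the Miner/worker pair: the exported setter just
// delegates, and the worker serializes writes with its mutex.
type worker struct {
	mu  sync.RWMutex
	tip *big.Int
}

func (w *worker) setGasTip(tip *big.Int) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.tip = tip
}

type Miner struct{ worker *worker }

func (m *Miner) SetGasTip(tip *big.Int) error {
	m.worker.setGasTip(tip)
	return nil
}

func main() {
	m := &Miner{worker: &worker{tip: big.NewInt(0)}}
	_ = m.SetGasTip(big.NewInt(1_000_000_000)) // 1 gwei, example value
	fmt.Println("min tip:", m.worker.tip)
}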

@@ -119,11 +119,11 @@ func newTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address]
 }
 
 // Peek returns the next transaction by price.
-func (t *transactionsByPriceAndNonce) Peek() *txpool.LazyTransaction {
+func (t *transactionsByPriceAndNonce) Peek() (*txpool.LazyTransaction, *big.Int) {
 	if len(t.heads) == 0 {
-		return nil
+		return nil, nil
 	}
-	return t.heads[0].tx
+	return t.heads[0].tx, t.heads[0].fees
 }
 
 // Shift replaces the current best head with the next one from the same account.

@@ -104,7 +104,7 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
 	txset := newTransactionsByPriceAndNonce(signer, groups, baseFee)
 
 	txs := types.Transactions{}
-	for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
+	for tx, _ := txset.Peek(); tx != nil; tx, _ = txset.Peek() {
 		txs = append(txs, tx.Tx)
 		txset.Shift()
 	}
@@ -170,7 +170,7 @@ func TestTransactionTimeSort(t *testing.T) {
 	txset := newTransactionsByPriceAndNonce(signer, groups, nil)
 
 	txs := types.Transactions{}
-	for tx := txset.Peek(); tx != nil; tx, _ = txset.Peek() {
+	for tx, _ := txset.Peek(); tx != nil; tx, _ = txset.Peek() {
 		txs = append(txs, tx.Tx)
 		txset.Shift()
 	}
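
Peek now returns the effective miner tip alongside the lazy transaction, so callers can filter on price without resolving the transaction body first; the test hunks simply discard the second value. A consumer loop in the new shape (all types faked for the sketch):

package main

import (
	"fmt"
	"math/big"
)

type lazyTx struct{ id int }

// head pairs a queued transaction with its precomputed effective tip,
// mirroring the heads entries in transactionsByPriceAndNonce.
type head struct {
	tx   *lazyTx
	fees *big.Int
}

type txQueue struct{ heads []head }

// Peek mirrors the new two-value shape: transaction plus effective tip.
func (q *txQueue) Peek() (*lazyTx, *big.Int) {
	if len(q.heads) == 0 {
		return nil, nil
	}
	return q.heads[0].tx, q.heads[0].fees
}

func (q *txQueue) Shift() { q.heads = q.heads[1:] }

func main() {
	q := &txQueue{heads: []head{{&lazyTx{1}, big.NewInt(2)}, {&lazyTx{2}, big.NewInt(1)}}}
	for tx, fees := q.Peek(); tx != nil; tx, fees = q.Peek() {
		fmt.Printf("tx %d pays tip %v\n", tx.id, fees)
		q.Shift()
	}
}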

@@ -205,6 +205,7 @@ type worker struct {
 	mu       sync.RWMutex // The lock used to protect the coinbase and extra fields
 	coinbase common.Address
 	extra    []byte
+	tip      *big.Int // Minimum tip needed for non-local transaction to include them
 
 	pendingMu    sync.RWMutex
 	pendingTasks map[common.Hash]*task
@@ -251,6 +252,7 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
 		isLocalBlock:       isLocalBlock,
 		coinbase:           config.Etherbase,
 		extra:              config.ExtraData,
+		tip:                config.GasPrice,
 		pendingTasks:       make(map[common.Hash]*task),
 		txsCh:              make(chan core.NewTxsEvent, txChanSize),
 		chainHeadCh:        make(chan core.ChainHeadEvent, chainHeadChanSize),
@@ -327,6 +329,13 @@ func (w *worker) setExtra(extra []byte) {
 	w.extra = extra
 }
 
+// setGasTip sets the minimum miner tip needed to include a non-local transaction.
+func (w *worker) setGasTip(tip *big.Int) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	w.tip = tip
+}
+
 // setRecommitInterval updates the interval for miner sealing work recommitting.
 func (w *worker) setRecommitInterval(interval time.Duration) {
 	select {
@@ -554,7 +563,7 @@ func (w *worker) mainLoop() {
 			}
 			txset := newTransactionsByPriceAndNonce(w.current.signer, txs, w.current.header.BaseFee)
 			tcount := w.current.tcount
-			w.commitTransactions(w.current, txset, nil)
+			w.commitTransactions(w.current, txset, nil, new(big.Int))
 
 			// Only update the snapshot if any new transactions were added
 			// to the pending block
@@ -792,7 +801,7 @@ func (w *worker) applyTransaction(env *environment, tx *types.Transaction) (*typ
 	return receipt, err
 }
 
-func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAndNonce, interrupt *atomic.Int32) error {
+func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAndNonce, interrupt *atomic.Int32, minTip *big.Int) error {
 	gasLimit := env.header.GasLimit
 	if env.gasPool == nil {
 		env.gasPool = new(core.GasPool).AddGas(gasLimit)
@@ -812,7 +821,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
 			break
 		}
 		// Retrieve the next transaction and abort if all done.
-		ltx := txs.Peek()
+		ltx, tip := txs.Peek()
 		if ltx == nil {
 			break
 		}
@@ -827,6 +836,11 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
 			txs.Pop()
 			continue
 		}
+		// If we don't receive enough tip for the next transaction, skip the account
+		if tip.Cmp(minTip) < 0 {
+			log.Trace("Not enough tip for transaction", "hash", ltx.Hash, "tip", tip, "needed", minTip)
+			break // If the next-best is too low, surely no better will be available
+		}
 		// Transaction seems to fit, pull it up from the pool
 		tx := ltx.Resolve()
 		if tx == nil {
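
The break (rather than continue) is safe because transactionsByPriceAndNonce serves transactions in descending effective-tip order: once the best remaining tip falls below minTip, nothing behind it in the heap can qualify either. A compressed sketch of that invariant:

package main

import (
	"fmt"
	"math/big"
	"sort"
)

func main() {
	minTip := big.NewInt(3)
	// Tips sorted high-to-low, as the price heap guarantees.
	tips := []*big.Int{big.NewInt(9), big.NewInt(4), big.NewInt(2), big.NewInt(1)}
	sort.Slice(tips, func(i, j int) bool { return tips[i].Cmp(tips[j]) > 0 })
	for _, tip := range tips {
		if tip.Cmp(minTip) < 0 {
			fmt.Println("tip", tip, "below minimum; no better follows, stop")
			break // mirrors the hunk above
		}
		fmt.Println("include tx with tip", tip)
	}
}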
@@ -888,7 +902,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
 
 // generateParams wraps various of settings for generating sealing task.
 type generateParams struct {
-	timestamp  uint64         // The timstamp for sealing task
+	timestamp  uint64         // The timestamp for sealing task
 	forceTime  bool           // Flag whether the given timestamp is immutable or not
 	parentHash common.Hash    // Parent block hash, empty means the latest chain head
 	coinbase   common.Address // The fee recipient address for including transaction
@@ -997,15 +1011,19 @@ func (w *worker) fillTransactions(interrupt *atomic.Int32, env *environment) err
 	}
 
 	// Fill the block with all available pending transactions.
+	w.mu.RLock()
+	tip := w.tip
+	w.mu.RUnlock()
+
 	if len(localTxs) > 0 {
 		txs := newTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
-		if err := w.commitTransactions(env, txs, interrupt); err != nil {
+		if err := w.commitTransactions(env, txs, interrupt, new(big.Int)); err != nil {
 			return err
 		}
 	}
 	if len(remoteTxs) > 0 {
 		txs := newTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
-		if err := w.commitTransactions(env, txs, interrupt); err != nil {
+		if err := w.commitTransactions(env, txs, interrupt, tip); err != nil {
 			return err
 		}
 	}
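
Note the asymmetry in fillTransactions: local transactions are committed with a zero minimum tip (new(big.Int)), so they bypass the filter, while remote ones are held to w.tip, read once under the read lock. A tiny sketch of that read pattern (the defensive value copy is added for illustration; the hunk itself copies only the pointer):

package main

import (
	"fmt"
	"math/big"
	"sync"
)

type worker struct {
	mu  sync.RWMutex
	tip *big.Int
}

// snapshotTip copies the shared minimum tip under the read lock so the
// caller can use it without holding the lock, as fillTransactions does.
func (w *worker) snapshotTip() *big.Int {
	w.mu.RLock()
	defer w.mu.RUnlock()
	return new(big.Int).Set(w.tip)
}

func main() {
	w := &worker{tip: big.NewInt(1_000_000_000)}
	fmt.Println("min tip:", w.snapshotTip())
}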
@@ -41,6 +41,7 @@ const (
 	// needs of all CLs.
 	engineAPIBatchItemLimit         = 2000
 	engineAPIBatchResponseSizeLimit = 250 * 1000 * 1000
+	engineAPIBodyLimit              = 128 * 1024 * 1024
 )
 
 var (
@@ -453,14 +453,16 @@ func (n *Node) startRPC() error {
 			jwtSecret:              secret,
 			batchItemLimit:         engineAPIBatchItemLimit,
 			batchResponseSizeLimit: engineAPIBatchResponseSizeLimit,
+			httpBodyLimit:          engineAPIBodyLimit,
 		}
-		if err := server.enableRPC(allAPIs, httpConfig{
+		err := server.enableRPC(allAPIs, httpConfig{
 			CorsAllowedOrigins: DefaultAuthCors,
 			Vhosts:             n.config.AuthVirtualHosts,
 			Modules:            DefaultAuthModules,
 			prefix:             DefaultAuthPrefix,
 			rpcEndpointConfig:  sharedConfig,
-		}); err != nil {
+		})
+		if err != nil {
 			return err
 		}
 		servers = append(servers, server)

@@ -56,6 +56,7 @@ type rpcEndpointConfig struct {
 	jwtSecret              []byte // optional JWT secret
 	batchItemLimit         int
 	batchResponseSizeLimit int
+	httpBodyLimit          int
 }
 
 type rpcHandler struct {
@@ -304,6 +305,9 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error {
 	// Create RPC server and handler.
 	srv := rpc.NewServer()
 	srv.SetBatchLimits(config.batchItemLimit, config.batchResponseSizeLimit)
+	if config.httpBodyLimit > 0 {
+		srv.SetHTTPBodyLimit(config.httpBodyLimit)
+	}
 	if err := RegisterApis(apis, config.Modules, srv); err != nil {
 		return err
 	}
@@ -336,6 +340,9 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error {
 	// Create RPC server and handler.
 	srv := rpc.NewServer()
 	srv.SetBatchLimits(config.batchItemLimit, config.batchResponseSizeLimit)
+	if config.httpBodyLimit > 0 {
+		srv.SetHTTPBodyLimit(config.httpBodyLimit)
+	}
 	if err := RegisterApis(apis, config.Modules, srv); err != nil {
 		return err
 	}
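
engineAPIBodyLimit is 128 * 1024 * 1024 bytes, i.e. 128 MiB, and both the HTTP and WebSocket paths apply it through rpc.Server.SetHTTPBodyLimit before APIs are registered. Standalone use of the rpc package looks roughly like this (the limit values are examples, mirroring the constants above):

package main

import "github.com/ethereum/go-ethereum/rpc"

func main() {
	srv := rpc.NewServer()
	defer srv.Stop()
	srv.SetBatchLimits(2000, 250*1000*1000) // batch items, response bytes
	srv.SetHTTPBodyLimit(128 * 1024 * 1024) // request body cap: 128 MiB (example)
}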

@@ -58,7 +58,7 @@ func newMeteredConn(conn UDPConn) UDPConn {
 	return &meteredUdpConn{UDPConn: conn}
 }
 
-// Read delegates a network read to the underlying connection, bumping the udp ingress traffic meter along the way.
+// ReadFromUDP delegates a network read to the underlying connection, bumping the udp ingress traffic meter along the way.
 func (c *meteredUdpConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) {
 	n, addr, err = c.UDPConn.ReadFromUDP(b)
 	ingressTrafficMeter.Mark(int64(n))

@@ -127,7 +127,7 @@ func (srv *Server) portMappingLoop() {
 			} else if !ip.Equal(lastExtIP) {
 				log.Debug("External IP changed", "ip", extip, "interface", srv.NAT)
 			} else {
-				return
+				continue
 			}
 			// Here, we either failed to get the external IP, or it has changed.
 			lastExtIP = ip
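
The one-word change matters: portMappingLoop is a long-lived refresh loop, and returning when the external IP is unchanged would tear the whole loop down after the first no-change poll; continue merely skips that iteration. Schematically:

package main

import "fmt"

// Sketch of the loop-control fix above: on "no change" we must continue
// polling rather than return, or the refresh loop dies after one pass.
func main() {
	ips := []string{"1.2.3.4", "1.2.3.4", "5.6.7.8"}
	last := ""
	for _, ip := range ips {
		if ip == last {
			continue // was: return, which would stop watching for changes
		}
		fmt.Println("external IP changed:", ip)
		last = ip
	}
}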

@@ -172,7 +172,7 @@ type SimNode struct {
 	registerOnce sync.Once
 }
 
-// Close closes the underlaying node.Node to release
+// Close closes the underlying node.Node to release
 // acquired resources.
 func (sn *SimNode) Close() error {
 	return sn.node.Close()
Some files were not shown because too many files have changed in this diff.