Merge pull request #95 from openrelayxyz/merge/geth-v1.13.4

Merge/geth v1.13.4
AusIV authored on 2023-10-30 11:32:31 -05:00; committed by GitHub
commit 9aec954185
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
152 changed files with 2512 additions and 2138 deletions

.gitignore (vendored)
View File

@@ -48,3 +48,5 @@ profile.cov
 **/yarn-error.log
 logs/
+tests/spec-tests/

View File

@@ -606,8 +606,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
 	if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
 		return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
 	}
-	head := b.blockchain.CurrentHeader()
-	if !b.blockchain.Config().IsLondon(head.Number) {
+	if !b.blockchain.Config().IsLondon(header.Number) {
 		// If there's no basefee, then it must be a non-1559 execution
 		if call.GasPrice == nil {
 			call.GasPrice = new(big.Int)
@@ -629,13 +628,13 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
 			// Backfill the legacy gasPrice for EVM execution, unless we're all zeroes
 			call.GasPrice = new(big.Int)
 			if call.GasFeeCap.BitLen() > 0 || call.GasTipCap.BitLen() > 0 {
-				call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, head.BaseFee), call.GasFeeCap)
+				call.GasPrice = math.BigMin(new(big.Int).Add(call.GasTipCap, header.BaseFee), call.GasFeeCap)
 			}
 		}
 	}
 	// Ensure message is initialized properly.
 	if call.Gas == 0 {
-		call.Gas = 50000000
+		call.Gas = 10 * header.GasLimit
 	}
 	if call.Value == nil {
 		call.Value = new(big.Int)

View File

@@ -8,7 +8,7 @@
 ## Preparing the smartcard
-**WARNING: FOILLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**
+**WARNING: FOLLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**
 You can use status' [keycard-cli](https://github.com/status-im/keycard-cli) and you should get _at least_ version 2.1.1 of their [smartcard application](https://github.com/status-im/status-keycard/releases/download/2.2.1/keycard_v2.2.1.cap)

View File

@@ -1,25 +1,30 @@
 # This file contains sha256 checksums of optional build dependencies.
 
+# version:spec-tests 1.0.5
 # https://github.com/ethereum/execution-spec-tests/releases
-24bac679f3a2d8240d8e08e7f6a70b70c2dabf673317d924cf1d1887b9fe1f81 fixtures.tar.gz
+# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.5/
+d4fd06a0e5f94beb970f3c68374b38ef9de82d4be77517d326bcf739c3cbf3a2 fixtures_develop.tar.gz
 
+# version:golang 1.21.3
 # https://go.dev/dl/
-bfa36bf75e9a1e9cbbdb9abcf9d1707e479bd3a07880a8ae3564caee5711cb99 go1.21.1.src.tar.gz
+186f2b6f8c8b704e696821b09ab2041a5c1ee13dcbc3156a13adcf75931ee488 go1.21.3.src.tar.gz
-809f5b0ef4f7dcdd5f51e9630a5b2e5a1006f22a047126d61560cdc365678a19 go1.21.1.darwin-amd64.tar.gz
+27014fc69e301d7588a169ca239b3cc609f0aa1abf38528bf0d20d3b259211eb go1.21.3.darwin-amd64.tar.gz
-ffd40391a1e995855488b008ad9326ff8c2e81803a6e80894401003bae47fcf1 go1.21.1.darwin-arm64.tar.gz
+65302a7a9f7a4834932b3a7a14cb8be51beddda757b567a2f9e0cbd0d7b5a6ab go1.21.3.darwin-arm64.tar.gz
-9919a9a4dc82371aba3da5b7c830bcb6249fc1502cd26d959eb340a60e41ee01 go1.21.1.freebsd-386.tar.gz
+8e0cd2f66cf1bde9d07b4aee01e3d7c3cfdd14e20650488e1683da4b8492594a go1.21.3.freebsd-386.tar.gz
-2571f10f6047e04d87c1f5986a05e5e8f7b511faf98803ef12b66d563845d2a1 go1.21.1.freebsd-amd64.tar.gz
+6e74f65f586e93d1f3947894766f69e9b2ebda488592a09df61f36f06bfe58a8 go1.21.3.freebsd-amd64.tar.gz
-b93850666cdadbd696a986cf7b03111fe99db8c34a9aaa113d7c96d0081e1901 go1.21.1.linux-386.tar.gz
+fb209fd070db500a84291c5a95251cceeb1723e8f6142de9baca5af70a927c0e go1.21.3.linux-386.tar.gz
-b3075ae1ce5dab85f89bc7905d1632de23ca196bd8336afd93fa97434cfa55ae go1.21.1.linux-amd64.tar.gz
+1241381b2843fae5a9707eec1f8fb2ef94d827990582c7c7c32f5bdfbfd420c8 go1.21.3.linux-amd64.tar.gz
-7da1a3936a928fd0b2602ed4f3ef535b8cd1990f1503b8d3e1acc0fa0759c967 go1.21.1.linux-arm64.tar.gz
+fc90fa48ae97ba6368eecb914343590bbb61b388089510d0c56c2dde52987ef3 go1.21.3.linux-arm64.tar.gz
-f3716a43f59ae69999841d6007b42c9e286e8d8ce470656fb3e70d7be2d7ca85 go1.21.1.linux-armv6l.tar.gz
+a1ddcaaf0821a12a800884c14cb4268ce1c1f5a0301e9060646f1e15e611c6c7 go1.21.3.linux-armv6l.tar.gz
-eddf018206f8a5589bda75252b72716d26611efebabdca5d0083ec15e9e41ab7 go1.21.1.linux-ppc64le.tar.gz
+3b0e10a3704f164a6e85e0377728ec5fd21524fabe4c925610e34076586d5826 go1.21.3.linux-ppc64le.tar.gz
-a83b3e8eb4dbf76294e773055eb51397510ff4d612a247bad9903560267bba6d go1.21.1.linux-s390x.tar.gz
+4c78e2e6f4c684a3d5a9bdc97202729053f44eb7be188206f0627ef3e18716b6 go1.21.3.linux-s390x.tar.gz
-170256c820f466f29d64876f25f4dfa4029ed9902a0a9095d8bd603aecf4d83b go1.21.1.windows-386.zip
+e36737f4f2fadb4d2f919ec4ce517133a56e06064cca6e82fc883bb000c4d56c go1.21.3.windows-386.zip
-10a4f5b63215d11d1770453733dbcbf024f3f74872f84e28d7ea59f0250316c6 go1.21.1.windows-amd64.zip
+27c8daf157493f288d42a6f38debc6a2cb391f6543139eba9152fceca0be2a10 go1.21.3.windows-amd64.zip
-41135ce6e0ced4bc1e459cb96bd4090c9dc2062e24179c3f337d855af9b560ef go1.21.1.windows-arm64.zip
+bfb7a5c56f9ded07d8ae0e0b3702ac07b65e68fa8f33da24ed6df4ce01fe2c5c go1.21.3.windows-arm64.zip
 
-# https://github.com/golangci/golangci-lint/releases
+# version:golangci 1.51.1
+# https://github.com/golangci/golangci-lint/releases/
+# https://github.com/golangci/golangci-lint/releases/download/v1.51.1/
 fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
 75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
 e06b3459aaed356e1667580be00b05f41f3b2e29685d12cdee571c23e1edb414 golangci-lint-1.51.1-freebsd-386.tar.gz
@@ -48,4 +53,12 @@ bce02f7232723cb727755ee11f168a700a00896a25d37f87c4b173bce55596b4 golangci-lint-
 cf6403f84707ce8c98664736772271bc8874f2e760c2fd0f00cf3e85963507e9 golangci-lint-1.51.1-windows-armv7.zip
 
 # This is the builder on PPA that will build Go itself (inception-y), don't modify!
+#
+# This version is fine to be old and full of security holes, we just use it
+# to build the latest Go. Don't change it. If it ever becomes insufficient,
+# we need to switch over to a recursive builder to jump across supported
+# versions.
+#
+# version:ppa-builder 1.19.6
+# https://go.dev/dl/
 d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz
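The new `# version:<component> <version>` stanzas make this checksums file the single source of truth for tool versions; the ci.go changes below read them back through `build.Version`. A rough sketch of such a lookup, assuming only the stanza format visible above (geth's actual helper may differ):

```go
package main

import (
	"fmt"
	"strings"
)

// versionFromChecksums scans checksums.txt content for a line of the form
// "# version:<component> <version>" and returns the version string.
func versionFromChecksums(data, component string) (string, error) {
	prefix := "# version:" + component + " "
	for _, line := range strings.Split(data, "\n") {
		if strings.HasPrefix(line, prefix) {
			return strings.TrimSpace(strings.TrimPrefix(line, prefix)), nil
		}
	}
	return "", fmt.Errorf("no version entry for %q", component)
}

func main() {
	checksums := "# version:golang 1.21.3\n186f2b6f... go1.21.3.src.tar.gz"
	v, err := versionFromChecksums(checksums, "golang")
	fmt.Println(v, err) // 1.21.3 <nil>
}
```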

View File

@@ -136,23 +136,6 @@ var (
 	"golang-go": "/usr/lib/go",
 }
-
-	// This is the version of Go that will be downloaded by
-	//
-	//	go run ci.go install -dlgo
-	dlgoVersion = "1.21.1"
-
-	// This is the version of Go that will be used to bootstrap the PPA builder.
-	//
-	// This version is fine to be old and full of security holes, we just use it
-	// to build the latest Go. Don't change it. If it ever becomes insufficient,
-	// we need to switch over to a recursive builder to jumpt across supported
-	// versions.
-	gobootVersion = "1.19.6"
-
-	// This is the version of execution-spec-tests that we are using.
-	// When updating, you must also update build/checksums.txt.
-	executionSpecTestsVersion = "1.0.2"
-
 	// This is where the tests should be unpacked.
 	executionSpecTestsDir = "tests/spec-tests"
 )
@@ -192,6 +175,8 @@ func main() {
 		doWindowsInstaller(os.Args[2:])
 	case "purge":
 		doPurge(os.Args[2:])
+	case "sanitycheck":
+		doSanityCheck()
 	default:
 		log.Fatal("unknown command ", os.Args[1])
 	}
@@ -213,9 +198,8 @@ func doInstall(cmdline []string) {
 	tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
 	if *dlgo {
 		csdb := build.MustLoadChecksums("build/checksums.txt")
-		tc.Root = build.DownloadGo(csdb, dlgoVersion)
+		tc.Root = build.DownloadGo(csdb)
 	}
 	// Disable CLI markdown doc generation in release builds.
 	buildTags := []string{"urfave_cli_no_docs"}
@@ -312,7 +296,7 @@ func doTest(cmdline []string) {
 	// Configure the toolchain.
 	tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
 	if *dlgo {
-		tc.Root = build.DownloadGo(csdb, dlgoVersion)
+		tc.Root = build.DownloadGo(csdb)
 	}
 	gotest := tc.Go("test")
@@ -345,8 +329,12 @@
 // downloadSpecTestFixtures downloads and extracts the execution-spec-tests fixtures.
 func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
+	executionSpecTestsVersion, err := build.Version(csdb, "spec-tests")
+	if err != nil {
+		log.Fatal(err)
+	}
 	ext := ".tar.gz"
-	base := "fixtures" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename
+	base := "fixtures_develop" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename
 	url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/v%s/%s%s", executionSpecTestsVersion, base, ext)
 	archivePath := filepath.Join(cachedir, base+ext)
 	if err := csdb.DownloadFile(url, archivePath); err != nil {
@@ -377,9 +365,11 @@ func doLint(cmdline []string) {
 // downloadLinter downloads and unpacks golangci-lint.
 func downloadLinter(cachedir string) string {
-	const version = "1.51.1"
-
 	csdb := build.MustLoadChecksums("build/checksums.txt")
+	version, err := build.Version(csdb, "golangci")
+	if err != nil {
+		log.Fatal(err)
+	}
 	arch := runtime.GOARCH
 	ext := ".tar.gz"
@@ -761,6 +751,10 @@ func doDebianSource(cmdline []string) {
 // to bootstrap the builder Go.
 func downloadGoBootstrapSources(cachedir string) string {
 	csdb := build.MustLoadChecksums("build/checksums.txt")
+	gobootVersion, err := build.Version(csdb, "ppa-builder")
+	if err != nil {
+		log.Fatal(err)
+	}
 	file := fmt.Sprintf("go%s.src.tar.gz", gobootVersion)
 	url := "https://dl.google.com/go/" + file
 	dst := filepath.Join(cachedir, file)
@@ -773,6 +767,10 @@ func downloadGoBootstrapSources(cachedir string) string {
 // downloadGoSources downloads the Go source tarball.
 func downloadGoSources(cachedir string) string {
 	csdb := build.MustLoadChecksums("build/checksums.txt")
+	dlgoVersion, err := build.Version(csdb, "golang")
+	if err != nil {
+		log.Fatal(err)
+	}
 	file := fmt.Sprintf("go%s.src.tar.gz", dlgoVersion)
 	url := "https://dl.google.com/go/" + file
 	dst := filepath.Join(cachedir, file)
@@ -1099,3 +1097,7 @@ func doPurge(cmdline []string) {
 		log.Fatal(err)
 	}
 }
+
+func doSanityCheck() {
+	build.DownloadAndVerifyChecksums(build.MustLoadChecksums("build/checksums.txt"))
+}
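With the version constants gone from ci.go, every download helper now resolves its version through `build.Version` against checksums.txt, and the new `sanitycheck` subcommand fetches and verifies every artifact listed there in one pass; presumably it is invoked the same way as the other subcommands, e.g. `go run build/ci.go sanitycheck`.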

View File

@@ -1206,7 +1206,7 @@ func GenDoc(ctx *cli.Context) error {
 			URL: accounts.URL{Path: ".. ignored .."},
 		},
 		{
-			Address: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"),
+			Address: common.MaxAddress,
 		},
 	}})
 }

View File

@@ -114,7 +114,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
 	records = lrecords
 
 	log.Info(fmt.Sprintf("Retrieving existing TXT records on %s", name))
-	entries, err := c.DNSRecords(context.Background(), c.zoneID, cloudflare.DNSRecord{Type: "TXT"})
+	entries, _, err := c.ListDNSRecords(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), cloudflare.ListDNSRecordsParams{Type: "TXT"})
 	if err != nil {
 		return err
 	}
@@ -141,14 +141,25 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
 		if path != name {
 			ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
 		}
-		record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
-		_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)
+		record := cloudflare.CreateDNSRecordParams{Type: "TXT", Name: path, Content: val, TTL: ttl}
+		_, err = c.CreateDNSRecord(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), record)
 	} else if old.Content != val {
 		// Entry already exists, only change its content.
 		log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
 		updated++
-		old.Content = val
-		err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old)
+		record := cloudflare.UpdateDNSRecordParams{
+			Type:     old.Type,
+			Name:     old.Name,
+			Content:  val,
+			Data:     old.Data,
+			ID:       old.ID,
+			Priority: old.Priority,
+			TTL:      old.TTL,
+			Proxied:  old.Proxied,
+			Tags:     old.Tags,
+		}
+		_, err = c.UpdateDNSRecord(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), record)
 	} else {
 		skipped++
 		log.Debug(fmt.Sprintf("Skipping %s = %q", path, val))
@@ -168,7 +179,7 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
 		// Stale entry, nuke it.
 		log.Debug(fmt.Sprintf("Deleting %s = %q", path, entry.Content))
 		deleted++
-		if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil {
+		if err := c.DeleteDNSRecord(context.Background(), cloudflare.ZoneIdentifier(c.zoneID), entry.ID); err != nil {
 			return fmt.Errorf("failed to delete %s: %v", path, err)
 		}
 	}
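This rewrite tracks the newer cloudflare-go API shape: the zone is wrapped in a `*ResourceContainer` via `cloudflare.ZoneIdentifier`, each operation takes its own params struct, and updates name the record ID inside the params rather than mutating a fetched record. A minimal sketch of the list call under those assumptions (the token env var and zone ID are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/cloudflare/cloudflare-go"
)

func main() {
	api, err := cloudflare.NewWithAPIToken(os.Getenv("CLOUDFLARE_API_TOKEN"))
	if err != nil {
		panic(err)
	}
	// ZoneIdentifier wraps the raw zone ID in the ResourceContainer the
	// newer API methods expect.
	zone := cloudflare.ZoneIdentifier("your-zone-id")
	recs, _, err := api.ListDNSRecords(context.Background(), zone,
		cloudflare.ListDNSRecordsParams{Type: "TXT"})
	if err != nil {
		panic(err)
	}
	for _, r := range recs {
		fmt.Println(r.Name, "=", r.Content)
	}
}
```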

View File

@@ -145,7 +145,7 @@ func TestChain_GetHeaders(t *testing.T) {
 	}{
 		{
 			req: GetBlockHeaders{
-				GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+				GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 					Origin: eth.HashOrNumber{Number: uint64(2)},
 					Amount: uint64(5),
 					Skip:   1,
@@ -162,7 +162,7 @@ func TestChain_GetHeaders(t *testing.T) {
 		},
 		{
 			req: GetBlockHeaders{
-				GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+				GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 					Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
 					Amount: uint64(3),
 					Skip:   0,
@@ -177,7 +177,7 @@ func TestChain_GetHeaders(t *testing.T) {
 		},
 		{
 			req: GetBlockHeaders{
-				GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+				GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 					Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
 					Amount: uint64(1),
 					Skip:   0,

View File

@@ -62,7 +62,6 @@ func (s *Suite) dial() (*Conn, error) {
 	}
 	// set default p2p capabilities
 	conn.caps = []p2p.Cap{
-		{Name: "eth", Version: 66},
 		{Name: "eth", Version: 67},
 		{Name: "eth", Version: 68},
 	}
@@ -238,7 +237,7 @@ func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
 		}
 		resp := &BlockHeaders{
 			RequestId: msg.ReqID(),
-			BlockHeadersPacket: eth.BlockHeadersPacket(headers),
+			BlockHeadersRequest: eth.BlockHeadersRequest(headers),
 		}
 		if err := c.Write(resp); err != nil {
 			return errorf("could not write to connection: %v", err)
@@ -267,7 +266,7 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint
 	if !ok {
 		return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
 	}
-	headers := []*types.Header(resp.BlockHeadersPacket)
+	headers := []*types.Header(resp.BlockHeadersRequest)
 	return headers, nil
 }
@@ -379,7 +378,7 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error {
 	conn.SetReadDeadline(time.Now().Add(20 * time.Second))
 	// create request
 	req := &GetBlockHeaders{
-		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 			Origin: eth.HashOrNumber{Hash: block.Hash()},
 			Amount: 1,
 		},
@@ -605,7 +604,7 @@ func (s *Suite) hashAnnounce() error {
 	}
 	err = sendConn.Write(&BlockHeaders{
 		RequestId: blockHeaderReq.ReqID(),
-		BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()},
+		BlockHeadersRequest: eth.BlockHeadersRequest{nextBlock.Header()},
 	})
 	if err != nil {
 		return fmt.Errorf("failed to write to connection: %v", err)

View File

@@ -27,8 +27,8 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth/protocols/snap"
 	"github.com/ethereum/go-ethereum/internal/utesting"
-	"github.com/ethereum/go-ethereum/light"
 	"github.com/ethereum/go-ethereum/trie"
+	"github.com/ethereum/go-ethereum/trie/trienode"
 	"golang.org/x/crypto/sha3"
 )
@@ -58,7 +58,7 @@ type accRangeTest struct {
 func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
 	var (
 		root = s.chain.RootAt(999)
-		ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		ffHash = common.MaxHash
 		zero = common.Hash{}
 		firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
 		firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
@@ -125,7 +125,7 @@ type stRangesTest struct {
 // TestSnapGetStorageRanges various forms of GetStorageRanges requests.
 func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
 	var (
-		ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		ffHash = common.MaxHash
 		zero = common.Hash{}
 		firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
 		secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
@@ -530,17 +530,13 @@ func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
 	for i, key := range hashes {
 		keys[i] = common.CopyBytes(key[:])
 	}
-	nodes := make(light.NodeList, len(proof))
+	nodes := make(trienode.ProofList, len(proof))
 	for i, node := range proof {
 		nodes[i] = node
 	}
-	proofdb := nodes.NodeSet()
+	proofdb := nodes.Set()
-
-	var end []byte
-	if len(keys) > 0 {
-		end = keys[len(keys)-1]
-	}
-	_, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
+	_, err = trie.VerifyRangeProof(tc.root, tc.origin[:], keys, accounts, proofdb)
 	return err
 }
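Two upstream moves are visible in this file: the proof-list helper migrated from the retired `light` package to `trie/trienode` (with `ProofList.Set` replacing `NodeList.NodeSet`), and `trie.VerifyRangeProof` appears to have dropped its explicit end-key parameter, so the last entry of `keys` now bounds the range and the `end` bookkeeping becomes dead code.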

View File

@@ -112,7 +112,7 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
 	}
 	// write request
 	req := &GetBlockHeaders{
-		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 			Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()},
 			Amount: 2,
 			Skip:   1,
@@ -150,7 +150,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
 	// create two requests
 	req1 := &GetBlockHeaders{
 		RequestId: uint64(111),
-		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 			Origin: eth.HashOrNumber{
 				Hash: s.chain.blocks[1].Hash(),
 			},
@@ -161,7 +161,7 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
 	}
 	req2 := &GetBlockHeaders{
 		RequestId: uint64(222),
-		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 			Origin: eth.HashOrNumber{
 				Hash: s.chain.blocks[1].Hash(),
 			},
@@ -201,10 +201,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
 	if err != nil {
 		t.Fatalf("failed to get expected headers for request 2: %v", err)
 	}
-	if !headersMatch(expected1, headers1.BlockHeadersPacket) {
+	if !headersMatch(expected1, headers1.BlockHeadersRequest) {
 		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
 	}
-	if !headersMatch(expected2, headers2.BlockHeadersPacket) {
+	if !headersMatch(expected2, headers2.BlockHeadersRequest) {
 		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
 	}
 }
@@ -224,7 +224,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
 	reqID := uint64(1234)
 	request1 := &GetBlockHeaders{
 		RequestId: reqID,
-		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 			Origin: eth.HashOrNumber{
 				Number: 1,
 			},
@@ -233,7 +233,7 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
 	}
 	request2 := &GetBlockHeaders{
 		RequestId: reqID,
-		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 			Origin: eth.HashOrNumber{
 				Number: 33,
 			},
@@ -270,10 +270,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
 	if err != nil {
 		t.Fatalf("failed to get expected block headers: %v", err)
 	}
-	if !headersMatch(expected1, headers1.BlockHeadersPacket) {
+	if !headersMatch(expected1, headers1.BlockHeadersRequest) {
 		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
 	}
-	if !headersMatch(expected2, headers2.BlockHeadersPacket) {
+	if !headersMatch(expected2, headers2.BlockHeadersRequest) {
 		t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
 	}
 }
@@ -290,7 +290,7 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
 		t.Fatalf("peering failed: %v", err)
 	}
 	req := &GetBlockHeaders{
-		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
+		GetBlockHeadersRequest: &eth.GetBlockHeadersRequest{
 			Origin: eth.HashOrNumber{Number: 0},
 			Amount: 2,
 		},
@@ -322,7 +322,7 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
 	// create block bodies request
 	req := &GetBlockBodies{
 		RequestId: uint64(55),
-		GetBlockBodiesPacket: eth.GetBlockBodiesPacket{
+		GetBlockBodiesRequest: eth.GetBlockBodiesRequest{
 			s.chain.blocks[54].Hash(),
 			s.chain.blocks[75].Hash(),
 		},
@@ -336,11 +336,11 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
 	if !ok {
 		t.Fatalf("unexpected: %s", pretty.Sdump(msg))
 	}
-	bodies := resp.BlockBodiesPacket
+	bodies := resp.BlockBodiesResponse
 	t.Logf("received %d block bodies", len(bodies))
-	if len(bodies) != len(req.GetBlockBodiesPacket) {
+	if len(bodies) != len(req.GetBlockBodiesRequest) {
 		t.Fatalf("wrong bodies in response: expected %d bodies, "+
-			"got %d", len(req.GetBlockBodiesPacket), len(bodies))
+			"got %d", len(req.GetBlockBodiesRequest), len(bodies))
 	}
 }
@@ -482,7 +482,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
 	}
 	getTxReq := &GetPooledTransactions{
 		RequestId: 1234,
-		GetPooledTransactionsPacket: hashes,
+		GetPooledTransactionsRequest: hashes,
 	}
 	if err = conn.Write(getTxReq); err != nil {
 		t.Fatalf("could not write to conn: %v", err)
@@ -490,7 +490,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
 	// check that all received transactions match those that were sent to node
 	switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
 	case *PooledTransactions:
-		for _, gotTx := range msg.PooledTransactionsPacket {
+		for _, gotTx := range msg.PooledTransactionsResponse {
 			if _, exists := hashMap[gotTx.Hash()]; !exists {
 				t.Fatalf("unexpected tx received: %v", gotTx.Hash())
 			}
@@ -547,8 +547,8 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
 		msg := conn.readAndServe(s.chain, timeout)
 		switch msg := msg.(type) {
 		case *GetPooledTransactions:
-			if len(msg.GetPooledTransactionsPacket) != len(hashes) {
-				t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket))
+			if len(msg.GetPooledTransactionsRequest) != len(hashes) {
+				t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
 			}
 			return

View File

@@ -99,24 +99,24 @@ func (msg Transactions) Code() int { return 18 }
 func (msg Transactions) ReqID() uint64 { return 18 }
 
 // GetBlockHeaders represents a block header query.
-type GetBlockHeaders eth.GetBlockHeadersPacket66
+type GetBlockHeaders eth.GetBlockHeadersPacket
 
 func (msg GetBlockHeaders) Code() int { return 19 }
 func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId }
 
-type BlockHeaders eth.BlockHeadersPacket66
+type BlockHeaders eth.BlockHeadersPacket
 
 func (msg BlockHeaders) Code() int { return 20 }
 func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId }
 
 // GetBlockBodies represents a GetBlockBodies request
-type GetBlockBodies eth.GetBlockBodiesPacket66
+type GetBlockBodies eth.GetBlockBodiesPacket
 
 func (msg GetBlockBodies) Code() int { return 21 }
 func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId }
 
 // BlockBodies is the network packet for block content distribution.
-type BlockBodies eth.BlockBodiesPacket66
+type BlockBodies eth.BlockBodiesPacket
 
 func (msg BlockBodies) Code() int { return 22 }
 func (msg BlockBodies) ReqID() uint64 { return msg.RequestId }
@@ -128,7 +128,7 @@ func (msg NewBlock) Code() int { return 23 }
 func (msg NewBlock) ReqID() uint64 { return 0 }
 
 // NewPooledTransactionHashes66 is the network packet for the tx hash propagation message.
-type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket66
+type NewPooledTransactionHashes66 eth.NewPooledTransactionHashesPacket67
 
 func (msg NewPooledTransactionHashes66) Code() int { return 24 }
 func (msg NewPooledTransactionHashes66) ReqID() uint64 { return 0 }
@@ -139,12 +139,12 @@ type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket68
 func (msg NewPooledTransactionHashes) Code() int { return 24 }
 func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }
 
-type GetPooledTransactions eth.GetPooledTransactionsPacket66
+type GetPooledTransactions eth.GetPooledTransactionsPacket
 
 func (msg GetPooledTransactions) Code() int { return 25 }
 func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId }
 
-type PooledTransactions eth.PooledTransactionsPacket66
+type PooledTransactions eth.PooledTransactionsPacket
 
 func (msg PooledTransactions) Code() int { return 26 }
 func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId }
@@ -180,25 +180,25 @@ func (c *Conn) Read() Message {
 	case (Status{}).Code():
 		msg = new(Status)
 	case (GetBlockHeaders{}).Code():
-		ethMsg := new(eth.GetBlockHeadersPacket66)
+		ethMsg := new(eth.GetBlockHeadersPacket)
 		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 			return errorf("could not rlp decode message: %v", err)
 		}
 		return (*GetBlockHeaders)(ethMsg)
 	case (BlockHeaders{}).Code():
-		ethMsg := new(eth.BlockHeadersPacket66)
+		ethMsg := new(eth.BlockHeadersPacket)
 		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 			return errorf("could not rlp decode message: %v", err)
 		}
 		return (*BlockHeaders)(ethMsg)
 	case (GetBlockBodies{}).Code():
-		ethMsg := new(eth.GetBlockBodiesPacket66)
+		ethMsg := new(eth.GetBlockBodiesPacket)
 		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 			return errorf("could not rlp decode message: %v", err)
 		}
 		return (*GetBlockBodies)(ethMsg)
 	case (BlockBodies{}).Code():
-		ethMsg := new(eth.BlockBodiesPacket66)
+		ethMsg := new(eth.BlockBodiesPacket)
 		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 			return errorf("could not rlp decode message: %v", err)
 		}
@@ -217,13 +217,13 @@ func (c *Conn) Read() Message {
 		}
 		msg = new(NewPooledTransactionHashes66)
 	case (GetPooledTransactions{}.Code()):
-		ethMsg := new(eth.GetPooledTransactionsPacket66)
+		ethMsg := new(eth.GetPooledTransactionsPacket)
 		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 			return errorf("could not rlp decode message: %v", err)
 		}
 		return (*GetPooledTransactions)(ethMsg)
 	case (PooledTransactions{}.Code()):
-		ethMsg := new(eth.PooledTransactionsPacket66)
+		ethMsg := new(eth.PooledTransactionsPacket)
 		if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
 			return errorf("could not rlp decode message: %v", err)
 		}
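The pattern across these devp2p test-harness files: with eth/66 support removed upstream, the `...Packet66` wrapper types lose their version suffix, and each message pair is split into explicit `...Request`/`...Response` types, which is why the field and type renames above consistently track the direction of the message.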

View File

@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"sort"
 
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -60,9 +61,16 @@ func blockTestCmd(ctx *cli.Context) error {
 	if err = json.Unmarshal(src, &tests); err != nil {
 		return err
 	}
-	for i, test := range tests {
+	// run them in order
+	var keys []string
+	for key := range tests {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+	for _, name := range keys {
+		test := tests[name]
 		if err := test.Run(false, rawdb.HashScheme, tracer); err != nil {
-			return fmt.Errorf("test %v: %w", i, err)
+			return fmt.Errorf("test %v: %w", name, err)
 		}
 	}
 	return nil
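Go randomizes map iteration order, so the old `for i, test := range tests` loop ran block tests in a different order on every invocation; collecting and sorting the keys first makes runs reproducible, and failure reports now name the test instead of an unstable index.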

View File

@@ -54,6 +54,9 @@ type header struct {
 	Nonce *types.BlockNonce `json:"nonce"`
 	BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
 	WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+	BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
+	ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
+	ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
 }
@@ -64,6 +67,8 @@ type headerMarshaling struct {
 	Time math.HexOrDecimal64
 	Extra hexutil.Bytes
 	BaseFee *math.HexOrDecimal256
+	BlobGasUsed *math.HexOrDecimal64
+	ExcessBlobGas *math.HexOrDecimal64
 }
 
 type bbInput struct {
@@ -129,6 +134,9 @@ func (i *bbInput) ToBlock() *types.Block {
 		MixDigest: i.Header.MixDigest,
 		BaseFee: i.Header.BaseFee,
 		WithdrawalsHash: i.Header.WithdrawalsHash,
+		BlobGasUsed: i.Header.BlobGasUsed,
+		ExcessBlobGas: i.Header.ExcessBlobGas,
+		ParentBeaconRoot: i.Header.ParentBeaconBlockRoot,
 	}
@@ -150,7 +158,7 @@ func (i *bbInput) ToBlock() *types.Block {
 	if i.Header.Nonce != nil {
 		header.Nonce = *i.Header.Nonce
 	}
-	if header.Difficulty != nil {
+	if i.Header.Difficulty != nil {
 		header.Difficulty = i.Header.Difficulty
 	}
 	return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals)

View File

@@ -59,7 +59,7 @@
 	BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
 	WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
 	CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
-	CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"`
+	CurrentBlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed,omitempty"`
 }
 
 type ommer struct {
@@ -85,7 +85,7 @@ type stEnv struct {
 	Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
 	BaseFee *big.Int `json:"currentBaseFee,omitempty"`
 	ParentUncleHash common.Hash `json:"parentUncleHash"`
-	ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"`
+	ExcessBlobGas *uint64 `json:"currentExcessBlobGas,omitempty"`
 	ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
 	ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
 	ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
@@ -163,17 +163,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		rnd := common.BigToHash(pre.Env.Random)
 		vmContext.Random = &rnd
 	}
-	// If excessBlobGas is defined, add it to the vmContext.
+	// Calculate the BlobBaseFee
+	var excessBlobGas uint64
 	if pre.Env.ExcessBlobGas != nil {
-		vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas
+		excessBlobGas := *pre.Env.ExcessBlobGas
+		vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
 	} else {
 		// If it is not explicitly defined, but we have the parent values, we try
 		// to calculate it ourselves.
 		parentExcessBlobGas := pre.Env.ParentExcessBlobGas
 		parentBlobGasUsed := pre.Env.ParentBlobGasUsed
 		if parentExcessBlobGas != nil && parentBlobGasUsed != nil {
-			excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
-			vmContext.ExcessBlobGas = &excessBlobGas
+			excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
+			vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
 		}
 	}
 	// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
@@ -189,12 +191,15 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 	}
 	var blobGasUsed uint64
 	for i, tx := range txs {
-		if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil {
+		if tx.Type() == types.BlobTxType && vmContext.BlobBaseFee == nil {
 			errMsg := "blob tx used but field env.ExcessBlobGas missing"
 			log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg)
 			rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg})
 			continue
 		}
+		if tx.Type() == types.BlobTxType {
+			blobGasUsed += uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes()))
+		}
 		msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee)
 		if err != nil {
 			log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err)
@@ -224,9 +229,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 			gaspool.SetGas(prevGas)
 			continue
 		}
-		if tx.Type() == types.BlobTxType {
-			blobGasUsed += params.BlobTxBlobGasPerBlob
-		}
 		includedTxs = append(includedTxs, tx)
 		if hashError != nil {
 			return nil, nil, NewError(ErrorMissingBlockhash, hashError)
@@ -322,8 +324,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil))
 		execRs.WithdrawalsRoot = &h
 	}
-	if vmContext.ExcessBlobGas != nil {
-		execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas)
+	if vmContext.BlobBaseFee != nil {
+		execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(&excessBlobGas)
 		execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
 	}
 	// Re-create statedb instance with new root upon the updated database
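For context on the arithmetic being wired in here: under EIP-4844 the blob base fee is derived from the excess blob gas by an exponential, and each blob costs a fixed 131072 blob gas, which is what the `BlobTxBlobGasPerBlob * len(tx.BlobHashes())` accounting above counts per blob transaction. A minimal sketch of the spec formulas, using uint64 for brevity (geth's `eip4844` package does this with big.Int; constants per the Cancun spec):

```go
package main

import "fmt"

const (
	blobGasPerBlob        = 131072  // params.BlobTxBlobGasPerBlob (2^17)
	targetBlobGasPerBlock = 393216  // three blobs per block
	minBlobBaseFee        = 1
	updateFraction        = 3338477 // BLOB_BASE_FEE_UPDATE_FRACTION
)

// calcExcessBlobGas mirrors EIP-4844: excess carries over whatever the
// parent block used beyond the per-block target.
func calcExcessBlobGas(parentExcess, parentUsed uint64) uint64 {
	if parentExcess+parentUsed < targetBlobGasPerBlock {
		return 0
	}
	return parentExcess + parentUsed - targetBlobGasPerBlock
}

// fakeExponential approximates factor * e^(numerator/denominator) with the
// integer Taylor expansion mandated by the EIP.
func fakeExponential(factor, numerator, denominator uint64) uint64 {
	var output uint64
	accum := factor * denominator
	for i := uint64(1); accum > 0; i++ {
		output += accum
		accum = accum * numerator / denominator / i
	}
	return output / denominator
}

func main() {
	// Parent was three blobs over target, so some excess carries over.
	excess := calcExcessBlobGas(0, 6*blobGasPerBlob)
	fmt.Println(fakeExponential(minBlobBaseFee, excess, updateFraction))
}
```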

View File

@@ -35,6 +35,9 @@ func (h header) MarshalJSON() ([]byte, error) {
 		Nonce *types.BlockNonce `json:"nonce"`
 		BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
 		WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+		BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
+		ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
+		ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
 	}
 	var enc header
 	enc.ParentHash = h.ParentHash
@@ -54,6 +57,9 @@ func (h header) MarshalJSON() ([]byte, error) {
 	enc.Nonce = h.Nonce
 	enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee)
 	enc.WithdrawalsHash = h.WithdrawalsHash
+	enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed)
+	enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas)
+	enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot
 	return json.Marshal(&enc)
 }
@@ -77,6 +83,9 @@ func (h *header) UnmarshalJSON(input []byte) error {
 		Nonce *types.BlockNonce `json:"nonce"`
 		BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
 		WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
+		BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
+		ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
+		ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
 	}
 	var dec header
 	if err := json.Unmarshal(input, &dec); err != nil {
@@ -137,5 +146,14 @@ func (h *header) UnmarshalJSON(input []byte) error {
 	if dec.WithdrawalsHash != nil {
 		h.WithdrawalsHash = dec.WithdrawalsHash
 	}
+	if dec.BlobGasUsed != nil {
+		h.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
+	}
+	if dec.ExcessBlobGas != nil {
+		h.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
+	}
+	if dec.ParentBeaconBlockRoot != nil {
+		h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
+	}
 	return nil
 }

View File

@@ -33,7 +33,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
 		Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
 		BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
 		ParentUncleHash common.Hash `json:"parentUncleHash"`
-		ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
+		ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
 		ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
 		ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
 		ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
@@ -81,7 +81,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
 		Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
 		BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
 		ParentUncleHash *common.Hash `json:"parentUncleHash"`
-		ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
+		ExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
 		ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
 		ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
 		ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`

View File

@@ -334,7 +334,7 @@ func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *pa
 		txsWithKeys = inputData.Txs
 	}
 	// We may have to sign the transactions.
-	signer := types.MakeSigner(chainConfig, big.NewInt(int64(env.Number)), env.Timestamp)
+	signer := types.LatestSignerForChainID(chainConfig.ChainID)
 	return signUnsignedTransactions(txsWithKeys, signer)
 }
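`types.MakeSigner` picks signature rules from the fork schedule at a given block number and time, while `types.LatestSignerForChainID` unconditionally enables the newest rules; for t8n, which must sign whatever transaction types a test feeds it, the latter avoids rejecting typed transactions on pre-fork environments. A small sketch of the replacement in use (chain ID 1 and the fee values are arbitrary):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	// Accepts every transaction type, regardless of which fork the
	// environment's block number would map to.
	signer := types.LatestSignerForChainID(big.NewInt(1))
	tx := types.MustSignNewTx(key, signer, &types.DynamicFeeTx{
		ChainID:   big.NewInt(1),
		Nonce:     0,
		Gas:       21000,
		GasFeeCap: big.NewInt(1_000_000_000),
		GasTipCap: big.NewInt(1),
	})
	from, _ := types.Sender(signer, tx)
	fmt.Println(from)
}
```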

View File

@@ -124,6 +124,7 @@ func runCmd(ctx *cli.Context) error {
 		receiver = common.BytesToAddress([]byte("receiver"))
 		preimages = ctx.Bool(DumpFlag.Name)
 		blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
+		blobBaseFee = new(big.Int) // TODO (MariusVanDerWijden) implement blob fee in state tests
 	)
 	if ctx.Bool(MachineFlag.Name) {
 		tracer = logger.NewJSONLogger(logconfig, os.Stdout)
@@ -221,6 +222,7 @@ func runCmd(ctx *cli.Context) error {
 		Coinbase: genesisConfig.Coinbase,
 		BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
 		BlobHashes: blobHashes,
+		BlobBaseFee: blobBaseFee,
 		EVMConfig: vm.Config{
 			Tracer: tracer,
 		},

View File

@@ -9,8 +9,7 @@
 	"parentDifficulty" : "0x00",
 	"parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
 	"currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-	"withdrawals" : [
-	],
+	"withdrawals" : [],
 	"parentBaseFee" : "0x0a",
 	"parentGasUsed" : "0x00",
 	"parentGasLimit" : "0x7fffffffffffffff",

View File

@@ -42,6 +42,6 @@
 		"currentBaseFee": "0x9",
 		"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
 		"currentExcessBlobGas": "0x0",
-		"currentBlobGasUsed": "0x20000"
+		"blobGasUsed": "0x20000"
 	}
 }

View File

@@ -6,7 +6,7 @@
 		"storage" : {
 		}
 	},
-	"0xbEac00dDB15f3B6d645C48263dC93862413A222D" : {
+	"0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02" : {
 		"balance" : "0x1",
 		"code" : "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
 		"nonce" : "0x00",

View File

@@ -1,6 +1,6 @@
 {
 	"alloc": {
-		"0xbeac00ddb15f3b6d645c48263dc93862413a222d": {
+		"0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
 			"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
 			"storage": {
 				"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
@@ -14,7 +14,7 @@
 		}
 	},
 	"result": {
-		"stateRoot": "0x2db9f6bc233e8fd0af2d8023404493a19b37d9d69ace71f4e73158851fced574",
+		"stateRoot": "0x19a4f821a7c0a6f4c934f9acb0fe9ce5417b68086e12513ecbc3e3f57e01573c",
 		"txRoot": "0x248074fabe112f7d93917f292b64932394f835bb98da91f21501574d58ec92ab",
 		"receiptsRoot": "0xf78dfb743fbd92ade140711c8bbc542b5e307f0ab7984eff35d751969fe57efa",
 		"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
@@ -40,6 +40,6 @@
 		"currentBaseFee": "0x9",
 		"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
 		"currentExcessBlobGas": "0x0",
-		"currentBlobGasUsed": "0x0"
+		"blobGasUsed": "0x0"
 	}
 }

View File

@ -1,29 +1,29 @@
## EIP 4788 ## EIP 4788
This test contains testcases for EIP-4788. The 4788-contract is This test contains testcases for EIP-4788. The 4788-contract is
located at address `0xbeac00ddb15f3b6d645c48263dc93862413a222d`, and this test executes a simple transaction. It also located at address `0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02`, and this test executes a simple transaction. It also
implicitly invokes the system tx, which sets calls the contract and sets the implicitly invokes the system tx, which sets calls the contract and sets the
storage values storage values
``` ```
$ dir=./testdata/29/ && go run . t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout $ dir=./testdata/29/ && go run . t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout
INFO [08-15|20:07:56.335] Trie dumping started root=ecde45..2af8a7 INFO [09-27|15:34:53.049] Trie dumping started root=19a4f8..01573c
INFO [08-15|20:07:56.335] Trie dumping complete accounts=2 elapsed="225.848µs" INFO [09-27|15:34:53.049] Trie dumping complete accounts=2 elapsed="192.759µs"
INFO [08-15|20:07:56.335] Wrote file file=result.json INFO [09-27|15:34:53.050] Wrote file file=result.json
{ {
"alloc": { "alloc": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
"balance": "0x16345785d871db8",
"nonce": "0x1"
},
"0xbeac00541d49391ed88abf392bfc1f4dea8c4143": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500", "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"storage": { "storage": {
"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e", "0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
"0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00" "0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
}, },
"balance": "0x "balance": "0x1"
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x16345785d871db8",
"nonce": "0x1"
} }
} }
} }
``` ```
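Not part of the testcase, but as a reading aid: the contract bytecode above uses a ring buffer with modulus `0x18000`, which is enough to reproduce the two storage slots in the dump. A minimal Go sketch (the timestamp value is inferred from the dump itself):

```go
package main

import "fmt"

// bufferLen is the ring-buffer modulus that appears literally (0x18000)
// in the contract bytecode above.
const bufferLen = 0x18000

// beaconRootSlots returns the two slots the system call writes for a
// given block timestamp: one holding the timestamp, one the beacon root.
func beaconRootSlots(timestamp uint64) (tsSlot, rootSlot uint64) {
	idx := timestamp % bufferLen
	return idx, idx + bufferLen
}

func main() {
	ts, root := beaconRootSlots(0x79e)
	fmt.Printf("timestamp slot %#x, root slot %#x\n", ts, root)
	// timestamp slot 0x79e, root slot 0x1879e -- matching the dump above
}
```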
View File
@ -32,7 +32,7 @@ dir=./testdata/8 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json
{"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memSize":0,"stack":["0x3"],"depth":1,"refund":0,"opName":"SLOAD"} {"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memSize":0,"stack":["0x3"],"depth":1,"refund":0,"opName":"SLOAD"}
``` ```
Simlarly, we can provide the input transactions via `stdin` instead of as file: Similarly, we can provide the input transactions via `stdin` instead of as file:
``` ```
$ dir=./testdata/8 \ $ dir=./testdata/8 \
View File
@ -1,6 +1,6 @@
## EIP-1559 testing ## EIP-1559 testing
This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. This test contains testcases for EIP-1559, which uses a new transaction type and has a new block parameter.
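As a reading aid (not from the original README): the effective gas price of the new transaction type is derived from the new block parameter as min(maxFeePerGas, baseFee + maxPriorityFeePerGas). A small sketch with made-up numbers:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

// effectiveGasPrice applies the EIP-1559 pricing rule:
// min(feeCap, baseFee+tipCap).
func effectiveGasPrice(baseFee, tipCap, feeCap *big.Int) *big.Int {
	return math.BigMin(new(big.Int).Add(tipCap, baseFee), feeCap)
}

func main() {
	baseFee := big.NewInt(9) // e.g. a "currentBaseFee": "0x9" environment
	tipCap := big.NewInt(2)
	feeCap := big.NewInt(10)
	fmt.Println(effectiveGasPrice(baseFee, tipCap, feeCap)) // 10: 9+2, capped at 10
}
```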
### Prestate ### Prestate
View File
@ -474,7 +474,7 @@ func dump(ctx *cli.Context) error {
if err != nil { if err != nil {
return err return err
} }
triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup triedb := utils.MakeTrieDatabase(ctx, db, true, true) // always enable preimage lookup
defer triedb.Close() defer triedb.Close()
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil) state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
View File
@ -32,6 +32,8 @@ import (
"github.com/ethereum/go-ethereum/accounts/scwallet" "github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet" "github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
@ -199,17 +201,18 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if ctx.IsSet(utils.GraphQLEnabledFlag.Name) { if ctx.IsSet(utils.GraphQLEnabledFlag.Name) {
utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node) utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node)
} }
// Add the Ethereum Stats daemon if requested. // Add the Ethereum Stats daemon if requested.
if cfg.Ethstats.URL != "" { if cfg.Ethstats.URL != "" {
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
} }
// Configure full-sync tester service if requested // Configure full-sync tester service if requested
if ctx.IsSet(utils.SyncTargetFlag.Name) && cfg.Eth.SyncMode == downloader.FullSync { if ctx.IsSet(utils.SyncTargetFlag.Name) {
utils.RegisterFullSyncTester(stack, eth, ctx.Path(utils.SyncTargetFlag.Name)) hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name))
if len(hex) != common.HashLength {
utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength)
}
utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex))
} }
// Start the dev mode if requested, or launch the engine API for // Start the dev mode if requested, or launch the engine API for
// interacting with external consensus client. // interacting with external consensus client.
if ctx.IsSet(utils.DeveloperFlag.Name) { if ctx.IsSet(utils.DeveloperFlag.Name) {
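A standalone sketch of the validation the new flag handling performs (mirroring the hunk above; the helper name and the example hash are made up):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

// parseSyncTarget decodes a --synctarget value and requires it to be
// exactly common.HashLength (32) bytes.
func parseSyncTarget(s string) (common.Hash, error) {
	raw, err := hexutil.Decode(s)
	if err != nil {
		return common.Hash{}, err
	}
	if len(raw) != common.HashLength {
		return common.Hash{}, fmt.Errorf("invalid sync target length: have %d, want %d", len(raw), common.HashLength)
	}
	return common.BytesToHash(raw), nil
}

func main() {
	h, err := parseSyncTarget("0x1111111111111111111111111111111111111111111111111111111111111111")
	if err != nil {
		panic(err)
	}
	fmt.Println("full-sync target:", h)
}
```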
View File
@ -176,12 +176,12 @@ func TestCustomBackend(t *testing.T) {
{ // Can't start pebble on top of leveldb { // Can't start pebble on top of leveldb
initArgs: []string{"--db.engine", "leveldb"}, initArgs: []string{"--db.engine", "leveldb"},
execArgs: []string{"--db.engine", "pebble"}, execArgs: []string{"--db.engine", "pebble"},
execExpect: `Fatal: Could not open database: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`, execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
}, },
{ // Can't start leveldb on top of pebble { // Can't start leveldb on top of pebble
initArgs: []string{"--db.engine", "pebble"}, initArgs: []string{"--db.engine", "pebble"},
execArgs: []string{"--db.engine", "leveldb"}, execArgs: []string{"--db.engine", "leveldb"},
execExpect: `Fatal: Could not open database: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`, execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
}, },
{ // Reject invalid backend choice { // Reject invalid backend choice
initArgs: []string{"--db.engine", "mssql"}, initArgs: []string{"--db.engine", "mssql"},
View File
@ -163,7 +163,7 @@ func plugethCaptureTrieConfig(ctx *cli.Context, stack *node.Node, backend ethapi
chaindb := backend.ChainDb() chaindb := backend.ChainDb()
scheme, err := utils.ParseStateScheme(ctx, chaindb) scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), chaindb)
if err != nil { if err != nil {
utils.Fatalf("%v", err) utils.Fatalf("%v", err)
} }
View File
@ -18,7 +18,6 @@
package utils package utils
import ( import (
"bytes"
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"encoding/hex" "encoding/hex"
@ -39,11 +38,9 @@ import (
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/fdlimit" "github.com/ethereum/go-ethereum/common/fdlimit"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/crypto/kzg4844"
@ -72,7 +69,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
@ -280,7 +276,6 @@ var (
StateSchemeFlag = &cli.StringFlag{ StateSchemeFlag = &cli.StringFlag{
Name: "state.scheme", Name: "state.scheme",
Usage: "Scheme to use for storing ethereum state ('hash' or 'path')", Usage: "Scheme to use for storing ethereum state ('hash' or 'path')",
Value: rawdb.HashScheme,
Category: flags.StateCategory, Category: flags.StateCategory,
} }
StateHistoryFlag = &cli.Uint64Flag{ StateHistoryFlag = &cli.Uint64Flag{
@ -603,9 +598,9 @@ var (
} }
// MISC settings // MISC settings
SyncTargetFlag = &cli.PathFlag{ SyncTargetFlag = &cli.StringFlag{
Name: "synctarget", Name: "synctarget",
Usage: `File for containing the hex-encoded block-rlp as sync target(dev feature)`, Usage: `Hash of the block to full sync to (dev testing feature)`,
TakesFile: true, TakesFile: true,
Category: flags.MiscCategory, Category: flags.MiscCategory,
} }
@ -974,17 +969,12 @@ var (
DataDirFlag, DataDirFlag,
AncientFlag, AncientFlag,
RemoteDBFlag, RemoteDBFlag,
DBEngineFlag,
StateSchemeFlag, StateSchemeFlag,
HttpHeaderFlag, HttpHeaderFlag,
} }
) )
func init() {
if rawdb.PebbleEnabled {
DatabaseFlags = append(DatabaseFlags, DBEngineFlag)
}
}
// MakeDataDir retrieves the currently requested data directory, terminating // MakeDataDir retrieves the currently requested data directory, terminating
// if none (or the empty string) is specified. If the node is starting a testnet, // if none (or the empty string) is specified. If the node is starting a testnet,
// then a subdirectory of the specified datadir will be used. // then a subdirectory of the specified datadir will be used.
@ -1699,7 +1689,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc))
godebug.SetGCPercent(int(gogc)) godebug.SetGCPercent(int(gogc))
if ctx.IsSet(SyncModeFlag.Name) { if ctx.IsSet(SyncTargetFlag.Name) {
cfg.SyncMode = downloader.FullSync // dev sync target forces full sync
} else if ctx.IsSet(SyncModeFlag.Name) {
cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode) cfg.SyncMode = *flags.GlobalTextMarshaler(ctx, SyncModeFlag.Name).(*downloader.SyncMode)
} }
if ctx.IsSet(NetworkIdFlag.Name) { if ctx.IsSet(NetworkIdFlag.Name) {
@ -1731,15 +1723,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(StateHistoryFlag.Name) { if ctx.IsSet(StateHistoryFlag.Name) {
cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name) cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
} }
// Parse state scheme, abort the process if it's not compatible. if ctx.IsSet(StateSchemeFlag.Name) {
chaindb := tryMakeReadOnlyDatabase(ctx, stack) cfg.StateScheme = ctx.String(StateSchemeFlag.Name)
scheme, err := ParseStateScheme(ctx, chaindb)
chaindb.Close()
if err != nil {
Fatalf("%v", err)
} }
cfg.StateScheme = scheme
// Parse transaction history flag, if user is still using legacy config // Parse transaction history flag, if user is still using legacy config
// file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'. // file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'.
if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit { if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit {
@ -1984,21 +1970,9 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf
} }
// RegisterFullSyncTester adds the full-sync tester service into node. // RegisterFullSyncTester adds the full-sync tester service into node.
func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, path string) { func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash) {
blob, err := os.ReadFile(path) catalyst.RegisterFullSyncTester(stack, eth, target)
if err != nil { log.Info("Registered full-sync tester", "hash", target)
Fatalf("Failed to read block file: %v", err)
}
rlpBlob, err := hexutil.Decode(string(bytes.TrimRight(blob, "\r\n")))
if err != nil {
Fatalf("Failed to decode block blob: %v", err)
}
var block types.Block
if err := rlp.DecodeBytes(rlpBlob, &block); err != nil {
Fatalf("Failed to decode block: %v", err)
}
catalyst.RegisterFullSyncTester(stack, eth, &block)
log.Info("Registered full-sync tester", "number", block.NumberU64(), "hash", block.Hash())
} }
func SetupMetrics(ctx *cli.Context) { func SetupMetrics(ctx *cli.Context) {
@ -2187,7 +2161,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
} }
scheme, err := ParseStateScheme(ctx, chainDb) scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), chainDb)
if err != nil { if err != nil {
Fatalf("%v", err) Fatalf("%v", err)
} }
@ -2246,47 +2220,12 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
return preloads return preloads
} }
// ParseStateScheme resolves scheme identifier from CLI flag. If the provided
// state scheme is not compatible with the one of persistent scheme, an error
// will be returned.
//
// - none: use the scheme consistent with persistent state, or fallback
// to hash-based scheme if state is empty.
// - hash: use hash-based scheme or error out if not compatible with
// persistent state scheme.
// - path: use path-based scheme or error out if not compatible with
// persistent state scheme.
func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) {
// If state scheme is not specified, use the scheme consistent
// with persistent state, or fallback to hash mode if database
// is empty.
stored := rawdb.ReadStateScheme(disk)
if !ctx.IsSet(StateSchemeFlag.Name) {
if stored == "" {
// use default scheme for empty database, flip it when
// path mode is chosen as default
log.Info("State schema set to default", "scheme", "hash")
return rawdb.HashScheme, nil
}
log.Info("State scheme set to already existing", "scheme", stored)
return stored, nil // reuse scheme of persistent scheme
}
// If state scheme is specified, ensure it's compatible with
// persistent state.
scheme := ctx.String(StateSchemeFlag.Name)
if stored == "" || scheme == stored {
log.Info("State scheme set by user", "scheme", scheme)
return scheme, nil
}
return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, scheme)
}
// MakeTrieDatabase constructs a trie database based on the configured scheme. // MakeTrieDatabase constructs a trie database based on the configured scheme.
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database { func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database {
config := &trie.Config{ config := &trie.Config{
Preimages: preimage, Preimages: preimage,
} }
scheme, err := ParseStateScheme(ctx, disk) scheme, err := rawdb.ParseStateScheme(ctx.String(StateSchemeFlag.Name), disk)
if err != nil { if err != nil {
Fatalf("%v", err) Fatalf("%v", err)
} }
View File
@ -44,6 +44,12 @@ const (
var ( var (
hashT = reflect.TypeOf(Hash{}) hashT = reflect.TypeOf(Hash{})
addressT = reflect.TypeOf(Address{}) addressT = reflect.TypeOf(Address{})
// MaxAddress represents the maximum possible address value.
MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")
// MaxHash represents the maximum possible hash value.
MaxHash = HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
) )
// Hash represents the 32 byte Keccak256 hash of arbitrary data. // Hash represents the 32 byte Keccak256 hash of arbitrary data.
@ -239,9 +245,6 @@ func (a Address) Cmp(other Address) int {
// Bytes gets the string representation of the underlying address. // Bytes gets the string representation of the underlying address.
func (a Address) Bytes() []byte { return a[:] } func (a Address) Bytes() []byte { return a[:] }
// Hash converts an address to a hash by left-padding it with zeros.
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Big converts an address to a big integer. // Big converts an address to a big integer.
func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) } func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) }
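A tiny, hypothetical usage sketch for the two sentinel values introduced above:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Handy as inclusive upper bounds when iterating address/hash ranges.
	fmt.Println(common.MaxAddress)             // 20 bytes of 0xff
	fmt.Println(common.MaxHash)                // 32 bytes of 0xff
	fmt.Println(common.MaxHash.Big().BitLen()) // 256
}
```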
View File
@ -34,7 +34,7 @@ type instructionIterator struct {
started bool started bool
} }
// NewInstructionIterator create a new instruction iterator. // NewInstructionIterator creates a new instruction iterator.
func NewInstructionIterator(code []byte) *instructionIterator { func NewInstructionIterator(code []byte) *instructionIterator {
it := new(instructionIterator) it := new(instructionIterator)
it.code = code it.code = code
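A short usage sketch for the iterator; the bytecode literal is made up:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/asm"
)

func main() {
	code := []byte{0x60, 0x03, 0x60, 0x05, 0x01} // PUSH1 0x03, PUSH1 0x05, ADD
	it := asm.NewInstructionIterator(code)
	for it.Next() {
		fmt.Printf("%05x: %v %x\n", it.PC(), it.Op(), it.Arg())
	}
	if err := it.Error(); err != nil {
		panic(err)
	}
}
```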
View File
@ -72,12 +72,12 @@ func TestLexer(t *testing.T) {
input: "@label123", input: "@label123",
tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}},
}, },
// comment after label // Comment after label
{ {
input: "@label123 ;; comment", input: "@label123 ;; comment",
tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}}, tokens: []token{{typ: lineStart}, {typ: label, text: "label123"}, {typ: eof}},
}, },
// comment after instruction // Comment after instruction
{ {
input: "push 3 ;; comment\nadd", input: "push 3 ;; comment\nadd",
tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}}, tokens: []token{{typ: lineStart}, {typ: element, text: "push"}, {typ: number, text: "3"}, {typ: lineEnd, text: "\n"}, {typ: lineStart, lineno: 1}, {typ: element, lineno: 1, text: "add"}, {typ: eof, lineno: 1}},
View File
@ -576,7 +576,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
header := bc.CurrentBlock() header := bc.CurrentBlock()
block := bc.GetBlock(header.Hash(), header.Number.Uint64()) block := bc.GetBlock(header.Hash(), header.Number.Uint64())
if block == nil { if block == nil {
// This should never happen. In practice, previsouly currentBlock // This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there // contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle. // is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash()) log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
@ -598,7 +598,7 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
header := bc.CurrentBlock() header := bc.CurrentBlock()
block := bc.GetBlock(header.Hash(), header.Number.Uint64()) block := bc.GetBlock(header.Hash(), header.Number.Uint64())
if block == nil { if block == nil {
// This should never happen. In practice, previsouly currentBlock // This should never happen. In practice, previously currentBlock
// contained the entire block whereas now only a "marker", so there // contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle. // is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash()) log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
@ -982,7 +982,7 @@ func (bc *BlockChain) stopWithoutSaving() {
func (bc *BlockChain) Stop() { func (bc *BlockChain) Stop() {
bc.stopWithoutSaving() bc.stopWithoutSaving()
// Ensure that the entirety of the state snapshot is journalled to disk. // Ensure that the entirety of the state snapshot is journaled to disk.
var snapBase common.Hash var snapBase common.Hash
if bc.snaps != nil { if bc.snaps != nil {
var err error var err error
@ -1193,7 +1193,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients) // a background routine to re-indexed all indices in [ancients - txlookupLimit, ancients)
// range. In this case, all tx indices of newly imported blocks should be // range. In this case, all tx indices of newly imported blocks should be
// generated. // generated.
var batch = bc.db.NewBatch() batch := bc.db.NewBatch()
for i, block := range blockChain { for i, block := range blockChain {
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit { if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
rawdb.WriteTxLookupEntriesByBlock(batch, block) rawdb.WriteTxLookupEntriesByBlock(batch, block)
@ -2618,7 +2618,7 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
bc.flushInterval.Store(int64(interval)) bc.flushInterval.Store(int64(interval))
} }
// GetTrieFlushInterval gets the in-memroy tries flush interval // GetTrieFlushInterval gets the in-memory tries flush interval
func (bc *BlockChain) GetTrieFlushInterval() time.Duration { func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
return time.Duration(bc.flushInterval.Load()) return time.Duration(bc.flushInterval.Load())
} }
View File
@ -58,7 +58,7 @@ type partialMatches struct {
// bit with the given number of fetch elements, or a response for such a request. // bit with the given number of fetch elements, or a response for such a request.
// It can also have the actual results set to be used as a delivery data struct. // It can also have the actual results set to be used as a delivery data struct.
// //
// The contest and error fields are used by the light client to terminate matching // The context and error fields are used by the light client to terminate matching
// early if an error is encountered on some path of the pipeline. // early if an error is encountered on some path of the pipeline.
type Retrieval struct { type Retrieval struct {
Bit uint Bit uint
@ -389,7 +389,7 @@ func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests
) )
// assign is a helper method fo try to assign a pending bit an actively // assign is a helper method to try to assign a pending bit an actively
// listening servicer, or schedule it up for later when one arrives. // listening servicer, or schedule it up for later when one arrives.
assign := func(bit uint) { assign := func(bit uint) {
select { select {
View File
@ -85,7 +85,7 @@ func TestMatcherRandom(t *testing.T) {
} }
// Tests that the matcher can properly find matches if the starting block is // Tests that the matcher can properly find matches if the starting block is
// shifter from a multiple of 8. This is needed to cover an optimisation with // shifted from a multiple of 8. This is needed to cover an optimisation with
// bitset matching https://github.com/ethereum/go-ethereum/issues/15309. // bitset matching https://github.com/ethereum/go-ethereum/issues/15309.
func TestMatcherShifted(t *testing.T) { func TestMatcherShifted(t *testing.T) {
t.Parallel() t.Parallel()
@ -106,7 +106,7 @@ func TestWildcardMatcher(t *testing.T) {
testMatcherBothModes(t, nil, 0, 10000, 0) testMatcherBothModes(t, nil, 0, 10000, 0)
} }
// makeRandomIndexes generates a random filter system, composed on multiple filter // makeRandomIndexes generates a random filter system, composed of multiple filter
// criteria, each having one bloom list component for the address and arbitrarily // criteria, each having one bloom list component for the address and arbitrarily
// many topic bloom list components. // many topic bloom list components.
func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes { func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes {
View File
@ -88,11 +88,6 @@ func (b *BlockGen) SetPoS() {
b.header.Difficulty = new(big.Int) b.header.Difficulty = new(big.Int)
} }
// SetBlobGas sets the data gas used by the blob in the generated block.
func (b *BlockGen) SetBlobGas(blobGasUsed uint64) {
b.header.BlobGasUsed = &blobGasUsed
}
// addTx adds a transaction to the generated block. If no coinbase has // addTx adds a transaction to the generated block. If no coinbase has
// been set, the block's coinbase is set to the zero address. // been set, the block's coinbase is set to the zero address.
// //
@ -111,6 +106,9 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti
} }
b.txs = append(b.txs, tx) b.txs = append(b.txs, tx)
b.receipts = append(b.receipts, receipt) b.receipts = append(b.receipts, receipt)
if b.header.BlobGasUsed != nil {
*b.header.BlobGasUsed += receipt.BlobGasUsed
}
} }
// AddTx adds a transaction to the generated block. If no coinbase has // AddTx adds a transaction to the generated block. If no coinbase has
View File
@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
) )
@ -40,6 +41,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
var ( var (
beneficiary common.Address beneficiary common.Address
baseFee *big.Int baseFee *big.Int
blobBaseFee *big.Int
random *common.Hash random *common.Hash
) )
@ -52,6 +54,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
if header.BaseFee != nil { if header.BaseFee != nil {
baseFee = new(big.Int).Set(header.BaseFee) baseFee = new(big.Int).Set(header.BaseFee)
} }
if header.ExcessBlobGas != nil {
blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas)
}
if header.Difficulty.Cmp(common.Big0) == 0 { if header.Difficulty.Cmp(common.Big0) == 0 {
random = &header.MixDigest random = &header.MixDigest
} }
@ -64,9 +69,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
Time: header.Time, Time: header.Time,
Difficulty: new(big.Int).Set(header.Difficulty), Difficulty: new(big.Int).Set(header.Difficulty),
BaseFee: baseFee, BaseFee: baseFee,
BlobBaseFee: blobBaseFee,
GasLimit: header.GasLimit, GasLimit: header.GasLimit,
Random: random, Random: random,
ExcessBlobGas: header.ExcessBlobGas,
} }
} }
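For context, the new `BlobBaseFee` is derived from the header's excess blob gas via `eip4844.CalcBlobFee`; a quick sketch with representative inputs (zero excess yields the EIP-4844 minimum of 1 wei):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
)

func main() {
	for _, excess := range []uint64{0, 1 << 22, 1 << 24} {
		fmt.Println(excess, "->", eip4844.CalcBlobFee(excess), "wei")
	}
}
```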
View File
@ -120,8 +120,8 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// deriveHash computes the state root according to the genesis specification. // hash computes the state root according to the genesis specification.
func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { func (ga *GenesisAlloc) hash() (common.Hash, error) {
// Create an ephemeral in-memory database for computing hash, // Create an ephemeral in-memory database for computing hash,
// all the derived states will be discarded to not pollute disk. // all the derived states will be discarded to not pollute disk.
db := state.NewDatabase(rawdb.NewMemoryDatabase()) db := state.NewDatabase(rawdb.NewMemoryDatabase())
@ -142,9 +142,9 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
return statedb.Commit(0, false) return statedb.Commit(0, false)
} }
// flush is very similar with deriveHash, but the main difference is // flush is very similar with hash, but the main difference is all the generated
// all the generated states will be persisted into the given database. // states will be persisted into the given database. Also, the genesis state
// Also, the genesis state specification will be flushed as well. // specification will be flushed as well.
func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error {
statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil) statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
if err != nil { if err != nil {
@ -179,39 +179,6 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas
return nil return nil
} }
// CommitGenesisState loads the stored genesis state with the given block
// hash and commits it into the provided trie database.
func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error {
var alloc GenesisAlloc
blob := rawdb.ReadGenesisStateSpec(db, blockhash)
if len(blob) != 0 {
if err := alloc.UnmarshalJSON(blob); err != nil {
return err
}
} else {
// Genesis allocation is missing and there are several possibilities:
// the node is legacy which doesn't persist the genesis allocation or
// the persisted allocation is just lost.
// - supported networks(mainnet, testnets), recover with defined allocations
// - private network, can't recover
var genesis *Genesis
switch blockhash {
case params.MainnetGenesisHash:
genesis = DefaultGenesisBlock()
case params.GoerliGenesisHash:
genesis = DefaultGoerliGenesisBlock()
case params.SepoliaGenesisHash:
genesis = DefaultSepoliaGenesisBlock()
}
if genesis != nil {
alloc = genesis.Alloc
} else {
return errors.New("not found")
}
}
return alloc.flush(db, triedb, blockhash)
}
// GenesisAccount is an account in the state of the genesis block. // GenesisAccount is an account in the state of the genesis block.
type GenesisAccount struct { type GenesisAccount struct {
Code []byte `json:"code,omitempty"` Code []byte `json:"code,omitempty"`
@ -444,7 +411,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
// ToBlock returns the genesis block according to genesis specification. // ToBlock returns the genesis block according to genesis specification.
func (g *Genesis) ToBlock() *types.Block { func (g *Genesis) ToBlock() *types.Block {
root, err := g.Alloc.deriveHash() root, err := g.Alloc.hash()
if err != nil { if err != nil {
panic(err) panic(err)
} }
View File
@ -231,7 +231,7 @@ func TestReadWriteGenesisAlloc(t *testing.T) {
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
{2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}}, {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
} }
hash, _ = alloc.deriveHash() hash, _ = alloc.hash()
) )
blob, _ := json.Marshal(alloc) blob, _ := json.Marshal(alloc)
rawdb.WriteGenesisStateSpec(db, hash, blob) rawdb.WriteGenesisStateSpec(db, hash, blob)
View File
@ -305,3 +305,38 @@ func ReadStateScheme(db ethdb.Reader) string {
} }
return HashScheme return HashScheme
} }
// ParseStateScheme checks if the specified state scheme is compatible with
// the stored state.
//
// - If the provided scheme is none, use the scheme consistent with persistent
// state, or fallback to hash-based scheme if state is empty.
//
// - If the provided scheme is hash, use hash-based scheme or error out if not
// compatible with persistent state scheme.
//
// - If the provided scheme is path: use path-based scheme or error out if not
// compatible with persistent state scheme.
func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
// If state scheme is not specified, use the scheme consistent
// with persistent state, or fallback to hash mode if database
// is empty.
stored := ReadStateScheme(disk)
if provided == "" {
if stored == "" {
// use default scheme for empty database, flip it when
// path mode is chosen as default
log.Info("State schema set to default", "scheme", "hash")
return HashScheme, nil
}
log.Info("State scheme set to already existing", "scheme", stored)
return stored, nil // reuse scheme of persistent scheme
}
// If state scheme is specified, ensure it's compatible with
// persistent state.
if stored == "" || provided == stored {
log.Info("State scheme set by user", "scheme", provided)
return provided, nil
}
return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, provided)
}
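A minimal sketch of calling the relocated helper, using an in-memory database as a stand-in for a real chain directory:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// No explicit choice on an empty database falls back to hash mode.
	scheme, err := rawdb.ParseStateScheme("", db)
	if err != nil {
		panic(err)
	}
	fmt.Println(scheme) // "hash"

	// An explicit choice is accepted while no state has been persisted yet.
	scheme, err = rawdb.ParseStateScheme(rawdb.PathScheme, db)
	if err != nil {
		panic(err)
	}
	fmt.Println(scheme) // "path"
}
```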
View File
@ -200,7 +200,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
} }
batch.Reset() batch.Reset()
// Step into the future and delete and dangling side chains // Step into the future and delete any dangling side chains
if frozen > 0 { if frozen > 0 {
tip := frozen tip := frozen
for len(dangling) > 0 { for len(dangling) > 0 {
View File
@ -30,11 +30,12 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb" "github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/ethdb/pebble"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/olekukonko/tablewriter" "github.com/olekukonko/tablewriter"
) )
// freezerdb is a database wrapper that enabled freezer data retrievals. // freezerdb is a database wrapper that enables freezer data retrievals.
type freezerdb struct { type freezerdb struct {
ancientRoot string ancientRoot string
ethdb.KeyValueStore ethdb.KeyValueStore
@ -141,7 +142,7 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error)
// Unlike other ancient-related methods, this method does not return // Unlike other ancient-related methods, this method does not return
// errNotSupported when invoked. // errNotSupported when invoked.
// The reason for this is that the caller might want to do several things: // The reason for this is that the caller might want to do several things:
// 1. Check if something is in freezer, // 1. Check if something is in the freezer,
// 2. If not, check leveldb. // 2. If not, check leveldb.
// //
// This will work, since the ancient-checks inside 'fn' will return errors, // This will work, since the ancient-checks inside 'fn' will return errors,
@ -209,7 +210,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
// of the freezer and database. Ensure that we don't shoot ourselves in the foot // of the freezer and database. Ensure that we don't shoot ourselves in the foot
// by serving up conflicting data, leading to both datastores getting corrupted. // by serving up conflicting data, leading to both datastores getting corrupted.
// //
// - If both the freezer and key-value store is empty (no genesis), we just // - If both the freezer and key-value store are empty (no genesis), we just
// initialized a new empty freezer, so everything's fine. // initialized a new empty freezer, so everything's fine.
// - If the key-value store is empty, but the freezer is not, we need to make // - If the key-value store is empty, but the freezer is not, we need to make
// sure the user's genesis matches the freezer. That will be checked in the // sure the user's genesis matches the freezer. That will be checked in the
@ -218,7 +219,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace st
// - If neither the key-value store nor the freezer is empty, cross validate // - If neither the key-value store nor the freezer is empty, cross validate
// the genesis hashes to make sure they are compatible. If they are, also // the genesis hashes to make sure they are compatible. If they are, also
// ensure that there's no gap between the freezer and subsequently leveldb. // ensure that there's no gap between the freezer and subsequently leveldb.
// - If the key-value store is not empty, but the freezer is we might just be // - If the key-value store is not empty, but the freezer is, we might just be
// upgrading to the freezer release, or we might have had a small chain and // upgrading to the freezer release, or we might have had a small chain and
// not frozen anything yet. Ensure that no blocks are missing yet from the // not frozen anything yet. Ensure that no blocks are missing yet from the
// key-value store, since that would mean we already had an old freezer. // key-value store, since that would mean we already had an old freezer.
@ -321,6 +322,16 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r
return NewDatabase(db), nil return NewDatabase(db), nil
} }
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral)
if err != nil {
return nil, err
}
return NewDatabase(db), nil
}
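A hypothetical usage sketch for the now-unconditional pebble constructor; the path and sizing values are made up:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Ephemeral mode skips fsyncs, which is fine for a throwaway directory.
	db, err := rawdb.NewPebbleDBDatabase("/tmp/pebble-demo", 16, 16, "demo", false, true)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if err := db.Put([]byte("k"), []byte("v")); err != nil {
		panic(err)
	}
	val, _ := db.Get([]byte("k"))
	fmt.Println(string(val)) // "v"
}
```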
const ( const (
dbPebble = "pebble" dbPebble = "pebble"
dbLeveldb = "leveldb" dbLeveldb = "leveldb"
@ -375,26 +386,16 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) {
return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb)
} }
if o.Type == dbPebble || existingDb == dbPebble { if o.Type == dbPebble || existingDb == dbPebble {
if PebbleEnabled {
log.Info("Using pebble as the backing database") log.Info("Using pebble as the backing database")
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral) return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
} else {
return nil, errors.New("db.engine 'pebble' not supported on this platform")
}
} }
if o.Type == dbLeveldb || existingDb == dbLeveldb { if o.Type == dbLeveldb || existingDb == dbLeveldb {
log.Info("Using leveldb as the backing database") log.Info("Using leveldb as the backing database")
return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
} }
// No pre-existing database, no user-requested one either. Default to Pebble // No pre-existing database, no user-requested one either. Default to Pebble.
// on supported platforms and LevelDB on anything else.
if PebbleEnabled {
log.Info("Defaulting to pebble as the backing database") log.Info("Defaulting to pebble as the backing database")
return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral) return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.Ephemeral)
} else {
log.Info("Defaulting to leveldb as the backing database")
return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly)
}
} }
// Open opens both a disk-based key-value database such as leveldb or pebble, but also // Open opens both a disk-based key-value database such as leveldb or pebble, but also
@ -634,7 +635,7 @@ func printChainMetadata(db ethdb.KeyValueStore) {
fmt.Fprintf(os.Stderr, "\n\n") fmt.Fprintf(os.Stderr, "\n\n")
} }
// ReadChainMetadata returns a set of key/value pairs that contains informatin // ReadChainMetadata returns a set of key/value pairs that contains information
// about the database chain status. This can be used for diagnostic purposes // about the database chain status. This can be used for diagnostic purposes
// when investigating the state of the node. // when investigating the state of the node.
func ReadChainMetadata(db ethdb.KeyValueStore) [][]string { func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
View File
@ -1,37 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
//go:build (arm64 || amd64) && !openbsd
package rawdb
import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/pebble"
)
// Pebble is unsuported on 32bit architecture
const PebbleEnabled = true
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
db, err := pebble.New(file, cache, handles, namespace, readonly, ephemeral)
if err != nil {
return nil, err
}
return NewDatabase(db), nil
}
View File
@ -1,34 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !((arm64 || amd64) && !openbsd)
package rawdb
import (
"errors"
"github.com/ethereum/go-ethereum/ethdb"
)
// Pebble is unsuported on 32bit architecture
const PebbleEnabled = false
// NewPebbleDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly, ephemeral bool) (ethdb.Database, error) {
return nil, errors.New("pebble is not supported on this platform")
}
View File
@ -219,7 +219,7 @@ func (b *tableBatch) Put(key, value []byte) error {
return b.batch.Put(append([]byte(b.prefix), key...), value) return b.batch.Put(append([]byte(b.prefix), key...), value)
} }
// Delete inserts the a key removal into the batch for later committing. // Delete inserts a key removal into the batch for later committing.
func (b *tableBatch) Delete(key []byte) error { func (b *tableBatch) Delete(key []byte) error {
return b.batch.Delete(append([]byte(b.prefix), key...)) return b.batch.Delete(append([]byte(b.prefix), key...))
} }
View File
@ -364,11 +364,11 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) { func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) {
var nodeWriter trie.NodeWriteFunc var nodeWriter trie.NodeWriteFunc
if db != nil { if db != nil {
nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { nodeWriter = func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme) rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme)
} }
} }
t := trie.NewStackTrieWithOwner(nodeWriter, owner) t := trie.NewStackTrie(nodeWriter)
for leaf := range in { for leaf := range in {
t.Update(leaf.key[:], leaf.value) t.Update(leaf.key[:], leaf.value)
} }
View File
@ -247,11 +247,6 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [
ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker) ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
return nil, errMissingTrie return nil, errMissingTrie
} }
// Firstly find out the key of last iterated element.
var last []byte
if len(keys) > 0 {
last = keys[len(keys)-1]
}
// Generate the Merkle proofs for the first and last element // Generate the Merkle proofs for the first and last element
if origin == nil { if origin == nil {
origin = common.Hash{}.Bytes() origin = common.Hash{}.Bytes()
@ -266,9 +261,9 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [
tr: tr, tr: tr,
}, nil }, nil
} }
if last != nil { if len(keys) > 0 {
if err := tr.Prove(last, proof); err != nil { if err := tr.Prove(keys[len(keys)-1], proof); err != nil {
log.Debug("Failed to prove range", "kind", kind, "last", last, "err", err) log.Debug("Failed to prove range", "kind", kind, "last", keys[len(keys)-1], "err", err)
return &proofResult{ return &proofResult{
keys: keys, keys: keys,
vals: vals, vals: vals,
@ -280,7 +275,7 @@ func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix [
} }
// Verify the snapshot segment with range prover, ensure that all flat states // Verify the snapshot segment with range prover, ensure that all flat states
// in this range correspond to merkle trie. // in this range correspond to merkle trie.
cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof) cont, err := trie.VerifyRangeProof(root, origin, keys, vals, proof)
return &proofResult{ return &proofResult{
keys: keys, keys: keys,
vals: vals, vals: vals,
View File
@ -978,7 +978,7 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
nodes = trienode.NewNodeSet(addrHash) nodes = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte) slots = make(map[common.Hash][]byte)
) )
stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
nodes.AddNode(path, trienode.NewDeleted()) nodes.AddNode(path, trienode.NewDeleted())
size += common.StorageSize(len(path)) size += common.StorageSize(len(path))
}) })
View File
@ -37,7 +37,7 @@ var (
type triePrefetcher struct { type triePrefetcher struct {
db Database // Database to fetch trie nodes through db Database // Database to fetch trie nodes through
root common.Hash // Root hash of the account trie for metrics root common.Hash // Root hash of the account trie for metrics
fetches map[string]Trie // Partially or fully fetcher tries fetches map[string]Trie // Partially or fully fetched tries. Only populated for inactive copies.
fetchers map[string]*subfetcher // Subfetchers for each trie fetchers map[string]*subfetcher // Subfetchers for each trie
deliveryMissMeter metrics.Meter deliveryMissMeter metrics.Meter
@ -197,7 +197,10 @@ func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte
// trieID returns a unique trie identifier consisting of the trie owner and root hash. // trieID returns a unique trie identifier consisting of the trie owner and root hash.
func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string { func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
return string(append(owner.Bytes(), root.Bytes()...)) trieID := make([]byte, common.HashLength*2)
copy(trieID, owner.Bytes())
copy(trieID[common.HashLength:], root.Bytes())
return string(trieID)
} }
// subfetcher is a trie fetcher goroutine responsible for pulling entries for a // subfetcher is a trie fetcher goroutine responsible for pulling entries for a
View File
@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
@ -169,7 +168,7 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta
if tx.Type() == types.BlobTxType { if tx.Type() == types.BlobTxType {
receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob) receipt.BlobGasUsed = uint64(len(tx.BlobHashes()) * params.BlobTxBlobGasPerBlob)
receipt.BlobGasPrice = eip4844.CalcBlobFee(*evm.Context.ExcessBlobGas) receipt.BlobGasPrice = evm.Context.BlobBaseFee
} }
// If the transaction created a contract, store the creation address in the receipt. // If the transaction created a contract, store the creation address in the receipt.
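A quick sanity check on the receipt fields above, given that blob gas is a fixed 2^17 per blob:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	for _, blobs := range []int{1, 2, 6} {
		fmt.Println(blobs, "blob(s) ->", uint64(blobs)*params.BlobTxBlobGasPerBlob, "blob gas")
	}
}
```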
View File
@ -132,7 +132,7 @@ func TestStateProcessorErrors(t *testing.T) {
) )
defer blockchain.Stop() defer blockchain.Stop()
bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) bigNumber := new(big.Int).SetBytes(common.MaxHash.Bytes())
tooBigNumber := new(big.Int).Set(bigNumber) tooBigNumber := new(big.Int).Set(bigNumber)
tooBigNumber.Add(tooBigNumber, common.Big1) tooBigNumber.Add(tooBigNumber, common.Big1)
for i, tt := range []struct { for i, tt := range []struct {
View File
@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
cmath "github.com/ethereum/go-ethereum/common/math" cmath "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -248,7 +247,7 @@ func (st *StateTransition) buyGas() error {
balanceCheck.Add(balanceCheck, blobBalanceCheck) balanceCheck.Add(balanceCheck, blobBalanceCheck)
// Pay for blobGasUsed * actual blob fee // Pay for blobGasUsed * actual blob fee
blobFee := new(big.Int).SetUint64(blobGas) blobFee := new(big.Int).SetUint64(blobGas)
blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)) blobFee.Mul(blobFee, st.evm.Context.BlobBaseFee)
mgval.Add(mgval, blobFee) mgval.Add(mgval, blobFee)
} }
} }
@ -329,7 +328,7 @@ func (st *StateTransition) preCheck() error {
if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) {
if st.blobGasUsed() > 0 { if st.blobGasUsed() > 0 {
// Check that the user is paying at least the current blob fee // Check that the user is paying at least the current blob fee
blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas) blobFee := st.evm.Context.BlobBaseFee
if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 { if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 {
return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee) return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee)
} }
View File
@ -97,6 +97,8 @@ type blobTxMeta struct {
execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump
execFeeCap *uint256.Int // Needed to validate replacement price bump execFeeCap *uint256.Int // Needed to validate replacement price bump
blobFeeCap *uint256.Int // Needed to validate replacement price bump blobFeeCap *uint256.Int // Needed to validate replacement price bump
execGas uint64 // Needed to check inclusion validity before reading the blob
blobGas uint64 // Needed to check inclusion validity before reading the blob
basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap
blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap
@ -118,6 +120,8 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
execTipCap: uint256.MustFromBig(tx.GasTipCap()), execTipCap: uint256.MustFromBig(tx.GasTipCap()),
execFeeCap: uint256.MustFromBig(tx.GasFeeCap()), execFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()), blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()),
execGas: tx.Gas(),
blobGas: tx.BlobGas(),
} }
meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap) meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap)
meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap) meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap)
@ -307,8 +311,8 @@ type BlobPool struct {
spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts
evict *evictHeap // Heap of cheapest accounts for eviction when full evict *evictHeap // Heap of cheapest accounts for eviction when full
eventFeed event.Feed // Event feed to send out new tx events on pool inclusion discoverFeed event.Feed // Event feed to send out new tx events on pool discovery (reorg excluded)
eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination insertFeed event.Feed // Event feed to send out new tx events on pool inclusion (reorg included)
lock sync.RWMutex // Mutex protecting the pool during reorg handling lock sync.RWMutex // Mutex protecting the pool during reorg handling
} }
@ -436,8 +440,6 @@ func (p *BlobPool) Close() error {
if err := p.store.Close(); err != nil { if err := p.store.Close(); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
p.eventScope.Close()
switch { switch {
case errs == nil: case errs == nil:
return nil return nil
@ -758,15 +760,21 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
// Run the reorg between the old and new head and figure out which accounts // Run the reorg between the old and new head and figure out which accounts
// need to be rechecked and which transactions need to be readded // need to be rechecked and which transactions need to be readded
if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil { if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
var adds []*types.Transaction
for addr, txs := range reinject { for addr, txs := range reinject {
// Blindly push all the lost transactions back into the pool // Blindly push all the lost transactions back into the pool
for _, tx := range txs { for _, tx := range txs {
p.reinject(addr, tx.Hash()) if err := p.reinject(addr, tx.Hash()); err == nil {
adds = append(adds, tx.WithoutBlobTxSidecar())
}
} }
// Recheck the account's pooled transactions to drop included and // Recheck the account's pooled transactions to drop included and
// invalidated one // invalidated one
p.recheck(addr, inclusions) p.recheck(addr, inclusions)
} }
if len(adds) > 0 {
p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
}
} }
// Flush out any blobs from limbo that are older than the latest finality // Flush out any blobs from limbo that are older than the latest finality
if p.chain.Config().IsCancun(p.head.Number, p.head.Time) { if p.chain.Config().IsCancun(p.head.Number, p.head.Time) {
@ -921,13 +929,13 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*
// Note, the method will not initialize the eviction cache values as those will // Note, the method will not initialize the eviction cache values as those will
// be done once for all transactions belonging to an account after all individual // be done once for all transactions belonging to an account after all individual
// transactions are injected back into the pool. // transactions are injected back into the pool.
func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) { func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
// Retrieve the associated blob from the limbo. Without the blobs, we cannot // Retrieve the associated blob from the limbo. Without the blobs, we cannot
// add the transaction back into the pool as it is not mineable. // add the transaction back into the pool as it is not mineable.
tx, err := p.limbo.pull(txhash) tx, err := p.limbo.pull(txhash)
if err != nil { if err != nil {
log.Error("Blobs unavailable, dropping reorged tx", "err", err) log.Error("Blobs unavailable, dropping reorged tx", "err", err)
return return err
} }
// TODO: seems like an easy optimization here would be getting the serialized tx // TODO: seems like an easy optimization here would be getting the serialized tx
// from limbo instead of re-serializing it here. // from limbo instead of re-serializing it here.
@ -936,12 +944,12 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
blob, err := rlp.EncodeToBytes(tx) blob, err := rlp.EncodeToBytes(tx)
if err != nil { if err != nil {
log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err) log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
return return err
} }
id, err := p.store.Put(blob) id, err := p.store.Put(blob)
if err != nil { if err != nil {
log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err) log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
return return err
} }
// Update the indexes and metrics // Update the indexes and metrics
@ -949,7 +957,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
if _, ok := p.index[addr]; !ok { if _, ok := p.index[addr]; !ok {
if err := p.reserve(addr, true); err != nil { if err := p.reserve(addr, true); err != nil {
log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err) log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
return return err
} }
p.index[addr] = []*blobTxMeta{meta} p.index[addr] = []*blobTxMeta{meta}
p.spent[addr] = meta.costCap p.spent[addr] = meta.costCap
@ -960,6 +968,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) {
} }
p.lookup[meta.hash] = meta.id p.lookup[meta.hash] = meta.id
p.stored += uint64(meta.size) p.stored += uint64(meta.size)
return nil
} }
// SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements // SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
@ -1154,9 +1163,19 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
// Add inserts a set of blob transactions into the pool if they pass validation (both // Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restrictions). // consensus validity and pool restrictions).
func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error { func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
errs := make([]error, len(txs)) var (
adds = make([]*types.Transaction, 0, len(txs))
errs = make([]error, len(txs))
)
for i, tx := range txs { for i, tx := range txs {
errs[i] = p.add(tx) errs[i] = p.add(tx)
if errs[i] == nil {
adds = append(adds, tx.WithoutBlobTxSidecar())
}
}
if len(adds) > 0 {
p.discoverFeed.Send(core.NewTxsEvent{Txs: adds})
p.insertFeed.Send(core.NewTxsEvent{Txs: adds})
} }
return errs return errs
} }
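Note that both feeds announce tx.WithoutBlobTxSidecar() rather than the full transaction. A minimal sketch of the difference, assuming tx is a signed blob transaction with its sidecar attached:

	slim := tx.WithoutBlobTxSidecar()     // same payload, blobs/commitments/proofs dropped
	fmt.Println(slim.Hash() == tx.Hash()) // true: the tx hash never covers the sidecar
	fmt.Println(slim.Size() < tx.Size())  // true: announcements stay lightweight
	fmt.Println(slim.BlobHashes())        // versioned hashes live in the payload, so kept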
@ -1384,6 +1403,8 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr
Time: time.Now(), // TODO(karalabe): Maybe save these and use that? Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
GasFeeCap: tx.execFeeCap.ToBig(), GasFeeCap: tx.execFeeCap.ToBig(),
GasTipCap: tx.execTipCap.ToBig(), GasTipCap: tx.execTipCap.ToBig(),
Gas: tx.execGas,
BlobGas: tx.blobGas,
}) })
} }
if len(lazies) > 0 { if len(lazies) > 0 {
@ -1468,10 +1489,14 @@ func (p *BlobPool) updateLimboMetrics() {
limboSlotusedGauge.Update(int64(slotused)) limboSlotusedGauge.Update(int64(slotused))
} }
// SubscribeTransactions registers a subscription of NewTxsEvent and // SubscribeTransactions registers a subscription for new transaction events,
// starts sending event to the given channel. // supporting feeding only newly seen or also resurrected transactions.
func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
return p.eventScope.Track(p.eventFeed.Subscribe(ch)) if reorgs {
return p.insertFeed.Subscribe(ch)
} else {
return p.discoverFeed.Subscribe(ch)
}
} }
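A minimal sketch of the two subscription modes, assuming p is an initialized *BlobPool; a reorg-resurrected transaction shows up only on the reorgs=true channel, while a freshly discovered one shows up on both:

	newOnly := make(chan core.NewTxsEvent, 16)
	withReorgs := make(chan core.NewTxsEvent, 16)

	subNew := p.SubscribeTransactions(newOnly, false)   // discoverFeed: fresh arrivals only
	subAll := p.SubscribeTransactions(withReorgs, true) // insertFeed: fresh + resurrected
	defer subNew.Unsubscribe()
	defer subAll.Unsubscribe()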
// Nonce returns the next nonce of an account, with all transactions executable // Nonce returns the next nonce of an account, with all transactions executable

View File

@ -208,7 +208,6 @@ type LegacyPool struct {
chain BlockChain chain BlockChain
gasTip atomic.Pointer[big.Int] gasTip atomic.Pointer[big.Int]
txFeed event.Feed txFeed event.Feed
scope event.SubscriptionScope
signer types.Signer signer types.Signer
mu sync.RWMutex mu sync.RWMutex
@ -404,9 +403,6 @@ func (pool *LegacyPool) loop() {
// Close terminates the transaction pool. // Close terminates the transaction pool.
func (pool *LegacyPool) Close() error { func (pool *LegacyPool) Close() error {
// Unsubscribe all subscriptions registered from txpool
pool.scope.Close()
// Terminate the pool reorger and return // Terminate the pool reorger and return
close(pool.reorgShutdownCh) close(pool.reorgShutdownCh)
pool.wg.Wait() pool.wg.Wait()
@ -425,10 +421,14 @@ func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
<-wait <-wait
} }
// SubscribeTransactions registers a subscription of NewTxsEvent and // SubscribeTransactions registers a subscription for new transaction events,
// starts sending event to the given channel. // supporting feeding only newly seen or also resurrected transactions.
func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
return pool.scope.Track(pool.txFeed.Subscribe(ch)) // The legacy pool has a very messed up internal shuffling, so it's kind of
// hard to separate newly discovered transactions from resurrected ones. This
// is because the new txs are added to the queue, resurrected ones too and
// reorgs run lazily, so separating the two would need a marker.
return pool.txFeed.Subscribe(ch)
} }
// SetGasTip updates the minimum gas tip required by the transaction pool for a // SetGasTip updates the minimum gas tip required by the transaction pool for a
@ -552,6 +552,8 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L
Time: txs[i].Time(), Time: txs[i].Time(),
GasFeeCap: txs[i].GasFeeCap(), GasFeeCap: txs[i].GasFeeCap(),
GasTipCap: txs[i].GasTipCap(), GasTipCap: txs[i].GasTipCap(),
Gas: txs[i].Gas(),
BlobGas: txs[i].BlobGas(),
} }
} }
pending[addr] = lazies pending[addr] = lazies

View File

@ -205,7 +205,7 @@ func (m *sortedMap) Remove(nonce uint64) bool {
// removed from the list. // removed from the list.
// //
// Note, all transactions with nonces lower than start will also be returned to // Note, all transactions with nonces lower than start will also be returned to
// prevent getting into and invalid state. This is not something that should ever // prevent getting into an invalid state. This is not something that should ever
// happen but better to be self correcting than failing! // happen but better to be self correcting than failing!
func (m *sortedMap) Ready(start uint64) types.Transactions { func (m *sortedMap) Ready(start uint64) types.Transactions {
// Short circuit if no transactions are available // Short circuit if no transactions are available
@ -421,7 +421,7 @@ func (l *list) Remove(tx *types.Transaction) (bool, types.Transactions) {
// removed from the list. // removed from the list.
// //
// Note, all transactions with nonces lower than start will also be returned to // Note, all transactions with nonces lower than start will also be returned to
// prevent getting into and invalid state. This is not something that should ever // prevent getting into an invalid state. This is not something that should ever
// happen but better to be self correcting than failing! // happen but better to be self correcting than failing!
func (l *list) Ready(start uint64) types.Transactions { func (l *list) Ready(start uint64) types.Transactions {
txs := l.txs.Ready(start) txs := l.txs.Ready(start)

View File

@ -30,13 +30,16 @@ import (
// enough for the miner and other APIs to handle large batches of transactions; // enough for the miner and other APIs to handle large batches of transactions;
// and supports pulling up the entire transaction when really needed. // and supports pulling up the entire transaction when really needed.
type LazyTransaction struct { type LazyTransaction struct {
Pool SubPool // Transaction subpool to pull the real transaction up Pool LazyResolver // Transaction resolver to pull the real transaction up
Hash common.Hash // Transaction hash to pull up if needed Hash common.Hash // Transaction hash to pull up if needed
Tx *types.Transaction // Transaction if already resolved Tx *types.Transaction // Transaction if already resolved
Time time.Time // Time when the transaction was first seen Time time.Time // Time when the transaction was first seen
GasFeeCap *big.Int // Maximum fee per gas the transaction may consume GasFeeCap *big.Int // Maximum fee per gas the transaction may consume
GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay
Gas uint64 // Amount of gas required by the transaction
BlobGas uint64 // Amount of blob gas required by the transaction
} }
// Resolve retrieves the full transaction belonging to a lazy handle if it is still // Resolve retrieves the full transaction belonging to a lazy handle if it is still
@ -48,6 +51,14 @@ func (ltx *LazyTransaction) Resolve() *types.Transaction {
return ltx.Tx return ltx.Tx
} }
// LazyResolver is a minimal interface needed for a transaction pool to satisfy
// resolving lazy transactions. It's mostly a helper to avoid the entire sub-
// pool being injected into the lazy transaction.
type LazyResolver interface {
// Get returns a transaction if it is contained in the pool, or nil otherwise.
Get(hash common.Hash) *types.Transaction
}
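A minimal sketch of the decoupling this buys; the map-backed resolver below is hypothetical, purely to show that anything with a Get method can back a lazy handle:

	// mapResolver is a hypothetical LazyResolver backed by a plain map.
	type mapResolver map[common.Hash]*types.Transaction

	func (m mapResolver) Get(hash common.Hash) *types.Transaction { return m[hash] }

	// A lazy handle now resolves through it without a full SubPool:
	//   ltx := &txpool.LazyTransaction{Pool: mapResolver{tx.Hash(): tx}, Hash: tx.Hash()}
	//   full := ltx.Resolve() // returns tx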
// AddressReserver is passed by the main transaction pool to subpools, so they // AddressReserver is passed by the main transaction pool to subpools, so they
// may request (and relinquish) exclusive access to certain addresses. // may request (and relinquish) exclusive access to certain addresses.
type AddressReserver func(addr common.Address, reserve bool) error type AddressReserver func(addr common.Address, reserve bool) error
@ -99,8 +110,10 @@ type SubPool interface {
// account and sorted by nonce. // account and sorted by nonce.
Pending(enforceTips bool) map[common.Address][]*LazyTransaction Pending(enforceTips bool) map[common.Address][]*LazyTransaction
// SubscribeTransactions subscribes to new transaction events. // SubscribeTransactions subscribes to new transaction events. The subscriber
SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription // can decide whether to receive notifications only for newly seen transactions
// or also for reorged out ones.
SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription
// Nonce returns the next nonce of an account, with all transactions executable // Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top. // by the pool already applied on top.

View File

@ -155,13 +155,15 @@ func (p *TxPool) Close() error {
if err := <-errc; err != nil { if err := <-errc; err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
// Terminate each subpool // Terminate each subpool
for _, subpool := range p.subpools { for _, subpool := range p.subpools {
if err := subpool.Close(); err != nil { if err := subpool.Close(); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
} }
// Unsubscribe anyone still listening for tx events
p.subs.Close()
if len(errs) > 0 { if len(errs) > 0 {
return fmt.Errorf("subpool close errors: %v", errs) return fmt.Errorf("subpool close errors: %v", errs)
} }
@ -316,12 +318,12 @@ func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction
return txs return txs
} }
// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending // SubscribeTransactions registers a subscription for new transaction events,
// events to the given channel. // supporting feeding only newly seen or also resurrected transactions.
func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
subs := make([]event.Subscription, len(p.subpools)) subs := make([]event.Subscription, len(p.subpools))
for i, subpool := range p.subpools { for i, subpool := range p.subpools {
subs[i] = subpool.SubscribeTransactions(ch) subs[i] = subpool.SubscribeTransactions(ch, reorgs)
} }
return p.subs.Track(event.JoinSubscriptions(subs...)) return p.subs.Track(event.JoinSubscriptions(subs...))
} }
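For consumers, a sketch of watching the aggregate pool for newly seen transactions only (watchPool is a hypothetical helper, assuming pool is an initialized *txpool.TxPool):

	func watchPool(pool *txpool.TxPool) error {
		ch := make(chan core.NewTxsEvent, 64)
		sub := pool.SubscribeTransactions(ch, false) // skip reorg resurrections
		defer sub.Unsubscribe()

		for {
			select {
			case ev := <-ch:
				for _, tx := range ev.Txs {
					log.Trace("Observed new transaction", "hash", tx.Hash())
				}
			case err := <-sub.Err():
				return err
			}
		}
	}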

View File

@ -95,7 +95,7 @@ type DerivableList interface {
func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte {
buf.Reset() buf.Reset()
list.EncodeIndex(i, buf) list.EncodeIndex(i, buf)
// It's really unfortunate that we need to do perform this copy. // It's really unfortunate that we need to perform this copy.
// StackTrie holds onto the values until Hash is called, so the values // StackTrie holds onto the values until Hash is called, so the values
// written to it must not alias. // written to it must not alias.
return common.CopyBytes(buf.Bytes()) return common.CopyBytes(buf.Bytes())

View File

@ -87,7 +87,7 @@ func SlimAccountRLP(account StateAccount) []byte {
return data return data
} }
// FullAccount decodes the data on the 'slim RLP' format and return // FullAccount decodes the data on the 'slim RLP' format and returns
// the consensus format account. // the consensus format account.
func FullAccount(data []byte) (*StateAccount, error) { func FullAccount(data []byte) (*StateAccount, error) {
var slim SlimAccount var slim SlimAccount

View File

@ -168,7 +168,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
} }
// UnmarshalBinary decodes the canonical encoding of transactions. // UnmarshalBinary decodes the canonical encoding of transactions.
// It supports legacy RLP transactions and EIP2718 typed transactions. // It supports legacy RLP transactions and EIP-2718 typed transactions.
func (tx *Transaction) UnmarshalBinary(b []byte) error { func (tx *Transaction) UnmarshalBinary(b []byte) error {
if len(b) > 0 && b[0] > 0x7f { if len(b) > 0 && b[0] > 0x7f {
// It's a legacy transaction. // It's a legacy transaction.
@ -180,7 +180,7 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error {
tx.setDecoded(&data, uint64(len(b))) tx.setDecoded(&data, uint64(len(b)))
return nil return nil
} }
// It's an EIP2718 typed transaction envelope. // It's an EIP-2718 typed transaction envelope.
inner, err := tx.decodeTyped(b) inner, err := tx.decodeTyped(b)
if err != nil { if err != nil {
return err return err
@ -395,7 +395,7 @@ func (tx *Transaction) BlobGasFeeCap() *big.Int {
return nil return nil
} }
// BlobHashes returns the hases of the blob commitments for blob transactions, nil otherwise. // BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise.
func (tx *Transaction) BlobHashes() []common.Hash { func (tx *Transaction) BlobHashes() []common.Hash {
if blobtx, ok := tx.inner.(*BlobTx); ok { if blobtx, ok := tx.inner.(*BlobTx); ok {
return blobtx.BlobHashes return blobtx.BlobHashes

View File

@ -57,7 +57,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint
} }
// LatestSigner returns the 'most permissive' Signer available for the given chain // LatestSigner returns the 'most permissive' Signer available for the given chain
// configuration. Specifically, this enables support of all types of transacrions // configuration. Specifically, this enables support of all types of transactions
// when their respective forks are scheduled to occur at any block number (or time) // when their respective forks are scheduled to occur at any block number (or time)
// in the chain config. // in the chain config.
// //

View File

@ -31,13 +31,13 @@ type ContractRef interface {
// AccountRef implements ContractRef. // AccountRef implements ContractRef.
// //
// Account references are used during EVM initialisation and // Account references are used during EVM initialisation and
// it's primary use is to fetch addresses. Removing this object // its primary use is to fetch addresses. Removing this object
// proves difficult because of the cached jump destinations which // proves difficult because of the cached jump destinations which
// are fetched from the parent contract (i.e. the caller), which // are fetched from the parent contract (i.e. the caller), which
// is a ContractRef. // is a ContractRef.
type AccountRef common.Address type AccountRef common.Address
// Address casts AccountRef to a Address // Address casts AccountRef to an Address
func (ar AccountRef) Address() common.Address { return (common.Address)(ar) } func (ar AccountRef) Address() common.Address { return (common.Address)(ar) }
// Contract represents an ethereum contract in the state database. It contains // Contract represents an ethereum contract in the state database. It contains

View File

@ -282,9 +282,15 @@ func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
return nil, nil return nil, nil
} }
// enable4844 applies EIP-4844 (DATAHASH opcode) // opBlobBaseFee implements BLOBBASEFEE opcode
func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
blobBaseFee, _ := uint256.FromBig(interpreter.evm.Context.BlobBaseFee)
scope.Stack.push(blobBaseFee)
return nil, nil
}
// enable4844 applies EIP-4844 (BLOBHASH opcode)
func enable4844(jt *JumpTable) { func enable4844(jt *JumpTable) {
// New opcode
jt[BLOBHASH] = &operation{ jt[BLOBHASH] = &operation{
execute: opBlobHash, execute: opBlobHash,
constantGas: GasFastestStep, constantGas: GasFastestStep,
@ -293,6 +299,16 @@ func enable4844(jt *JumpTable) {
} }
} }
// enable7516 applies EIP-7516 (BLOBBASEFEE opcode)
func enable7516(jt *JumpTable) {
jt[BLOBBASEFEE] = &operation{
execute: opBlobBaseFee,
constantGas: GasQuickStep,
minStack: minStack(0, 1),
maxStack: maxStack(0, 1),
}
}
// enable6780 applies EIP-6780 (deactivate SELFDESTRUCT) // enable6780 applies EIP-6780 (deactivate SELFDESTRUCT)
func enable6780(jt *JumpTable) { func enable6780(jt *JumpTable) {
jt[SELFDESTRUCT] = &operation{ jt[SELFDESTRUCT] = &operation{

View File

@ -73,8 +73,8 @@ type BlockContext struct {
Time uint64 // Provides information for TIME Time uint64 // Provides information for TIME
Difficulty *big.Int // Provides information for DIFFICULTY Difficulty *big.Int // Provides information for DIFFICULTY
BaseFee *big.Int // Provides information for BASEFEE BaseFee *big.Int // Provides information for BASEFEE
BlobBaseFee *big.Int // Provides information for BLOBBASEFEE
Random *common.Hash // Provides information for PREVRANDAO Random *common.Hash // Provides information for PREVRANDAO
ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the data
} }
// TxContext provides the EVM with information about a transaction. // TxContext provides the EVM with information about a transaction.

View File

@ -104,7 +104,7 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
// Legacy rules should be applied if we are in Petersburg (removal of EIP-1283) // Legacy rules should be applied if we are in Petersburg (removal of EIP-1283)
// OR Constantinople is not active // OR Constantinople is not active
if evm.chainRules.IsPetersburg || !evm.chainRules.IsConstantinople { if evm.chainRules.IsPetersburg || !evm.chainRules.IsConstantinople {
// This checks for 3 scenario's and calculates gas accordingly: // This checks for 3 scenarios and calculates gas accordingly:
// //
// 1. From a zero-value address to a non-zero value (NEW VALUE) // 1. From a zero-value address to a non-zero value (NEW VALUE)
// 2. From a non-zero value address to a zero-value address (DELETE) // 2. From a non-zero value address to a zero-value address (DELETE)

View File

@ -45,7 +45,7 @@ type EVMInterpreter struct {
table *JumpTable table *JumpTable
hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes
hasherBuf common.Hash // Keccak256 hasher result array shared aross opcodes hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
readOnly bool // Whether to throw on stateful modifications readOnly bool // Whether to throw on stateful modifications
returnData []byte // Last CALL's return data for subsequent reuse returnData []byte // Last CALL's return data for subsequent reuse

View File

@ -82,7 +82,8 @@ func validate(jt JumpTable) JumpTable {
func newCancunInstructionSet() JumpTable { func newCancunInstructionSet() JumpTable {
instructionSet := newShanghaiInstructionSet() instructionSet := newShanghaiInstructionSet()
enable4844(&instructionSet) // EIP-4844 (DATAHASH opcode) enable4844(&instructionSet) // EIP-4844 (BLOBHASH opcode)
enable7516(&instructionSet) // EIP-7516 (BLOBBASEFEE opcode)
enable1153(&instructionSet) // EIP-1153 "Transient Storage" enable1153(&instructionSet) // EIP-1153 "Transient Storage"
enable5656(&instructionSet) // EIP-5656 (MCOPY opcode) enable5656(&instructionSet) // EIP-5656 (MCOPY opcode)
enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction

View File

@ -56,7 +56,7 @@ func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
return newFrontierInstructionSet(), nil return newFrontierInstructionSet(), nil
} }
// Stack returns the mininum and maximum stack requirements. // Stack returns the minimum and maximum stack requirements.
func (op *operation) Stack() (int, int) { func (op *operation) Stack() (int, int) {
return op.minStack, op.maxStack return op.minStack, op.maxStack
} }

View File

@ -101,6 +101,7 @@ const (
SELFBALANCE OpCode = 0x47 SELFBALANCE OpCode = 0x47
BASEFEE OpCode = 0x48 BASEFEE OpCode = 0x48
BLOBHASH OpCode = 0x49 BLOBHASH OpCode = 0x49
BLOBBASEFEE OpCode = 0x4a
) )
// 0x50 range - 'storage' and execution. // 0x50 range - 'storage' and execution.
@ -287,6 +288,7 @@ var opCodeToString = map[OpCode]string{
SELFBALANCE: "SELFBALANCE", SELFBALANCE: "SELFBALANCE",
BASEFEE: "BASEFEE", BASEFEE: "BASEFEE",
BLOBHASH: "BLOBHASH", BLOBHASH: "BLOBHASH",
BLOBBASEFEE: "BLOBBASEFEE",
// 0x50 range - 'storage' and execution. // 0x50 range - 'storage' and execution.
POP: "POP", POP: "POP",
@ -444,6 +446,7 @@ var stringToOp = map[string]OpCode{
"CHAINID": CHAINID, "CHAINID": CHAINID,
"BASEFEE": BASEFEE, "BASEFEE": BASEFEE,
"BLOBHASH": BLOBHASH, "BLOBHASH": BLOBHASH,
"BLOBBASEFEE": BLOBBASEFEE,
"DELEGATECALL": DELEGATECALL, "DELEGATECALL": DELEGATECALL,
"STATICCALL": STATICCALL, "STATICCALL": STATICCALL,
"CODESIZE": CODESIZE, "CODESIZE": CODESIZE,

View File

@ -37,6 +37,7 @@ func NewEnv(cfg *Config) *vm.EVM {
Difficulty: cfg.Difficulty, Difficulty: cfg.Difficulty,
GasLimit: cfg.GasLimit, GasLimit: cfg.GasLimit,
BaseFee: cfg.BaseFee, BaseFee: cfg.BaseFee,
BlobBaseFee: cfg.BlobBaseFee,
Random: cfg.Random, Random: cfg.Random,
} }

View File

@ -44,6 +44,7 @@ type Config struct {
Debug bool Debug bool
EVMConfig vm.Config EVMConfig vm.Config
BaseFee *big.Int BaseFee *big.Int
BlobBaseFee *big.Int
BlobHashes []common.Hash BlobHashes []common.Hash
Random *common.Hash Random *common.Hash
@ -95,6 +96,9 @@ func setDefaults(cfg *Config) {
if cfg.BaseFee == nil { if cfg.BaseFee == nil {
cfg.BaseFee = big.NewInt(params.InitialBaseFee) cfg.BaseFee = big.NewInt(params.InitialBaseFee)
} }
if cfg.BlobBaseFee == nil {
cfg.BlobBaseFee = new(big.Int)
}
} }
// Execute executes the code using the input as call data during the execution. // Execute executes the code using the input as call data during the execution.
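For instance, a sketch of exercising the new BLOBBASEFEE opcode end to end through the runtime (assumes the configured chain rules have Cancun active at execution time, otherwise the opcode is invalid):

	code := []byte{
		byte(vm.BLOBBASEFEE), // push the blob base fee onto the stack
		byte(vm.PUSH1), 0x00, // memory offset 0
		byte(vm.MSTORE),      // store the fee at mem[0:32]
		byte(vm.PUSH1), 0x20, // return 32 bytes
		byte(vm.PUSH1), 0x00, // from offset 0
		byte(vm.RETURN),
	}
	ret, _, err := runtime.Execute(code, nil, &runtime.Config{
		// assumption: a ChainConfig with Cancun active is required;
		// set one explicitly if the default does not schedule it.
		BlobBaseFee: big.NewInt(7),
	})
	// on success, ret is the 32-byte big-endian encoding of 7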

View File

@ -334,7 +334,7 @@ func (b *EthAPIBackend) TxPool() *txpool.TxPool {
} }
func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return b.eth.txPool.SubscribeNewTxsEvent(ch) return b.eth.txPool.SubscribeTransactions(ch, true)
} }
func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress { func (b *EthAPIBackend) SyncProgress() ethereum.SyncProgress {

View File

@ -133,8 +133,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
scheme, err := rawdb.ParseStateScheme(config.StateScheme, chainDb)
if err != nil {
return nil, err
}
// Try to recover offline state pruning only in hash-based. // Try to recover offline state pruning only in hash-based.
if config.StateScheme == rawdb.HashScheme { if scheme == rawdb.HashScheme {
if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil { if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil {
log.Error("Failed to recover state", "error", err) log.Error("Failed to recover state", "error", err)
} }
@ -194,7 +198,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
SnapshotLimit: config.SnapshotCache, SnapshotLimit: config.SnapshotCache,
Preimages: config.Preimages, Preimages: config.Preimages,
StateHistory: config.StateHistory, StateHistory: config.StateHistory,
StateScheme: config.StateScheme, StateScheme: scheme,
} }
) )
// Override the chain config with provided settings. // Override the chain config with provided settings.
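A sketch of the scheme resolution above in isolation, assuming chainDb is an opened ethdb.Database; an empty user value defers to whatever scheme the database already persists, falling back to the default:

	scheme, err := rawdb.ParseStateScheme("", chainDb) // or rawdb.HashScheme / rawdb.PathScheme
	if err != nil {
		return nil, err
	}
	log.Info("Resolved state scheme", "scheme", scheme)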

View File

@ -513,7 +513,7 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot) block, err := engine.ExecutableDataToBlock(params, versionedHashes, beaconRoot)
if err != nil { if err != nil {
log.Warn("Invalid NewPayload params", "params", params, "error", err) log.Warn("Invalid NewPayload params", "params", params, "error", err)
return engine.PayloadStatusV1{Status: engine.INVALID}, nil return api.invalid(err, nil), nil
} }
// Stash away the last update to warn the user if the beacon client goes offline // Stash away the last update to warn the user if the beacon client goes offline
api.lastNewPayloadLock.Lock() api.lastNewPayloadLock.Lock()
@ -694,20 +694,21 @@ func (api *ConsensusAPI) checkInvalidAncestor(check common.Hash, head common.Has
} }
} }
// invalid returns a response "INVALID" with the latest valid hash supplied by latest or to the current head // invalid returns a response "INVALID" with the latest valid hash supplied by latest.
// if no latestValid block was provided.
func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.PayloadStatusV1 { func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) engine.PayloadStatusV1 {
currentHash := api.eth.BlockChain().CurrentBlock().Hash() var currentHash *common.Hash
if latestValid != nil { if latestValid != nil {
if latestValid.Difficulty.BitLen() != 0 {
// Set latest valid hash to 0x0 if parent is PoW block // Set latest valid hash to 0x0 if parent is PoW block
currentHash = common.Hash{} currentHash = &common.Hash{}
if latestValid.Difficulty.BitLen() == 0 { } else {
// Otherwise set latest valid hash to parent hash // Otherwise set latest valid hash to parent hash
currentHash = latestValid.Hash() h := latestValid.Hash()
currentHash = &h
} }
} }
errorMsg := err.Error() errorMsg := err.Error()
return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: &currentHash, ValidationError: &errorMsg} return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: currentHash, ValidationError: &errorMsg}
} }
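The three response shapes, sketched; api is a *ConsensusAPI, and powParent/posParent are hypothetical headers with non-zero and zero difficulty respectively:

	api.invalid(err, nil)       // LatestValidHash: nil (ancestry unknown)
	api.invalid(err, powParent) // LatestValidHash: &common.Hash{} (pre-merge parent)
	api.invalid(err, posParent) // LatestValidHash: pointer to posParent.Hash()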
// heartbeat loops indefinitely, and checks if there have been beacon client updates // heartbeat loops indefinitely, and checks if there have been beacon client updates
@ -776,7 +777,7 @@ func (api *ConsensusAPI) ExchangeCapabilities([]string) []string {
// GetPayloadBodiesByHashV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list // GetPayloadBodiesByHashV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list
// of block bodies by the engine api. // of block bodies by the engine api.
func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBodyV1 { func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBodyV1 {
var bodies = make([]*engine.ExecutionPayloadBodyV1, len(hashes)) bodies := make([]*engine.ExecutionPayloadBodyV1, len(hashes))
for i, hash := range hashes { for i, hash := range hashes {
block := api.eth.BlockChain().GetBlockByHash(hash) block := api.eth.BlockChain().GetBlockByHash(hash)
bodies[i] = getBody(block) bodies[i] = getBody(block)

View File

@ -199,7 +199,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal) error {
func (c *SimulatedBeacon) loopOnDemand() { func (c *SimulatedBeacon) loopOnDemand() {
var ( var (
newTxs = make(chan core.NewTxsEvent) newTxs = make(chan core.NewTxsEvent)
sub = c.eth.TxPool().SubscribeNewTxsEvent(newTxs) sub = c.eth.TxPool().SubscribeTransactions(newTxs, true)
) )
defer sub.Unsubscribe() defer sub.Unsubscribe()

View File

@ -20,7 +20,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -28,22 +28,26 @@ import (
) )
// FullSyncTester is an auxiliary service that allows Geth to perform full sync // FullSyncTester is an auxiliary service that allows Geth to perform full sync
// alone without consensus-layer attached. Users must specify a valid block as // alone without consensus-layer attached. Users must specify a valid block hash
// the sync target. This tester can be applied to different networks, no matter // as the sync target.
// it's pre-merge or post-merge, but only for full-sync. //
// This tester can be applied to different networks, whether pre-merge or
// post-merge, but only for full-sync.
type FullSyncTester struct { type FullSyncTester struct {
api *ConsensusAPI stack *node.Node
block *types.Block backend *eth.Ethereum
target common.Hash
closed chan struct{} closed chan struct{}
wg sync.WaitGroup wg sync.WaitGroup
} }
// RegisterFullSyncTester registers the full-sync tester service into the node // RegisterFullSyncTester registers the full-sync tester service into the node
// stack for launching and stopping the service controlled by node. // stack for launching and stopping the service controlled by node.
func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, block *types.Block) (*FullSyncTester, error) { func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, target common.Hash) (*FullSyncTester, error) {
cl := &FullSyncTester{ cl := &FullSyncTester{
api: newConsensusAPIWithoutHeartbeat(backend), stack: stack,
block: block, backend: backend,
target: target,
closed: make(chan struct{}), closed: make(chan struct{}),
} }
stack.RegisterLifecycle(cl) stack.RegisterLifecycle(cl)
@ -56,29 +60,25 @@ func (tester *FullSyncTester) Start() error {
go func() { go func() {
defer tester.wg.Done() defer tester.wg.Done()
// Trigger beacon sync with the provided block hash as trusted
// chain head.
err := tester.backend.Downloader().BeaconDevSync(downloader.FullSync, tester.target, tester.closed)
if err != nil {
log.Info("Failed to trigger beacon sync", "err", err)
}
ticker := time.NewTicker(time.Second * 5) ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
case <-ticker.C: case <-ticker.C:
// Don't bother downloader in case it's already syncing. // Stop in case the target block is already stored locally.
if tester.api.eth.Downloader().Synchronising() { if block := tester.backend.BlockChain().GetBlockByHash(tester.target); block != nil {
continue log.Info("Full-sync target reached", "number", block.NumberU64(), "hash", block.Hash())
} go tester.stack.Close() // async since we need to close ourselves
// Short circuit in case the target block is already stored
// locally. TODO(somehow terminate the node stack if target
// is reached).
if tester.api.eth.BlockChain().HasBlock(tester.block.Hash(), tester.block.NumberU64()) {
log.Info("Full-sync target reached", "number", tester.block.NumberU64(), "hash", tester.block.Hash())
return return
} }
// Trigger beacon sync with the provided block header as
// trusted chain head.
err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header(), tester.block.Header())
if err != nil {
log.Info("Failed to beacon sync", "err", err)
}
case <-tester.closed: case <-tester.closed:
return return
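A sketch of wiring the tester into a dev node; stack, backend and target are assumed to come from the caller, with target being the trusted head hash supplied out of band (e.g. a CLI flag):

	if _, err := catalyst.RegisterFullSyncTester(stack, backend, target); err != nil {
		log.Crit("Failed to register full-sync tester", "err", err)
	}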

View File

@ -0,0 +1,81 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package downloader
import (
"errors"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
// BeaconDevSync is a development helper to test synchronization by providing
// a block hash instead of header to run the beacon sync against.
//
// The method will reach out to the network to retrieve the header of the sync
// target instead of receiving it from the consensus node.
//
// Note, this must not be used in live code. If the forkchoice endpoint were
// to use this instead of giving us the payload first, then essentially nobody
// in the network would yet have the block that we'd attempt to retrieve.
func (d *Downloader) BeaconDevSync(mode SyncMode, hash common.Hash, stop chan struct{}) error {
// Be very loud that this code should not be used in a live node
log.Warn("----------------------------------")
log.Warn("Beacon syncing with hash as target", "hash", hash)
log.Warn("This is unhealthy for a live node!")
log.Warn("----------------------------------")
log.Info("Waiting for peers to retrieve sync target")
for {
// If the node is going down, unblock
select {
case <-stop:
return errors.New("stop requested")
default:
}
// Pick a random peer to sync from and keep retrying if none are yet
// available due to fresh startup
d.peers.lock.RLock()
var peer *peerConnection
for _, peer = range d.peers.peers {
break
}
d.peers.lock.RUnlock()
if peer == nil {
time.Sleep(time.Second)
continue
}
// Found a peer, attempt to retrieve the header whilst blocking and
// retry if it fails for whatever reason
log.Info("Attempting to retrieve sync target", "peer", peer.id)
headers, metas, err := d.fetchHeadersByHash(peer, hash, 1, 0, false)
if err != nil || len(headers) != 1 {
log.Warn("Failed to fetch sync target", "headers", len(headers), "err", err)
time.Sleep(time.Second)
continue
}
// Head header retrieved, if the hash matches, start the actual sync
if metas[0] != hash {
log.Error("Received invalid sync target", "want", hash, "have", metas[0])
time.Sleep(time.Second)
continue
}
return d.BeaconSync(mode, headers[0], headers[0])
}
}

View File

@ -286,11 +286,6 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
} }
} }
// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
return d.synchronising.Load()
}
// RegisterPeer injects a new download peer into the set of block source to be // RegisterPeer injects a new download peer into the set of block source to be
// used for fetching hashes and blocks from. // used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
@ -309,11 +304,6 @@ func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
return nil return nil
} }
// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
}
// UnregisterPeer removes a peer from the known list, preventing any action from // the specified peer. An effort is also made to return any pending fetches into
// the specified peer. An effort is also made to return any pending fetches into // the specified peer. An effort is also made to return any pending fetches into
// the queue. // the queue.

View File

@ -177,7 +177,7 @@ func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
// function can be used to retrieve batches of headers from the particular peer. // function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
// Service the header query via the live handler code // Service the header query via the live handler code
rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{ rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
Hash: origin, Hash: origin,
}, },
@ -205,7 +205,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockHeadersPacket)(&headers), Res: (*eth.BlockHeadersRequest)(&headers),
Meta: hashes, Meta: hashes,
Time: 1, Time: 1,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
@ -221,7 +221,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount i
// function can be used to retrieve batches of headers from the particular peer. // function can be used to retrieve batches of headers from the particular peer.
func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
// Service the header query via the live handler code // Service the header query via the live handler code
rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersPacket{ rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, &eth.GetBlockHeadersRequest{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
Number: origin, Number: origin,
}, },
@ -249,7 +249,7 @@ func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int,
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockHeadersPacket)(&headers), Res: (*eth.BlockHeadersRequest)(&headers),
Meta: hashes, Meta: hashes,
Time: 1, Time: 1,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
@ -286,7 +286,7 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockBodiesPacket)(&bodies), Res: (*eth.BlockBodiesResponse)(&bodies),
Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}, Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes},
Time: 1, Time: 1,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
@ -317,7 +317,7 @@ func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.ReceiptsPacket)(&receipts), Res: (*eth.ReceiptsResponse)(&receipts),
Meta: hashes, Meta: hashes,
Time: 1, Time: 1,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
@ -437,9 +437,9 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
} }
} }
func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) }
func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) } func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) }
func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) }
func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) } func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) }
func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) } func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) }
func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) } func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) }
@ -461,8 +461,8 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if a large batch of blocks are being downloaded, it is throttled // Tests that if a large batch of blocks are being downloaded, it is throttled
// until the cached blocks are retrieved. // until the cached blocks are retrieved.
func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) }
func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) } func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) }
func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) } func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) }
func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) } func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) }
@ -543,9 +543,9 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
// Tests that simple synchronization against a forked chain works correctly. In // Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full // this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed. // binary search should be executed.
func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) }
func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) } func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) }
func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) }
func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) } func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) }
func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) } func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) }
func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) } func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) }
@ -573,9 +573,9 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that synchronising against a much shorter but much heavier fork works // Tests that synchronising against a much shorter but much heavier fork works
// currently and is not dropped. // currently and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) }
func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) }
func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) } func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) }
func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) } func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) }
func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) } func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) }
@ -605,9 +605,9 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current // Tests that chain forks are contained within a certain interval of the current
// chain head, ensuring that malicious peers cannot waste resources by feeding // chain head, ensuring that malicious peers cannot waste resources by feeding
// long dead chains. // long dead chains.
func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) } func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) }
func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) } func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) }
func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) } func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) }
func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) } func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) }
func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) } func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) }
func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) } func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) }
@ -636,14 +636,14 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current // Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they // chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths. // take different ancestor lookup paths.
func TestBoundedHeavyForkedSync66Full(t *testing.T) { func TestBoundedHeavyForkedSync68Full(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH66, FullSync) testBoundedHeavyForkedSync(t, eth.ETH68, FullSync)
} }
func TestBoundedHeavyForkedSync66Snap(t *testing.T) { func TestBoundedHeavyForkedSync68Snap(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync)
} }
func TestBoundedHeavyForkedSync66Light(t *testing.T) { func TestBoundedHeavyForkedSync68Light(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) testBoundedHeavyForkedSync(t, eth.ETH68, LightSync)
} }
func TestBoundedHeavyForkedSync67Full(t *testing.T) { func TestBoundedHeavyForkedSync67Full(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH67, FullSync) testBoundedHeavyForkedSync(t, eth.ETH67, FullSync)
@ -678,9 +678,9 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
} }
// Tests that a canceled download wipes all previously accumulated state. // Tests that a canceled download wipes all previously accumulated state.
func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) } func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) }
func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) } func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) }
func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) } func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) }
func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) } func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) }
func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) } func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) }
func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) } func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) }
@ -708,9 +708,9 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) {
} }
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test). // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) }
func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) } func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) }
func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) }
func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) } func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) }
func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) } func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) }
func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) } func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) }
@ -735,9 +735,9 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
// Tests that synchronisations behave well in multi-version protocol environments // Tests that synchronisations behave well in multi-version protocol environments
// and not wreak havoc on other nodes in the network. // and not wreak havoc on other nodes in the network.
func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) }
func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) } func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) }
func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) }
func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) } func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) }
func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) } func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) }
func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) } func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) }
@ -750,7 +750,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
chain := testChainBase.shorten(blockCacheMaxItems - 15) chain := testChainBase.shorten(blockCacheMaxItems - 15)
// Create peers of every type // Create peers of every type
tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:]) tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])
// Synchronise with the requested peer and make sure all blocks were retrieved // Synchronise with the requested peer and make sure all blocks were retrieved
@ -760,7 +760,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
assertOwnChain(t, tester, len(chain.blocks)) assertOwnChain(t, tester, len(chain.blocks))
// Check that no peers have been dropped off // Check that no peers have been dropped off
for _, version := range []int{66, 67} { for _, version := range []int{68, 67} {
peer := fmt.Sprintf("peer %d", version) peer := fmt.Sprintf("peer %d", version)
if _, ok := tester.peers[peer]; !ok { if _, ok := tester.peers[peer]; !ok {
t.Errorf("%s dropped", peer) t.Errorf("%s dropped", peer)
@ -770,9 +770,9 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if a block is empty (e.g. header only), no body request should be // Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself. // made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) }
func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) } func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) }
func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) }
func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) } func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) }
func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) } func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) }
func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) } func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) }
@ -821,9 +821,9 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
// Tests that headers are enqueued continuously, preventing malicious nodes from // Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains. // stalling the downloader by feeding gapped header chains.
func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) }
func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) } func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) }
func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) }
func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) } func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) }
func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) } func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) }
func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) } func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) }
@ -850,9 +850,9 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if requested headers are shifted (i.e. first is missing), the queue // Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering. // detects the invalid numbering.
func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) }
func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) } func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) }
func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) }
func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) } func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) }
func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) } func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) }
func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) } func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) }
@ -880,14 +880,14 @@ func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
// Tests that a peer advertising a high TD doesn't get to stall the downloader // Tests that a peer advertising a high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes. // afterwards by not sending any useful hashes.
func TestHighTDStarvationAttack66Full(t *testing.T) { func TestHighTDStarvationAttack68Full(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH66, FullSync) testHighTDStarvationAttack(t, eth.ETH68, FullSync)
} }
func TestHighTDStarvationAttack66Snap(t *testing.T) { func TestHighTDStarvationAttack68Snap(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH66, SnapSync) testHighTDStarvationAttack(t, eth.ETH68, SnapSync)
} }
func TestHighTDStarvationAttack66Light(t *testing.T) { func TestHighTDStarvationAttack68Light(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH66, LightSync) testHighTDStarvationAttack(t, eth.ETH68, LightSync)
} }
func TestHighTDStarvationAttack67Full(t *testing.T) { func TestHighTDStarvationAttack67Full(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH67, FullSync) testHighTDStarvationAttack(t, eth.ETH67, FullSync)
@ -911,7 +911,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
} }
// Tests that misbehaving peers are disconnected, whilst behaving ones are not. // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) }
func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) } func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) }
func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
@ -960,9 +960,9 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
// Tests that synchronisation progress (origin block number, current block number // Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly. // and highest block number) is tracked and updated correctly.
func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) }
func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) } func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) }
func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) }
func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) } func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) }
func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) } func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) }
func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) } func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) }
@ -1040,9 +1040,9 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
// Tests that synchronisation progress (origin block number and highest block // Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head // number) is tracked and updated correctly in case of a fork (or manual head
// reversal). // reversal).
func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) }
func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) } func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) }
func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) }
func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) } func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) }
func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) } func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) }
func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) } func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) }
@ -1114,9 +1114,9 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if synchronisation is aborted due to some failure, then the progress // Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the // origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance. // continuation of the previous sync and not a new instance.
func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) }
func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) } func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) }
func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) }
func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) } func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) }
func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) } func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) } func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) }
@ -1183,9 +1183,9 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
// Tests that if an attacker fakes a chain height, after the attack is detected, // Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation. // the progress height is successfully reduced at the next sync invocation.
func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) }
func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) } func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) }
func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) }
func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) } func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) }
func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) } func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) }
func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) } func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) }
@ -1330,8 +1330,10 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
// Tests that peers below a pre-configured checkpoint block are prevented from // Tests that peers below a pre-configured checkpoint block are prevented from
// being fast-synced from, avoiding potential cheap eclipse attacks. // being fast-synced from, avoiding potential cheap eclipse attacks.
func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) } func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) }
func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) } func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) }
func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) }
func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) }
func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

View File

@ -58,14 +58,14 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo
case res := <-resCh: case res := <-resCh:
// Headers successfully retrieved, update the metrics // Headers successfully retrieved, update the metrics
headerReqTimer.Update(time.Since(start)) headerReqTimer.Update(time.Since(start))
headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest))))
// Don't reject the packet even if it turns out to be bad, the downloader will // Don't reject the packet even if it turns out to be bad, the downloader will
// disconnect the peer on its own terms. Simply deliver the headers to // disconnect the peer on its own terms. Simply deliver the headers to
// be processed by the caller // be processed by the caller
res.Done <- nil res.Done <- nil
return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil
} }
} }
@ -103,13 +103,13 @@ func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amou
case res := <-resCh: case res := <-resCh:
// Headers successfully retrieved, update the metrics // Headers successfully retrieved, update the metrics
headerReqTimer.Update(time.Since(start)) headerReqTimer.Update(time.Since(start))
headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersRequest))))
// Don't reject the packet even if it turns out to be bad, the downloader will // Don't reject the packet even if it turns out to be bad, the downloader will
// disconnect the peer on its own terms. Simply deliver the headers to // disconnect the peer on its own terms. Simply deliver the headers to
// be processed by the caller // be processed by the caller
res.Done <- nil res.Done <- nil
return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash), nil
} }
} }
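The BlockHeadersPacket to BlockHeadersRequest rename in both helpers is nominal: the response payload reuses the request type's shape, so callers still type-assert and dereference to recover the header slice. A small sketch of that consumption pattern (hypothetical helper name):

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// unpackHeaders mirrors the assertions above: the generic response carries
// the headers as *eth.BlockHeadersRequest and the precomputed hashes as meta.
func unpackHeaders(res *eth.Response) ([]*types.Header, []common.Hash) {
	return *res.Res.(*eth.BlockHeadersRequest), res.Meta.([]common.Hash)
}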

View File

@ -89,7 +89,7 @@ func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan
// deliver is responsible for taking a generic response packet from the concurrent // deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the body data and delivering it to the downloader's queue. // fetcher, unpacking the body data and delivering it to the downloader's queue.
func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesPacket).Unpack() txs, uncles, withdrawals := packet.Res.(*eth.BlockBodiesResponse).Unpack()
hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes} hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes, withdrawal hashes}
accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2]) accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1], withdrawals, hashsets[2])

View File

@ -81,7 +81,7 @@ func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh cha
// deliver is responsible for taking a generic response packet from the concurrent // deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the header data and delivering it to the downloader's queue. // fetcher, unpacking the header data and delivering it to the downloader's queue.
func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
headers := *packet.Res.(*eth.BlockHeadersPacket) headers := *packet.Res.(*eth.BlockHeadersRequest)
hashes := packet.Meta.([]common.Hash) hashes := packet.Meta.([]common.Hash)
accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh) accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh)

View File

@ -88,7 +88,7 @@ func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh ch
// deliver is responsible for taking a generic response packet from the concurrent // deliver is responsible for taking a generic response packet from the concurrent
// fetcher, unpacking the receipt data and delivering it to the downloader's queue. // fetcher, unpacking the receipt data and delivering it to the downloader's queue.
func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
receipts := *packet.Res.(*eth.ReceiptsPacket) receipts := *packet.Res.(*eth.ReceiptsResponse)
hashes := packet.Meta.([]common.Hash) // {receipt hashes} hashes := packet.Meta.([]common.Hash) // {receipt hashes}
accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes) accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes)

View File

@ -55,39 +55,16 @@ type peerConnection struct {
lock sync.RWMutex lock sync.RWMutex
} }
// LightPeer encapsulates the methods required to synchronise with a remote light peer. // Peer encapsulates the methods required to synchronise with a remote full peer.
type LightPeer interface { type Peer interface {
Head() (common.Hash, *big.Int) Head() (common.Hash, *big.Int)
RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error)
RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error) RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error)
}
// Peer encapsulates the methods required to synchronise with a remote full peer.
type Peer interface {
LightPeer
RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error)
RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error)
} }
// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.
type lightPeerWrapper struct {
peer LightPeer
}
func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() }
func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
return w.peer.RequestHeadersByHash(h, amount, skip, reverse, sink)
}
func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
return w.peer.RequestHeadersByNumber(i, amount, skip, reverse, sink)
}
func (w *lightPeerWrapper) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
panic("RequestBodies not supported in light client mode sync")
}
func (w *lightPeerWrapper) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
panic("RequestReceipts not supported in light client mode sync")
}
// newPeerConnection creates a new downloader peer. // newPeerConnection creates a new downloader peer.
func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection { func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection {
return &peerConnection{ return &peerConnection{
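With light-mode sync removed, the LightPeer split and the lightPeerWrapper shim above are gone; every registered peer must now implement the full Peer interface. A minimal stub satisfying it, e.g. for tests (hypothetical mockPeer, zero-value returns only):

package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// mockPeer is a do-nothing stand-in covering the consolidated Peer interface.
type mockPeer struct {
	head common.Hash
	td   *big.Int
}

func (p *mockPeer) Head() (common.Hash, *big.Int) { return p.head, p.td }
func (p *mockPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) {
	return nil, nil
}
func (p *mockPeer) RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error) {
	return nil, nil
}
func (p *mockPeer) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	return nil, nil
}
func (p *mockPeer) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
	return nil, nil
}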

View File

@ -794,7 +794,7 @@ func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) {
case res := <-resCh: case res := <-resCh:
// Headers successfully retrieved, update the metrics // Headers successfully retrieved, update the metrics
headers := *res.Res.(*eth.BlockHeadersPacket) headers := *res.Res.(*eth.BlockHeadersRequest)
headerReqTimer.Update(time.Since(start)) headerReqTimer.Update(time.Since(start))
s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers)) s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers))

View File

@ -173,7 +173,7 @@ func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, ski
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockHeadersPacket)(&headers), Res: (*eth.BlockHeadersRequest)(&headers),
Meta: hashes, Meta: hashes,
Time: 1, Time: 1,
Done: make(chan error), Done: make(chan error),
@ -811,7 +811,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
// Create a peer set to feed headers through // Create a peer set to feed headers through
peerset := newPeerSet() peerset := newPeerSet()
for _, peer := range tt.peers { for _, peer := range tt.peers {
peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id))) peerset.Register(newPeerConnection(peer.id, eth.ETH67, peer, log.New("id", peer.id)))
} }
// Create a peer dropper to track malicious peers // Create a peer dropper to track malicious peers
dropped := make(map[string]int) dropped := make(map[string]int)
@ -913,7 +913,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
skeleton.Sync(tt.newHead, nil, true) skeleton.Sync(tt.newHead, nil, true)
} }
if tt.newPeer != nil { if tt.newPeer != nil {
if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH67, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
t.Errorf("test %d: failed to register new peer: %v", i, err) t.Errorf("test %d: failed to register new peer: %v", i, err)
} }
} }

View File

@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool/blobpool" "github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
@ -65,7 +64,6 @@ var Defaults = Config{
TxLookupLimit: 2350000, TxLookupLimit: 2350000,
TransactionHistory: 2350000, TransactionHistory: 2350000,
StateHistory: params.FullImmutabilityThreshold, StateHistory: params.FullImmutabilityThreshold,
StateScheme: rawdb.HashScheme,
LightPeers: 100, LightPeers: 100,
DatabaseCache: 512, DatabaseCache: 512,
TrieCleanCache: 154, TrieCleanCache: 154,
@ -84,7 +82,7 @@ var Defaults = Config{
//go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go
// Config contains configuration options for of the ETH and LES protocols. // Config contains configuration options for ETH and LES protocols.
type Config struct { type Config struct {
// The genesis block, which is inserted if the database is empty. // The genesis block, which is inserted if the database is empty.
// If nil, the Ethereum main net block is used. // If nil, the Ethereum main net block is used.
@ -106,7 +104,11 @@ type Config struct {
TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved. StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved.
StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top
// State scheme represents the scheme used to store ethereum states and trie
// nodes on top. It can be 'hash', 'path', or empty, which means using the scheme
// consistent with the persistent state.
StateScheme string `toml:",omitempty"`
// RequiredBlocks is a set of block number -> hash mappings which must be in the // RequiredBlocks is a set of block number -> hash mappings which must be in the
// canonical chain of all remote peers. Setting the option makes geth verify the // canonical chain of all remote peers. Setting the option makes geth verify the
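On the StateScheme side, removing the rawdb.HashScheme default is what lets the empty value mean "follow the database". A sketch of the choices from the embedding side (assuming the scheme constants from core/rawdb):

package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

func main() {
	cfg := ethconfig.Defaults
	cfg.StateScheme = rawdb.PathScheme // "path"; rawdb.HashScheme ("hash") is the legacy layout
	// Leaving StateScheme empty now defers to whichever scheme the opened
	// database already uses, instead of forcing the hash scheme.
	_ = cfg
}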
@ -184,7 +186,7 @@ func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (conse
return beacon.New(clique.New(config.Clique, db)), nil return beacon.New(clique.New(config.Clique, db)), nil
} }
// If defaulting to proof-of-work, enforce an already merged network since // If defaulting to proof-of-work, enforce an already merged network since
// we cannot run PoW algorithms and more, so we cannot even follow a chain // we cannot run PoW algorithms anymore, so we cannot even follow a chain
// not coordinated by a beacon node. // not coordinated by a beacon node.
if !config.TerminalTotalDifficultyPassed { if !config.TerminalTotalDifficultyPassed {
return nil, errors.New("ethash is only supported as a historical component of already merged networks") return nil, errors.New("ethash is only supported as a historical component of already merged networks")

View File

@ -483,7 +483,7 @@ func (f *BlockFetcher) loop() {
select { select {
case res := <-resCh: case res := <-resCh:
res.Done <- nil res.Done <- nil
f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time)) f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersRequest), time.Now().Add(res.Time))
case <-timeout.C: case <-timeout.C:
// The peer didn't respond in time. The request // The peer didn't respond in time. The request
@ -541,7 +541,7 @@ func (f *BlockFetcher) loop() {
case res := <-resCh: case res := <-resCh:
res.Done <- nil res.Done <- nil
// Ignoring withdrawals here, since the block fetcher is not used post-merge. // Ignoring withdrawals here, since the block fetcher is not used post-merge.
txs, uncles, _ := res.Res.(*eth.BlockBodiesPacket).Unpack() txs, uncles, _ := res.Res.(*eth.BlockBodiesResponse).Unpack()
f.FilterBodies(peer, txs, uncles, time.Now()) f.FilterBodies(peer, txs, uncles, time.Now())
case <-timeout.C: case <-timeout.C:

View File

@ -213,7 +213,7 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockHeadersPacket)(&headers), Res: (*eth.BlockHeadersRequest)(&headers),
Time: drift, Time: drift,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
} }
@ -255,7 +255,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ
} }
res := &eth.Response{ res := &eth.Response{
Req: req, Req: req,
Res: (*eth.BlockBodiesPacket)(&bodies), Res: (*eth.BlockBodiesResponse)(&bodies),
Time: drift, Time: drift,
Done: make(chan error, 1), // Ignore the returned status Done: make(chan error, 1), // Ignore the returned status
} }

View File

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"math"
mrand "math/rand" mrand "math/rand"
"sort" "sort"
"time" "time"
@ -38,23 +39,29 @@ const (
// can announce in a short time. // can announce in a short time.
maxTxAnnounces = 4096 maxTxAnnounces = 4096
// maxTxRetrievals is the maximum transaction number can be fetched in one // maxTxRetrievals is the maximum number of transactions that can be fetched
// request. The rationale to pick 256 is: // in one request. The rationale for picking 256 is to have a reasonable lower
// - In eth protocol, the softResponseLimit is 2MB. Nowadays according to // bound for the transferred data (don't waste RTTs, transfer more meaningful
// Etherscan the average transaction size is around 200B, so in theory // batch sizes), but also have an upper bound on the sequentiality to allow
// we can include lots of transaction in a single protocol packet. // using our entire peerset for deliveries.
// - However the maximum size of a single transaction is raised to 128KB, //
// so pick a middle value here to ensure we can maximize the efficiency // This number also acts as a failsafe against malicious announces which might
// of the retrieval and response size overflow won't happen in most cases. // cause us to request more data than we'd expect.
maxTxRetrievals = 256 maxTxRetrievals = 256
// maxTxRetrievalSize is the max number of bytes that delivered transactions
// should weigh according to the announcements. The 128KB was chosen to limit
// retrieving a maximum of one blob transaction at a time to minimize hogging
// a connection between two peers.
maxTxRetrievalSize = 128 * 1024
// maxTxUnderpricedSetSize is the size of the underpriced transaction set that // maxTxUnderpricedSetSize is the size of the underpriced transaction set that
// is used to track recent transactions that have been dropped so we don't // is used to track recent transactions that have been dropped so we don't
// re-request them. // re-request them.
maxTxUnderpricedSetSize = 32768 maxTxUnderpricedSetSize = 32768
// maxTxUnderpricedTimeout is the max time a transaction should be stuck in the underpriced set. // maxTxUnderpricedTimeout is the max time a transaction should be stuck in the underpriced set.
maxTxUnderpricedTimeout = int64(5 * time.Minute) maxTxUnderpricedTimeout = 5 * time.Minute
// txArriveTimeout is the time allowance before an announced transaction is // txArriveTimeout is the time allowance before an announced transaction is
// explicitly requested. // explicitly requested.
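Concretely: at the ~200 B average transaction size cited above, a full 256-hash request weighs only about 50 KB, comfortably inside the 2 MB soft response limit, while the 128 KiB cap means a single maximum-size blob transaction occupies a request on its own.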
@ -105,6 +112,14 @@ var (
type txAnnounce struct { type txAnnounce struct {
origin string // Identifier of the peer originating the notification origin string // Identifier of the peer originating the notification
hashes []common.Hash // Batch of transaction hashes being announced hashes []common.Hash // Batch of transaction hashes being announced
metas []*txMetadata // Batch of metadata associated with the hashes (nil before eth/68)
}
// txMetadata is a set of extra data transmitted along the announcement for better
// fetch scheduling.
type txMetadata struct {
kind byte // Transaction consensus type
size uint32 // Transaction size in bytes
} }
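On the delivery path this metadata is recomputed from the concrete transactions rather than trusted from the wire, which is what later makes the announce-vs-delivery comparison possible. A tiny illustrative helper (hypothetical name, local txMetadata mirror):

package main

import "github.com/ethereum/go-ethereum/core/types"

// txMetadata mirrors the fetcher-internal struct above.
type txMetadata struct {
	kind byte   // transaction consensus type
	size uint32 // transaction size in bytes
}

// metaOf derives announce-comparable metadata from a concrete transaction,
// exactly the two fields an eth/68 announcement carries per hash.
func metaOf(tx *types.Transaction) txMetadata {
	return txMetadata{kind: tx.Type(), size: uint32(tx.Size())}
}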
// txRequest represents an in-flight transaction retrieval request destined to // txRequest represents an in-flight transaction retrieval request destined to
@ -120,6 +135,7 @@ type txRequest struct {
type txDelivery struct { type txDelivery struct {
origin string // Identifier of the peer originating the notification origin string // Identifier of the peer originating the notification
hashes []common.Hash // Batch of transaction hashes having been delivered hashes []common.Hash // Batch of transaction hashes having been delivered
metas []txMetadata // Batch of metadata associated with the delivered hashes
direct bool // Whether this is a direct reply or a broadcast direct bool // Whether this is a direct reply or a broadcast
} }
@ -151,17 +167,17 @@ type TxFetcher struct {
drop chan *txDrop drop chan *txDrop
quit chan struct{} quit chan struct{}
underpriced *lru.Cache[common.Hash, int64] // Transactions discarded as too cheap (don't re-fetch) underpriced *lru.Cache[common.Hash, time.Time] // Transactions discarded as too cheap (don't re-fetch)
// Stage 1: Waiting lists for newly discovered transactions that might be // Stage 1: Waiting lists for newly discovered transactions that might be
// broadcast without needing explicit request/reply round trips. // broadcast without needing explicit request/reply round trips.
waitlist map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast waitlist map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast
waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection) waitslots map[string]map[common.Hash]*txMetadata // Waiting announcements grouped by peer (DoS protection)
// Stage 2: Queue of transactions waiting to be allocated to some peer // Stage 2: Queue of transactions waiting to be allocated to some peer
// to be retrieved directly. // to be retrieved directly.
announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer announces map[string]map[common.Hash]*txMetadata // Set of announced transactions, grouped by origin peer
announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
// Stage 3: Set of transactions currently being retrieved, some of which may be // Stage 3: Set of transactions currently being retrieved, some of which may be
@ -175,6 +191,7 @@ type TxFetcher struct {
hasTx func(common.Hash) bool // Retrieves a tx from the local txpool hasTx func(common.Hash) bool // Retrieves a tx from the local txpool
addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer
dropPeer func(string) // Drops a peer in case of announcement violation
step chan struct{} // Notification channel when the fetcher loop iterates step chan struct{} // Notification channel when the fetcher loop iterates
clock mclock.Clock // Time wrapper to simulate in tests clock mclock.Clock // Time wrapper to simulate in tests
@ -183,14 +200,14 @@ type TxFetcher struct {
// NewTxFetcher creates a transaction fetcher to retrieve transactions // NewTxFetcher creates a transaction fetcher to retrieve transactions
// based on hash announcements. // based on hash announcements.
func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher { func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string)) *TxFetcher {
return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil) return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, dropPeer, mclock.System{}, nil)
} }
// NewTxFetcherForTests is a testing method to mock out the realtime clock with // NewTxFetcherForTests is a testing method to mock out the realtime clock with
// a simulated version and the internal randomness with a deterministic one. // a simulated version and the internal randomness with a deterministic one.
func NewTxFetcherForTests( func NewTxFetcherForTests(
hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, dropPeer func(string),
clock mclock.Clock, rand *mrand.Rand) *TxFetcher { clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
return &TxFetcher{ return &TxFetcher{
notify: make(chan *txAnnounce), notify: make(chan *txAnnounce),
@ -199,16 +216,17 @@ func NewTxFetcherForTests(
quit: make(chan struct{}), quit: make(chan struct{}),
waitlist: make(map[common.Hash]map[string]struct{}), waitlist: make(map[common.Hash]map[string]struct{}),
waittime: make(map[common.Hash]mclock.AbsTime), waittime: make(map[common.Hash]mclock.AbsTime),
waitslots: make(map[string]map[common.Hash]struct{}), waitslots: make(map[string]map[common.Hash]*txMetadata),
announces: make(map[string]map[common.Hash]struct{}), announces: make(map[string]map[common.Hash]*txMetadata),
announced: make(map[common.Hash]map[string]struct{}), announced: make(map[common.Hash]map[string]struct{}),
fetching: make(map[common.Hash]string), fetching: make(map[common.Hash]string),
requests: make(map[string]*txRequest), requests: make(map[string]*txRequest),
alternates: make(map[common.Hash]map[string]struct{}), alternates: make(map[common.Hash]map[string]struct{}),
underpriced: lru.NewCache[common.Hash, int64](maxTxUnderpricedSetSize), underpriced: lru.NewCache[common.Hash, time.Time](maxTxUnderpricedSetSize),
hasTx: hasTx, hasTx: hasTx,
addTxs: addTxs, addTxs: addTxs,
fetchTxs: fetchTxs, fetchTxs: fetchTxs,
dropPeer: dropPeer,
clock: clock, clock: clock,
rand: rand, rand: rand,
} }
@ -216,7 +234,7 @@ func NewTxFetcherForTests(
// Notify announces the fetcher of the potential availability of a new batch of // Notify announces the fetcher of the potential availability of a new batch of
// transactions in the network. // transactions in the network.
func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { func (f *TxFetcher) Notify(peer string, types []byte, sizes []uint32, hashes []common.Hash) error {
// Keep track of all the announced transactions // Keep track of all the announced transactions
txAnnounceInMeter.Mark(int64(len(hashes))) txAnnounceInMeter.Mark(int64(len(hashes)))
@ -226,28 +244,35 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
// still valuable to check here because it runs concurrently with the internal // still valuable to check here because it runs concurrently with the internal
// loop, so anything caught here is time saved internally. // loop, so anything caught here is time saved internally.
var ( var (
unknowns = make([]common.Hash, 0, len(hashes)) unknownHashes = make([]common.Hash, 0, len(hashes))
unknownMetas = make([]*txMetadata, 0, len(hashes))
duplicate int64 duplicate int64
underpriced int64 underpriced int64
) )
for _, hash := range hashes { for i, hash := range hashes {
switch { switch {
case f.hasTx(hash): case f.hasTx(hash):
duplicate++ duplicate++
case f.isKnownUnderpriced(hash): case f.isKnownUnderpriced(hash):
underpriced++ underpriced++
default: default:
unknowns = append(unknowns, hash) unknownHashes = append(unknownHashes, hash)
if types == nil {
unknownMetas = append(unknownMetas, nil)
} else {
unknownMetas = append(unknownMetas, &txMetadata{kind: types[i], size: sizes[i]})
}
} }
} }
txAnnounceKnownMeter.Mark(duplicate) txAnnounceKnownMeter.Mark(duplicate)
txAnnounceUnderpricedMeter.Mark(underpriced) txAnnounceUnderpricedMeter.Mark(underpriced)
// If anything's left to announce, push it into the internal loop // If anything's left to announce, push it into the internal loop
if len(unknowns) == 0 { if len(unknownHashes) == 0 {
return nil return nil
} }
announce := &txAnnounce{origin: peer, hashes: unknowns} announce := &txAnnounce{origin: peer, hashes: unknownHashes, metas: unknownMetas}
select { select {
case f.notify <- announce: case f.notify <- announce:
return nil return nil
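Callers are expected to keep the three slices parallel: an eth/68 handler passes the announced types and sizes through, while a pre-68 handler passes nil for both. A hedged sketch of the call shape:

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/fetcher"
)

// relayAnnounce forwards a transaction announcement to the fetcher. For
// eth/68 peers kinds/sizes are same-length companions of hashes; older
// peers announce bare hashes and both metadata slices stay nil.
func relayAnnounce(f *fetcher.TxFetcher, peer string, kinds []byte, sizes []uint32, hashes []common.Hash) error {
	return f.Notify(peer, kinds, sizes, hashes)
}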
@ -259,7 +284,7 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
// isKnownUnderpriced reports whether a transaction hash was recently found to be underpriced. // isKnownUnderpriced reports whether a transaction hash was recently found to be underpriced.
func (f *TxFetcher) isKnownUnderpriced(hash common.Hash) bool { func (f *TxFetcher) isKnownUnderpriced(hash common.Hash) bool {
prevTime, ok := f.underpriced.Peek(hash) prevTime, ok := f.underpriced.Peek(hash)
if ok && prevTime+maxTxUnderpricedTimeout < time.Now().Unix() { if ok && prevTime.Before(time.Now().Add(-maxTxUnderpricedTimeout)) {
f.underpriced.Remove(hash) f.underpriced.Remove(hash)
return false return false
} }
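Switching the cache value from a Unix int64 to time.Time keeps the comparison in one type and one unit; the pattern generalises to any bounded "negative cache with TTL". A self-contained sketch using the same common/lru package:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
)

// ttlSeen is a bounded negative cache: entries older than ttl are evicted
// lazily on lookup, mirroring isKnownUnderpriced above.
type ttlSeen struct {
	cache *lru.Cache[common.Hash, time.Time]
	ttl   time.Duration
}

func (s *ttlSeen) seen(h common.Hash) bool {
	at, ok := s.cache.Peek(h)
	if ok && at.Before(time.Now().Add(-s.ttl)) {
		s.cache.Remove(h)
		return false
	}
	return ok
}

func main() {
	s := &ttlSeen{cache: lru.NewCache[common.Hash, time.Time](1024), ttl: 5 * time.Minute}
	s.cache.Add(common.Hash{0x01}, time.Now())
	_ = s.seen(common.Hash{0x01}) // true until the entry ages past ttl
}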
@ -290,6 +315,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
// re-requesting them and dropping the peer in case of malicious transfers. // re-requesting them and dropping the peer in case of malicious transfers.
var ( var (
added = make([]common.Hash, 0, len(txs)) added = make([]common.Hash, 0, len(txs))
metas = make([]txMetadata, 0, len(txs))
) )
// proceed in batches // proceed in batches
for i := 0; i < len(txs); i += 128 { for i := 0; i < len(txs); i += 128 {
@ -309,7 +335,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
// Avoid re-request this transaction when we receive another // Avoid re-request this transaction when we receive another
// announcement. // announcement.
if errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) { if errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) {
f.underpriced.Add(batch[j].Hash(), batch[j].Time().Unix()) f.underpriced.Add(batch[j].Hash(), batch[j].Time())
} }
// Track a few interesting failure types // Track a few interesting failure types
switch { switch {
@ -325,6 +351,10 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
otherreject++ otherreject++
} }
added = append(added, batch[j].Hash()) added = append(added, batch[j].Hash())
metas = append(metas, txMetadata{
kind: batch[j].Type(),
size: uint32(batch[j].Size()),
})
} }
knownMeter.Mark(duplicate) knownMeter.Mark(duplicate)
underpricedMeter.Mark(underpriced) underpricedMeter.Mark(underpriced)
@ -337,7 +367,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
} }
} }
select { select {
case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}: case f.cleanup <- &txDelivery{origin: peer, hashes: added, metas: metas, direct: direct}:
return nil return nil
case <-f.quit: case <-f.quit:
return errTerminated return errTerminated
@ -394,13 +424,15 @@ func (f *TxFetcher) loop() {
want := used + len(ann.hashes) want := used + len(ann.hashes)
if want > maxTxAnnounces { if want > maxTxAnnounces {
txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces)) txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
ann.hashes = ann.hashes[:len(ann.hashes)-(want-maxTxAnnounces)] ann.hashes = ann.hashes[:len(ann.hashes)-(want-maxTxAnnounces)]
ann.metas = ann.metas[:len(ann.metas)-(want-maxTxAnnounces)]
} }
// All is well, schedule the remainder of the transactions // All is well, schedule the remainder of the transactions
idleWait := len(f.waittime) == 0 idleWait := len(f.waittime) == 0
_, oldPeer := f.announces[ann.origin] _, oldPeer := f.announces[ann.origin]
for _, hash := range ann.hashes { for i, hash := range ann.hashes {
// If the transaction is already downloading, add it to the list // If the transaction is already downloading, add it to the list
// of possible alternates (in case the current retrieval fails) and // of possible alternates (in case the current retrieval fails) and
// also account it for the peer. // also account it for the peer.
@ -409,9 +441,9 @@ func (f *TxFetcher) loop() {
// Stage 2 and 3 share the set of origins per tx // Stage 2 and 3 share the set of origins per tx
if announces := f.announces[ann.origin]; announces != nil { if announces := f.announces[ann.origin]; announces != nil {
announces[hash] = struct{}{} announces[hash] = ann.metas[i]
} else { } else {
f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}} f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
} }
continue continue
} }
@ -422,9 +454,9 @@ func (f *TxFetcher) loop() {
// Stage 2 and 3 share the set of origins per tx // Stage 2 and 3 share the set of origins per tx
if announces := f.announces[ann.origin]; announces != nil { if announces := f.announces[ann.origin]; announces != nil {
announces[hash] = struct{}{} announces[hash] = ann.metas[i]
} else { } else {
f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}} f.announces[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
} }
continue continue
} }
@ -432,12 +464,18 @@ func (f *TxFetcher) loop() {
// yet downloading, add the peer as an alternate origin in the // yet downloading, add the peer as an alternate origin in the
// waiting list. // waiting list.
if f.waitlist[hash] != nil { if f.waitlist[hash] != nil {
// Ignore double announcements from the same peer. This is
// especially important if metadata is also passed along to
// prevent malicious peers flip-flopping good/bad values.
if _, ok := f.waitlist[hash][ann.origin]; ok {
continue
}
f.waitlist[hash][ann.origin] = struct{}{} f.waitlist[hash][ann.origin] = struct{}{}
if waitslots := f.waitslots[ann.origin]; waitslots != nil { if waitslots := f.waitslots[ann.origin]; waitslots != nil {
waitslots[hash] = struct{}{} waitslots[hash] = ann.metas[i]
} else { } else {
f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}} f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
} }
continue continue
} }
@ -446,9 +484,9 @@ func (f *TxFetcher) loop() {
f.waittime[hash] = f.clock.Now() f.waittime[hash] = f.clock.Now()
if waitslots := f.waitslots[ann.origin]; waitslots != nil { if waitslots := f.waitslots[ann.origin]; waitslots != nil {
waitslots[hash] = struct{}{} waitslots[hash] = ann.metas[i]
} else { } else {
f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}} f.waitslots[ann.origin] = map[common.Hash]*txMetadata{hash: ann.metas[i]}
} }
} }
// If a new item was added to the waitlist, schedule it into the fetcher // If a new item was added to the waitlist, schedule it into the fetcher
@ -474,9 +512,9 @@ func (f *TxFetcher) loop() {
f.announced[hash] = f.waitlist[hash] f.announced[hash] = f.waitlist[hash]
for peer := range f.waitlist[hash] { for peer := range f.waitlist[hash] {
if announces := f.announces[peer]; announces != nil { if announces := f.announces[peer]; announces != nil {
announces[hash] = struct{}{} announces[hash] = f.waitslots[peer][hash]
} else { } else {
f.announces[peer] = map[common.Hash]struct{}{hash: {}} f.announces[peer] = map[common.Hash]*txMetadata{hash: f.waitslots[peer][hash]}
} }
delete(f.waitslots[peer], hash) delete(f.waitslots[peer], hash)
if len(f.waitslots[peer]) == 0 { if len(f.waitslots[peer]) == 0 {
@ -545,10 +583,28 @@ func (f *TxFetcher) loop() {
case delivery := <-f.cleanup: case delivery := <-f.cleanup:
// Independent if the delivery was direct or broadcast, remove all // Independent if the delivery was direct or broadcast, remove all
// traces of the hash from internal trackers // traces of the hash from internal trackers. That said, compare any
for _, hash := range delivery.hashes { // advertised metadata with the real ones and drop bad peers.
for i, hash := range delivery.hashes {
if _, ok := f.waitlist[hash]; ok { if _, ok := f.waitlist[hash]; ok {
for peer, txset := range f.waitslots { for peer, txset := range f.waitslots {
if meta := txset[hash]; meta != nil {
if delivery.metas[i].kind != meta.kind {
log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
f.dropPeer(peer)
} else if delivery.metas[i].size != meta.size {
if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
// Normally we should drop a peer considering this is a protocol violation.
// However, due to the RLP vs consensus format messiness, allow a few bytes
// wiggle-room where we only warn, but don't drop.
//
// TODO(karalabe): Get rid of this relaxation when clients are proven stable.
f.dropPeer(peer)
}
}
}
delete(txset, hash) delete(txset, hash)
if len(txset) == 0 { if len(txset) == 0 {
delete(f.waitslots, peer) delete(f.waitslots, peer)
@ -558,6 +614,23 @@ func (f *TxFetcher) loop() {
delete(f.waittime, hash) delete(f.waittime, hash)
} else { } else {
for peer, txset := range f.announces { for peer, txset := range f.announces {
if meta := txset[hash]; meta != nil {
if delivery.metas[i].kind != meta.kind {
log.Warn("Announced transaction type mismatch", "peer", peer, "tx", hash, "type", delivery.metas[i].kind, "ann", meta.kind)
f.dropPeer(peer)
} else if delivery.metas[i].size != meta.size {
if math.Abs(float64(delivery.metas[i].size)-float64(meta.size)) > 8 {
log.Warn("Announced transaction size mismatch", "peer", peer, "tx", hash, "size", delivery.metas[i].size, "ann", meta.size)
// Normally we should drop a peer considering this is a protocol violation.
// However, due to the RLP vs consensus format messiness, allow a few bytes
// wiggle-room where we only warn, but don't drop.
//
// TODO(karalabe): Get rid of this relaxation when clients are proven stable.
f.dropPeer(peer)
}
}
}
delete(txset, hash) delete(txset, hash)
if len(txset) == 0 { if len(txset) == 0 {
delete(f.announces, peer) delete(f.announces, peer)
@ -794,9 +867,15 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{},
if len(f.announces[peer]) == 0 { if len(f.announces[peer]) == 0 {
return // continue in the for-each return // continue in the for-each
} }
hashes := make([]common.Hash, 0, maxTxRetrievals) var (
f.forEachHash(f.announces[peer], func(hash common.Hash) bool { hashes = make([]common.Hash, 0, maxTxRetrievals)
if _, ok := f.fetching[hash]; !ok { bytes uint64
)
f.forEachAnnounce(f.announces[peer], func(hash common.Hash, meta *txMetadata) bool {
// If the transaction is already fetching, skip to the next one
if _, ok := f.fetching[hash]; ok {
return true
}
// Mark the hash as fetching and stash away possible alternates // Mark the hash as fetching and stash away possible alternates
f.fetching[hash] = peer f.fetching[hash] = peer
@ -811,8 +890,13 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{},
if len(hashes) >= maxTxRetrievals { if len(hashes) >= maxTxRetrievals {
return false // break in the for-each return false // break in the for-each
} }
if meta != nil { // Only set eth/68 and upwards
bytes += uint64(meta.size)
if bytes >= maxTxRetrievalSize {
return false
} }
return true // continue in the for-each }
return true // scheduled, try to add more
}) })
// If any hashes were allocated, request them from the peer // If any hashes were allocated, request them from the peer
if len(hashes) > 0 { if len(hashes) > 0 {
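The loop above cuts each request at whichever bound trips first: the 256-hash count cap always applies, the 128 KiB byte cap only when the peer announced sizes (eth/68). The accumulation pattern in isolation (hypothetical pick helper, fetching and alternate bookkeeping omitted):

package main

import "github.com/ethereum/go-ethereum/common"

const (
	maxRetrievals    = 256        // count cap, as above
	maxRetrievalSize = 128 * 1024 // byte cap, metadata permitting
)

// pick walks announcements (hash -> announced size, 0 when unknown) and
// cuts the batch at whichever bound trips first.
func pick(ann map[common.Hash]uint32) []common.Hash {
	var (
		hashes []common.Hash
		bytes  uint64
	)
	for hash, size := range ann {
		hashes = append(hashes, hash)
		if len(hashes) >= maxRetrievals {
			break
		}
		if size > 0 { // only eth/68 peers announce sizes
			if bytes += uint64(size); bytes >= maxRetrievalSize {
				break
			}
		}
	}
	return hashes
}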
@ -857,27 +941,28 @@ func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string))
} }
} }
// forEachHash does a range loop over a map of hashes in production, but during // forEachAnnounce does a range loop over a map of announcements in production,
// testing it does a deterministic sorted random to allow reproducing issues. // but during testing it does a deterministic sorted random to allow reproducing
func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) { // issues.
func (f *TxFetcher) forEachAnnounce(announces map[common.Hash]*txMetadata, do func(hash common.Hash, meta *txMetadata) bool) {
// If we're running production, use whatever Go's map gives us // If we're running production, use whatever Go's map gives us
if f.rand == nil { if f.rand == nil {
for hash := range hashes { for hash, meta := range announces {
if !do(hash) { if !do(hash, meta) {
return return
} }
} }
return return
} }
// We're running the test suite, make iteration deterministic // We're running the test suite, make iteration deterministic
list := make([]common.Hash, 0, len(hashes)) list := make([]common.Hash, 0, len(announces))
for hash := range hashes { for hash := range announces {
list = append(list, hash) list = append(list, hash)
} }
sortHashes(list) sortHashes(list)
rotateHashes(list, f.rand.Intn(len(list))) rotateHashes(list, f.rand.Intn(len(list)))
for _, hash := range list { for _, hash := range list {
if !do(hash) { if !do(hash, announces[hash]) {
return return
} }
} }
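The determinism trick is worth pulling out: sorting yields a canonical order, and rotating by a seeded random offset still varies the starting point reproducibly. In isolation (a sketch, not the fetcher's own sortHashes/rotateHashes):

package main

import (
	"bytes"
	"fmt"
	"math/rand"
	"sort"

	"github.com/ethereum/go-ethereum/common"
)

// deterministicOrder sorts hashes canonically, then rotates by a seeded
// random offset: reproducible across runs, yet not biased to one start.
func deterministicOrder(hashes []common.Hash, rng *rand.Rand) []common.Hash {
	if len(hashes) == 0 {
		return hashes
	}
	sort.Slice(hashes, func(i, j int) bool {
		return bytes.Compare(hashes[i][:], hashes[j][:]) < 0
	})
	off := rng.Intn(len(hashes))
	out := make([]common.Hash, 0, len(hashes))
	out = append(out, hashes[off:]...)
	return append(out, hashes[:off]...)
}

func main() {
	rng := rand.New(rand.NewSource(42)) // fixed seed => fixed iteration order
	fmt.Println(deterministicOrder([]common.Hash{{0x03}, {0x01}, {0x02}}, rng))
}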

View File

@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
) )
var ( var (
@ -41,9 +42,20 @@ var (
testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()} testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
) )
type announce struct {
hash common.Hash
kind *byte
size *uint32
}
func typeptr(t byte) *byte { return &t }
func sizeptr(n uint32) *uint32 { return &n }
type doTxNotify struct { type doTxNotify struct {
peer string peer string
hashes []common.Hash hashes []common.Hash
types []byte
sizes []uint32
} }
type doTxEnqueue struct { type doTxEnqueue struct {
peer string peer string
@ -57,7 +69,14 @@ type doWait struct {
type doDrop string type doDrop string
type doFunc func() type doFunc func()
type isWaitingWithMeta map[string][]announce
type isWaiting map[string][]common.Hash type isWaiting map[string][]common.Hash
type isScheduledWithMeta struct {
tracking map[string][]announce
fetching map[string][]common.Hash
dangling map[string][]common.Hash
}
type isScheduled struct { type isScheduled struct {
tracking map[string][]common.Hash tracking map[string][]common.Hash
fetching map[string][]common.Hash fetching map[string][]common.Hash
@ -81,6 +100,7 @@ func TestTransactionFetcherWaiting(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -162,6 +182,212 @@ func TestTransactionFetcherWaiting(t *testing.T) {
}) })
} }
// Tests that transaction announcements with associated metadata are added to a
// waitlist, and none of them are scheduled for retrieval until the wait expires.
//
// This test is an extended version of TestTransactionFetcherWaiting. It's mostly
// to cover the metadata checks without bloating up the basic behavioral tests
// with all the useless extra fields.
func TestTransactionFetcherWaitingWithMeta(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
nil,
)
},
steps: []interface{}{
// Initial announcement to get something into the waitlist
doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
},
}),
// Announce from a new peer to check that no overwrite happens
doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{333, 444}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
}),
// Announce clashing hashes but unique new peer
doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 444}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
}),
// Announce existing and clashing hashes from existing peer. Clashes
// should not overwrite previous announcements.
doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}, types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{999, 333, 555}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
}),
// Announce clashing hashes with conflicting metadata. Somebody will
// be in the wrong, but we don't know yet who.
doTxNotify{peer: "D", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.BlobTxType}, sizes: []uint32{999, 222}},
isWaitingWithMeta(map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"D": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
},
}),
isScheduled{tracking: nil, fetching: nil},
// Wait for the arrival timeout which should move all expired items
// from the wait list to the scheduler
doWait{time: txArriveTimeout, step: true},
isWaiting(nil),
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"D": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
},
},
fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
"A": {{0x03}, {0x05}},
"C": {{0x01}, {0x04}},
"D": {{0x02}},
},
},
// Queue up a non-fetchable transaction and then trigger it with a new
// peer (weird case to test 1 line in the fetcher)
doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
isWaitingWithMeta(map[string][]announce{
"C": {
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
},
}),
doWait{time: txArriveTimeout, step: true},
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
},
"D": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
},
},
fetching: map[string][]common.Hash{
"A": {{0x03}, {0x05}},
"C": {{0x01}, {0x04}},
"D": {{0x02}},
},
},
doTxNotify{peer: "E", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
},
"B": {
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
},
"C": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
},
"D": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
},
"E": {
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
},
},
fetching: map[string][]common.Hash{
"A": {{0x03}, {0x05}},
"C": {{0x01}, {0x04}},
"D": {{0x02}},
"E": {{0x06}, {0x07}},
},
},
},
})
}
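The typeptr and sizeptr helpers used throughout these expectations are not shown in this hunk; since Go cannot take the address of a literal, they are presumably trivial pointer constructors along these lines:

// assumed helpers: return pointers to copies of their arguments, so the
// announce expectations above can carry optional (nil-able) metadata
func typeptr(t byte) *byte     { return &t }
func sizeptr(n uint32) *uint32 { return &n }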
// Tests that transaction announcements skip the waiting list if they are // Tests that transaction announcements skip the waiting list if they are
// already scheduled. // already scheduled.
func TestTransactionFetcherSkipWaiting(t *testing.T) { func TestTransactionFetcherSkipWaiting(t *testing.T) {
@ -171,6 +397,7 @@ func TestTransactionFetcherSkipWaiting(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -234,6 +461,7 @@ func TestTransactionFetcherSingletonRequesting(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -313,6 +541,7 @@ func TestTransactionFetcherFailedRescheduling(t *testing.T) {
<-proceed <-proceed
return errors.New("peer disconnected") return errors.New("peer disconnected")
}, },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -382,6 +611,7 @@ func TestTransactionFetcherCleanup(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -421,6 +651,7 @@ func TestTransactionFetcherCleanupEmpty(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -459,6 +690,7 @@ func TestTransactionFetcherMissingRescheduling(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -505,6 +737,7 @@ func TestTransactionFetcherMissingCleanup(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -543,6 +776,7 @@ func TestTransactionFetcherBroadcasts(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -591,6 +825,7 @@ func TestTransactionFetcherWaitTimerResets(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -648,6 +883,7 @@ func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -713,6 +949,7 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -757,21 +994,21 @@ func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
}) })
} }
// Tests that if thousands of transactions are announces, only a small // Tests that if thousands of transactions are announced, only a small
// number of them will be requested at a time. // number of them will be requested at a time.
func TestTransactionFetcherRateLimiting(t *testing.T) { func TestTransactionFetcherRateLimiting(t *testing.T) {
// Create a slew of transactions and to announce them // Create a slew of transactions and announce them
var hashes []common.Hash var hashes []common.Hash
for i := 0; i < maxTxAnnounces; i++ { for i := 0; i < maxTxAnnounces; i++ {
hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)}) hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
} }
testTransactionFetcherParallel(t, txFetcherTest{ testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher { init: func() *TxFetcher {
return NewTxFetcher( return NewTxFetcher(
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -792,6 +1029,68 @@ func TestTransactionFetcherRateLimiting(t *testing.T) {
}) })
} }
// Tests that if huge transactions are announced, only a small number of them will
// be requested at a time, to keep the responses below a reasonable level.
func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
nil,
func(string, []common.Hash) error { return nil },
nil,
)
},
steps: []interface{}{
// Announce mid-size transactions from A to verify that multiple
// ones can be piled into a single request.
doTxNotify{peer: "A",
hashes: []common.Hash{{0x01}, {0x02}, {0x03}, {0x04}},
types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType, types.LegacyTxType},
sizes: []uint32{48 * 1024, 48 * 1024, 48 * 1024, 48 * 1024},
},
// Announce transactions exactly at the retrieval size limit to see
// that only one gets requested
doTxNotify{peer: "B",
hashes: []common.Hash{{0x05}, {0x06}},
types: []byte{types.LegacyTxType, types.LegacyTxType},
sizes: []uint32{maxTxRetrievalSize, maxTxRetrievalSize},
},
// Announce oversized blob transactions to see that overflows are ok
doTxNotify{peer: "C",
hashes: []common.Hash{{0x07}, {0x08}},
types: []byte{types.BlobTxType, types.BlobTxType},
sizes: []uint32{params.MaxBlobGasPerBlock, params.MaxBlobGasPerBlock},
},
doWait{time: txArriveTimeout, step: true},
isWaiting(nil),
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
},
"B": {
{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)},
{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)},
},
"C": {
{common.Hash{0x07}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)},
{common.Hash{0x08}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)},
},
},
fetching: map[string][]common.Hash{
"A": {{0x02}, {0x03}, {0x04}},
"B": {{0x06}},
"C": {{0x08}},
},
},
},
})
}
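The expectations above pin down the packing rule: announcements are accumulated into one request until their advertised sizes reach the retrieval cap, and the item that crosses the cap is still included, which is why a single exactly-at-limit or oversized item always yields a one-element request. A minimal sketch of that rule (the real scheduler additionally caps the item count, skips hashes already being fetched, and walks announcements in randomized order):

// packRequest is a sketch, not the fetcher's actual code: it packs announced
// hashes until the advertised sizes reach maxBytes, keeping the crossing item.
// It assumes eth/68-style announcements, where every size field is non-nil.
func packRequest(anns []announce, maxBytes uint64) []common.Hash {
	var (
		hashes []common.Hash
		bytes  uint64
	)
	for _, ann := range anns {
		hashes = append(hashes, ann.hash)
		bytes += uint64(*ann.size)
		if bytes >= maxBytes {
			break // the item that crossed the cap is still requested
		}
	}
	return hashes
}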
// Tests that the number of transactions a peer is allowed to announce and/or // Tests that the number of transactions a peer is allowed to announce and/or
// request at the same time is hard capped. // request at the same time is hard capped.
func TestTransactionFetcherDoSProtection(t *testing.T) { func TestTransactionFetcherDoSProtection(t *testing.T) {
@ -810,6 +1109,7 @@ func TestTransactionFetcherDoSProtection(t *testing.T) {
func(common.Hash) bool { return false }, func(common.Hash) bool { return false },
nil, nil,
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -877,6 +1177,7 @@ func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
return errs return errs
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -946,6 +1247,7 @@ func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
return errs return errs
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: append(steps, []interface{}{ steps: append(steps, []interface{}{
@ -968,6 +1270,7 @@ func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1021,6 +1324,7 @@ func TestTransactionFetcherDrop(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1087,6 +1391,7 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1120,6 +1425,74 @@ func TestTransactionFetcherDropRescheduling(t *testing.T) {
}) })
} }
// Tests that announced transactions with the wrong transaction type or size will
// result in a dropped peer.
func TestInvalidAnnounceMetadata(t *testing.T) {
drop := make(chan string, 2)
testTransactionFetcherParallel(t, txFetcherTest{
init: func() *TxFetcher {
return NewTxFetcher(
func(common.Hash) bool { return false },
func(txs []*types.Transaction) []error {
return make([]error, len(txs))
},
func(string, []common.Hash) error { return nil },
func(peer string) { drop <- peer },
)
},
steps: []interface{}{
// Initial announcement to get something into the waitlist
doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}, types: []byte{testTxs[0].Type(), testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())}},
isWaitingWithMeta(map[string][]announce{
"A": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
{testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
}),
// Announce conflicting transactions from new peers
doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{1024 + uint32(testTxs[0].Size())}},
doTxNotify{peer: "C", hashes: []common.Hash{testTxsHashes[1]}, types: []byte{1 + testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[1].Size())}},
isWaitingWithMeta(map[string][]announce{
"A": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
{testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
"B": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))},
},
"C": {
{testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
}),
// Schedule all the transactions for retrieval
doWait{time: txArriveTimeout, step: true},
isWaitingWithMeta(nil),
isScheduledWithMeta{
tracking: map[string][]announce{
"A": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
{testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
"B": {
{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))},
},
"C": {
{testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
},
},
fetching: map[string][]common.Hash{
"A": {testTxsHashes[0]},
"C": {testTxsHashes[1]},
},
},
// Deliver the transactions and wait for the lying peers B and C to be dropped
doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},
doFunc(func() { <-drop }),
doFunc(func() { <-drop }),
},
})
}
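The step sequence encodes the enforcement point: metadata is not judged at announce time (the conflicting announcements coexist in the waitlist and scheduler), but once a transaction is actually delivered, every peer whose advertisement disagrees with the real type or size is dropped, hence the two reads from the drop channel for B and C. A sketch of that cross-check, assuming a hypothetical record of what each peer announced (txMetadata here stands in for whatever the fetcher tracks internally):

// validateDelivery is a sketch of the check the test exercises, not the
// fetcher's actual code: compare each peer's announced metadata against
// the delivered transaction and drop whoever advertised false data
func validateDelivery(tx *types.Transaction, announced map[string]*txMetadata, drop func(string)) {
	for peer, meta := range announced {
		if meta == nil {
			continue // eth/67-style announcement, nothing to verify
		}
		if meta.kind != tx.Type() || meta.size != uint32(tx.Size()) {
			drop(peer) // announced type or size was wrong
		}
	}
}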
// This test reproduces a crash caught by the fuzzer. The root cause was a // This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction timing out and clashing on re-add with a concurrently // dangling transaction timing out and clashing on re-add with a concurrently
// announced one. // announced one.
@ -1132,6 +1505,7 @@ func TestTransactionFetcherFuzzCrash01(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1159,6 +1533,7 @@ func TestTransactionFetcherFuzzCrash02(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1188,6 +1563,7 @@ func TestTransactionFetcherFuzzCrash03(t *testing.T) {
return make([]error, len(txs)) return make([]error, len(txs))
}, },
func(string, []common.Hash) error { return nil }, func(string, []common.Hash) error { return nil },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1224,6 +1600,7 @@ func TestTransactionFetcherFuzzCrash04(t *testing.T) {
<-proceed <-proceed
return errors.New("peer disconnected") return errors.New("peer disconnected")
}, },
nil,
) )
}, },
steps: []interface{}{ steps: []interface{}{
@ -1274,9 +1651,34 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
// Crunch through all the test steps and execute them // Crunch through all the test steps and execute them
for i, step := range tt.steps { for i, step := range tt.steps {
// Auto-expand certain steps to ones with metadata
switch old := step.(type) {
case isWaiting:
new := make(isWaitingWithMeta)
for peer, hashes := range old {
for _, hash := range hashes {
new[peer] = append(new[peer], announce{hash, nil, nil})
}
}
step = new
case isScheduled:
new := isScheduledWithMeta{
tracking: make(map[string][]announce),
fetching: old.fetching,
dangling: old.dangling,
}
for peer, hashes := range old.tracking {
for _, hash := range hashes {
new.tracking[peer] = append(new.tracking[peer], announce{hash, nil, nil})
}
}
step = new
}
// Process the original or expanded steps
switch step := step.(type) { switch step := step.(type) {
case doTxNotify: case doTxNotify:
if err := fetcher.Notify(step.peer, step.hashes); err != nil { if err := fetcher.Notify(step.peer, step.types, step.sizes, step.hashes); err != nil {
t.Errorf("step %d: %v", i, err) t.Errorf("step %d: %v", i, err)
} }
<-wait // Fetcher needs to process this, wait until it's done <-wait // Fetcher needs to process this, wait until it's done
@ -1307,24 +1709,34 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
case doFunc: case doFunc:
step() step()
case isWaiting: case isWaitingWithMeta:
// We need to check that the waiting list (stage 1) internals // We need to check that the waiting list (stage 1) internals
// match with the expected set. Check the peer->hash mappings // match with the expected set. Check the peer->hash mappings
// first. // first.
for peer, hashes := range step { for peer, announces := range step {
waiting := fetcher.waitslots[peer] waiting := fetcher.waitslots[peer]
if waiting == nil { if waiting == nil {
t.Errorf("step %d: peer %s missing from waitslots", i, peer) t.Errorf("step %d: peer %s missing from waitslots", i, peer)
continue continue
} }
for _, hash := range hashes { for _, ann := range announces {
if _, ok := waiting[hash]; !ok { if meta, ok := waiting[ann.hash]; !ok {
t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, hash) t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, ann.hash)
} else {
if (meta == nil && (ann.kind != nil || ann.size != nil)) ||
(meta != nil && (ann.kind == nil || ann.size == nil)) ||
(meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) {
t.Errorf("step %d, peer %s, hash %x: waitslot metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size)
} }
} }
for hash := range waiting { }
if !containsHash(hashes, hash) { for hash, meta := range waiting {
t.Errorf("step %d, peer %s: hash %x extra in waitslots", i, peer, hash) ann := announce{hash: hash}
if meta != nil {
ann.kind, ann.size = &meta.kind, &meta.size
}
if !containsAnnounce(announces, ann) {
t.Errorf("step %d, peer %s: announce %v extra in waitslots", i, peer, ann)
} }
} }
} }
@ -1334,13 +1746,13 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
} }
} }
// Peer->hash sets correct, check the hash->peer and timeout sets // Peer->hash sets correct, check the hash->peer and timeout sets
for peer, hashes := range step { for peer, announces := range step {
for _, hash := range hashes { for _, ann := range announces {
if _, ok := fetcher.waitlist[hash][peer]; !ok { if _, ok := fetcher.waitlist[ann.hash][peer]; !ok {
t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, hash, peer) t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, ann.hash, peer)
} }
if _, ok := fetcher.waittime[hash]; !ok { if _, ok := fetcher.waittime[ann.hash]; !ok {
t.Errorf("step %d: hash %x missing from waittime", i, hash) t.Errorf("step %d: hash %x missing from waittime", i, ann.hash)
} }
} }
} }
@ -1349,15 +1761,15 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash) t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash)
} }
for peer := range peers { for peer := range peers {
if !containsHash(step[peer], hash) { if !containsHashInAnnounces(step[peer], hash) {
t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer) t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer)
} }
} }
} }
for hash := range fetcher.waittime { for hash := range fetcher.waittime {
var found bool var found bool
for _, hashes := range step { for _, announces := range step {
if containsHash(hashes, hash) { if containsHashInAnnounces(announces, hash) {
found = true found = true
break break
} }
@ -1367,23 +1779,33 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
} }
} }
case isScheduled: case isScheduledWithMeta:
// Check that all scheduled announces are accounted for and no // Check that all scheduled announces are accounted for and no
// extra ones are present. // extra ones are present.
for peer, hashes := range step.tracking { for peer, announces := range step.tracking {
scheduled := fetcher.announces[peer] scheduled := fetcher.announces[peer]
if scheduled == nil { if scheduled == nil {
t.Errorf("step %d: peer %s missing from announces", i, peer) t.Errorf("step %d: peer %s missing from announces", i, peer)
continue continue
} }
for _, hash := range hashes { for _, ann := range announces {
if _, ok := scheduled[hash]; !ok { if meta, ok := scheduled[ann.hash]; !ok {
t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, hash) t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, ann.hash)
} else {
if (meta == nil && (ann.kind != nil || ann.size != nil)) ||
(meta != nil && (ann.kind == nil || ann.size == nil)) ||
(meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) {
t.Errorf("step %d, peer %s, hash %x: announce metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size)
} }
} }
for hash := range scheduled { }
if !containsHash(hashes, hash) { for hash, meta := range scheduled {
t.Errorf("step %d, peer %s: hash %x extra in announces", i, peer, hash) ann := announce{hash: hash}
if meta != nil {
ann.kind, ann.size = &meta.kind, &meta.size
}
if !containsAnnounce(announces, ann) {
t.Errorf("step %d, peer %s: announce %x extra in announces", i, peer, hash)
} }
} }
} }
@ -1483,17 +1905,17 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
// retrieval but not actively being downloaded are tracked only // retrieval but not actively being downloaded are tracked only
// in the stage 2 `announced` map. // in the stage 2 `announced` map.
var queued []common.Hash var queued []common.Hash
for _, hashes := range step.tracking { for _, announces := range step.tracking {
for _, hash := range hashes { for _, ann := range announces {
var found bool var found bool
for _, hs := range step.fetching { for _, hs := range step.fetching {
if containsHash(hs, hash) { if containsHash(hs, ann.hash) {
found = true found = true
break break
} }
} }
if !found { if !found {
queued = append(queued, hash) queued = append(queued, ann.hash)
} }
} }
} }
@ -1526,6 +1948,42 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
} }
} }
// containsAnnounce returns whether an announcement is contained within a slice
// of announcements. Two entries match when their hashes are equal and their
// optional kind/size fields are either both unset or carry the same values.
func containsAnnounce(slice []announce, ann announce) bool {
for _, have := range slice {
if have.hash == ann.hash {
if have.kind == nil || ann.kind == nil {
if have.kind != ann.kind {
return false
}
} else if *have.kind != *ann.kind {
return false
}
if have.size == nil || ann.size == nil {
if have.size != ann.size {
return false
}
} else if *have.size != *ann.size {
return false
}
return true
}
}
return false
}
// containsHashInAnnounces returns whether a hash is contained within a slice
// of announcements.
func containsHashInAnnounces(slice []announce, hash common.Hash) bool {
for _, have := range slice {
if have.hash == hash {
return true
}
}
return false
}
// containsHash returns whether a hash is contained within a hash slice. // containsHash returns whether a hash is contained within a hash slice.
func containsHash(slice []common.Hash, hash common.Hash) bool { func containsHash(slice []common.Hash, hash common.Hash) bool {
for _, have := range slice { for _, have := range slice {
@ -1535,3 +1993,38 @@ func containsHash(slice []common.Hash, hash common.Hash) bool {
} }
return false return false
} }
// Tests that a transaction is forgotten after the timeout.
func TestTransactionForgotten(t *testing.T) {
fetcher := NewTxFetcher(
func(common.Hash) bool { return false },
func(txs []*types.Transaction) []error {
errs := make([]error, len(txs))
for i := 0; i < len(errs); i++ {
errs[i] = txpool.ErrUnderpriced
}
return errs
},
func(string, []common.Hash) error { return nil },
func(string) {},
)
fetcher.Start()
defer fetcher.Stop()
// Create one TX which is older than maxTxUnderpricedTimeout, and one which is recent
tx1 := types.NewTx(&types.LegacyTx{Nonce: 0})
tx1.SetTime(time.Now().Add(-maxTxUnderpricedTimeout - 1*time.Second))
tx2 := types.NewTx(&types.LegacyTx{Nonce: 1})
// Enqueue both in the fetcher. They will be immediately tagged as underpriced
if err := fetcher.Enqueue("asdf", []*types.Transaction{tx1, tx2}, false); err != nil {
t.Fatal(err)
}
// isKnownUnderpriced should trigger removal of the first tx (it is no longer known to be underpriced)
if fetcher.isKnownUnderpriced(tx1.Hash()) {
t.Fatal("transaction should be forgotten by now")
}
// isKnownUnderpriced should not trigger removal of the second
if !fetcher.isKnownUnderpriced(tx2.Hash()) {
t.Fatal("transaction should be known underpriced")
}
}
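The behavior under test corresponds to a timestamp check along these lines (a sketch assuming f.underpriced is a hash-to-time lookup with Peek/Remove semantics, e.g. an LRU; not necessarily the exact implementation):

// isKnownUnderpriced reports whether a hash was recently marked underpriced,
// forgetting entries whose underlying transaction is older than the timeout
func (f *TxFetcher) isKnownUnderpriced(hash common.Hash) bool {
	prevTime, ok := f.underpriced.Peek(hash)
	if ok && prevTime.Before(time.Now().Add(-maxTxUnderpricedTimeout)) {
		f.underpriced.Remove(hash)
		return false
	}
	return ok
}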

View File

@ -55,9 +55,7 @@ const (
txMaxBroadcastSize = 4096 txMaxBroadcastSize = 4096
) )
var ( var syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge
syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge
)
// txPool defines the methods needed from a transaction pool implementation to // txPool defines the methods needed from a transaction pool implementation to
// support all the operations needed by the Ethereum chain protocols. // support all the operations needed by the Ethereum chain protocols.
@ -77,9 +75,10 @@ type txPool interface {
// The slice should be modifiable by the caller. // The slice should be modifiable by the caller.
Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction
// SubscribeNewTxsEvent should return an event subscription of // SubscribeTransactions subscribes to new transaction events. The subscriber
// NewTxsEvent and send events to the given channel. // can decide whether to receive notifications only for newly seen transactions
SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription // or also for reorged out ones.
SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription
} }
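A hypothetical subscriber, illustrating the new flag: reorgs=false delivers only transactions the pool has never seen, while reorgs=true also re-delivers transactions resurrected by chain reorgs. Everything except the interface method itself is illustrative:

// drainNewTxs logs freshly pooled transactions until the subscription dies
func drainNewTxs(pool txPool) error {
	txCh := make(chan core.NewTxsEvent, 1024)
	sub := pool.SubscribeTransactions(txCh, false) // false: skip reorged-out txs
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-txCh:
			for _, tx := range ev.Txs {
				log.Info("Pooled new transaction", "hash", tx.Hash())
			}
		case err := <-sub.Err():
			return err
		}
	}
}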
// handlerConfig is the collection of initialization parameters to create a full // handlerConfig is the collection of initialization parameters to create a full
@ -89,7 +88,7 @@ type handlerConfig struct {
Chain *core.BlockChain // Blockchain to serve data from Chain *core.BlockChain // Blockchain to serve data from
TxPool txPool // Transaction pool to propagate from TxPool txPool // Transaction pool to propagate from
Merger *consensus.Merger // The manager for eth1/2 transition Merger *consensus.Merger // The manager for eth1/2 transition
Network uint64 // Network identifier to adfvertise Network uint64 // Network identifier to advertise
Sync downloader.SyncMode // Whether to snap or full sync Sync downloader.SyncMode // Whether to snap or full sync
BloomCache uint64 // Megabytes to alloc for snap sync bloom BloomCache uint64 // Megabytes to alloc for snap sync bloom
EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
@ -255,7 +254,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
} }
td := new(big.Int).Add(ptd, block.Difficulty()) td := new(big.Int).Add(ptd, block.Difficulty())
if !h.chain.Config().IsTerminalPoWBlock(ptd, td) { if !h.chain.Config().IsTerminalPoWBlock(ptd, td) {
log.Info("Filtered out non-termimal pow block", "number", block.NumberU64(), "hash", block.Hash()) log.Info("Filtered out non-terminal pow block", "number", block.NumberU64(), "hash", block.Hash())
return 0, nil return 0, nil
} }
if err := h.chain.InsertBlockWithoutSetHead(block); err != nil { if err := h.chain.InsertBlockWithoutSetHead(block); err != nil {
@ -278,7 +277,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
addTxs := func(txs []*types.Transaction) []error { addTxs := func(txs []*types.Transaction) []error {
return h.txpool.Add(txs, false, false) return h.txpool.Add(txs, false, false)
} }
h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx) h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer)
h.chainSync = newChainSyncer(h) h.chainSync = newChainSyncer(h)
return h, nil return h, nil
} }
@ -416,7 +415,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
select { select {
case res := <-resCh: case res := <-resCh:
headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest))
if len(headers) == 0 { if len(headers) == 0 {
// Required blocks are allowed to be missing if the remote // Required blocks are allowed to be missing if the remote
// node is not yet synced // node is not yet synced
@ -463,7 +462,7 @@ func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error
snap.EgressRegistrationErrorMeter.Mark(1) snap.EgressRegistrationErrorMeter.Mark(1)
} }
} }
peer.Log().Warn("Snapshot extension registration failed", "err", err) peer.Log().Debug("Snapshot extension registration failed", "err", err)
return err return err
} }
return handler(peer) return handler(peer)
@ -511,10 +510,10 @@ func (h *handler) unregisterPeer(id string) {
func (h *handler) Start(maxPeers int) { func (h *handler) Start(maxPeers int) {
h.maxPeers = maxPeers h.maxPeers = maxPeers
// broadcast transactions // broadcast and announce transactions (only new ones, not resurrected ones)
h.wg.Add(1) h.wg.Add(1)
h.txsCh = make(chan core.NewTxsEvent, txChanSize) h.txsCh = make(chan core.NewTxsEvent, txChanSize)
h.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh) h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false)
go h.txBroadcastLoop() go h.txBroadcastLoop()
// broadcast mined blocks // broadcast mined blocks
@ -594,26 +593,33 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) {
} }
// BroadcastTransactions will propagate a batch of transactions // BroadcastTransactions will propagate a batch of transactions
// - To a square root of all peers // - To a square root of all peers for non-blob transactions
// - And, separately, as announcements to all peers which are not known to // - And, separately, as announcements to all peers which are not known to
// already have the given transaction. // already have the given transaction.
func (h *handler) BroadcastTransactions(txs types.Transactions) { func (h *handler) BroadcastTransactions(txs types.Transactions) {
var ( var (
annoCount int // Count of announcements made blobTxs int // Number of blob transactions to announce only
annoPeers int largeTxs int // Number of large transactions to announce only
directCount int // Count of the txs sent directly to peers
directPeers int // Count of the peers that were sent transactions directly directCount int // Number of transactions sent directly to peers (duplicates included)
directPeers int // Number of peers that were sent transactions directly
annCount int // Number of transactions announced across all peers (duplicates included)
annPeers int // Number of peers announced about transactions
txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly
annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce
) )
// Broadcast transactions to a batch of peers not knowing about it // Broadcast transactions to a batch of peers not knowing about it
for _, tx := range txs { for _, tx := range txs {
peers := h.peers.peersWithoutTransaction(tx.Hash()) peers := h.peers.peersWithoutTransaction(tx.Hash())
var numDirect int var numDirect int
if tx.Size() <= txMaxBroadcastSize { switch {
case tx.Type() == types.BlobTxType:
blobTxs++
case tx.Size() > txMaxBroadcastSize:
largeTxs++
default:
numDirect = int(math.Sqrt(float64(len(peers)))) numDirect = int(math.Sqrt(float64(len(peers))))
} }
// Send the tx unconditionally to a subset of our peers // Send the tx unconditionally to a subset of our peers
@ -631,13 +637,12 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
peer.AsyncSendTransactions(hashes) peer.AsyncSendTransactions(hashes)
} }
for peer, hashes := range annos { for peer, hashes := range annos {
annoPeers++ annPeers++
annoCount += len(hashes) annCount += len(hashes)
peer.AsyncSendPooledTransactionHashes(hashes) peer.AsyncSendPooledTransactionHashes(hashes)
} }
log.Debug("Transaction broadcast", "txs", len(txs), log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs,
"announce packs", annoPeers, "announced hashes", annoCount, "bcastpeers", directPeers, "bcastcount", directCount, "annpeers", annPeers, "anncount", annCount)
"tx packs", directPeers, "broadcast txs", directCount)
} }
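As a worked example of the square-root policy (a standalone toy, not handler code): a plain transaction whose hash is unknown to n peers goes out in full to sqrt(n) of them and is merely announced to the rest, so direct-broadcast bandwidth grows sub-linearly with peer count, while blob and oversized transactions are announce-only regardless of n:

package main

import (
	"fmt"
	"math"
)

func main() {
	for _, peers := range []int{4, 16, 64, 256} {
		direct := int(math.Sqrt(float64(peers)))
		fmt.Printf("peers=%3d direct=%2d announce-only=%3d\n", peers, direct, peers-direct)
	}
}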
// minedBroadcastLoop sends mined blocks to connected peers. // minedBroadcastLoop sends mined blocks to connected peers.

View File

@ -17,6 +17,7 @@
package eth package eth
import ( import (
"errors"
"fmt" "fmt"
"math/big" "math/big"
"time" "time"
@ -66,16 +67,21 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
case *eth.NewBlockPacket: case *eth.NewBlockPacket:
return h.handleBlockBroadcast(peer, packet.Block, packet.TD) return h.handleBlockBroadcast(peer, packet.Block, packet.TD)
case *eth.NewPooledTransactionHashesPacket66: case *eth.NewPooledTransactionHashesPacket67:
return h.txFetcher.Notify(peer.ID(), *packet) return h.txFetcher.Notify(peer.ID(), nil, nil, *packet)
case *eth.NewPooledTransactionHashesPacket68: case *eth.NewPooledTransactionHashesPacket68:
return h.txFetcher.Notify(peer.ID(), packet.Hashes) return h.txFetcher.Notify(peer.ID(), packet.Types, packet.Sizes, packet.Hashes)
case *eth.TransactionsPacket: case *eth.TransactionsPacket:
for _, tx := range *packet {
if tx.Type() == types.BlobTxType {
return errors.New("disallowed broadcast blob transaction")
}
}
return h.txFetcher.Enqueue(peer.ID(), *packet, false) return h.txFetcher.Enqueue(peer.ID(), *packet, false)
case *eth.PooledTransactionsPacket: case *eth.PooledTransactionsResponse:
return h.txFetcher.Enqueue(peer.ID(), *packet, true) return h.txFetcher.Enqueue(peer.ID(), *packet, true)
default: default:
@ -90,9 +96,7 @@ func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash,
// the chain already entered the pos stage and disconnect the // the chain already entered the pos stage and disconnect the
// remote peer. // remote peer.
if h.merger.PoSFinalized() { if h.merger.PoSFinalized() {
// TODO (MariusVanDerWijden) drop non-updated peers after the merge return errors.New("disallowed block announcement")
return nil
// return errors.New("unexpected block announces")
} }
// Schedule all the unknown hashes for retrieval // Schedule all the unknown hashes for retrieval
var ( var (
@ -118,9 +122,7 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td
// the chain already entered the pos stage and disconnect the // the chain already entered the pos stage and disconnect the
// remote peer. // remote peer.
if h.merger.PoSFinalized() { if h.merger.PoSFinalized() {
// TODO (MariusVanDerWijden) drop non-updated peers after the merge return errors.New("disallowed block broadcast")
return nil
// return errors.New("unexpected block announces")
} }
// Schedule the block for import // Schedule the block for import
h.blockFetcher.Enqueue(peer.ID(), block) h.blockFetcher.Enqueue(peer.ID(), block)

View File

@ -58,7 +58,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
h.blockBroadcasts.Send(packet.Block) h.blockBroadcasts.Send(packet.Block)
return nil return nil
case *eth.NewPooledTransactionHashesPacket66: case *eth.NewPooledTransactionHashesPacket67:
h.txAnnounces.Send(([]common.Hash)(*packet)) h.txAnnounces.Send(([]common.Hash)(*packet))
return nil return nil
@ -70,7 +70,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
h.txBroadcasts.Send(([]*types.Transaction)(*packet)) h.txBroadcasts.Send(([]*types.Transaction)(*packet))
return nil return nil
case *eth.PooledTransactionsPacket: case *eth.PooledTransactionsResponse:
h.txBroadcasts.Send(([]*types.Transaction)(*packet)) h.txBroadcasts.Send(([]*types.Transaction)(*packet))
return nil return nil
@ -81,7 +81,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
// Tests that peers are correctly accepted (or rejected) based on the advertised // Tests that peers are correctly accepted (or rejected) based on the advertised
// fork IDs in the protocol handshake. // fork IDs in the protocol handshake.
func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) }
func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) } func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) }
func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) } func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) }
@ -237,7 +236,6 @@ func testForkIDSplit(t *testing.T, protocol uint) {
} }
// Tests that received transactions are added to the local pool. // Tests that received transactions are added to the local pool.
func TestRecvTransactions66(t *testing.T) { testRecvTransactions(t, eth.ETH66) }
func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) } func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) }
func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) } func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) }
@ -251,7 +249,7 @@ func testRecvTransactions(t *testing.T, protocol uint) {
handler.handler.synced.Store(true) // mark synced to accept transactions handler.handler.synced.Store(true) // mark synced to accept transactions
txs := make(chan core.NewTxsEvent) txs := make(chan core.NewTxsEvent)
sub := handler.txpool.SubscribeNewTxsEvent(txs) sub := handler.txpool.SubscribeTransactions(txs, false)
defer sub.Unsubscribe() defer sub.Unsubscribe()
// Create a source peer to send messages through and a sink handler to receive them // Create a source peer to send messages through and a sink handler to receive them
@ -296,7 +294,6 @@ func testRecvTransactions(t *testing.T, protocol uint) {
} }
// This test checks that pending transactions are sent. // This test checks that pending transactions are sent.
func TestSendTransactions66(t *testing.T) { testSendTransactions(t, eth.ETH66) }
func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) } func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) }
func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) } func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) }
@ -356,7 +353,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
seen := make(map[common.Hash]struct{}) seen := make(map[common.Hash]struct{})
for len(seen) < len(insert) { for len(seen) < len(insert) {
switch protocol { switch protocol {
case 66, 67, 68: case 67, 68:
select { select {
case hashes := <-anns: case hashes := <-anns:
for _, hash := range hashes { for _, hash := range hashes {
@ -382,7 +379,6 @@ func testSendTransactions(t *testing.T, protocol uint) {
// Tests that transactions get propagated to all attached peers, either via direct // Tests that transactions get propagated to all attached peers, either via direct
// broadcasts or via announcements/retrievals. // broadcasts or via announcements/retrievals.
func TestTransactionPropagation66(t *testing.T) { testTransactionPropagation(t, eth.ETH66) }
func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) } func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) }
func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) } func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) }
@ -428,7 +424,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
for i := 0; i < len(sinks); i++ { for i := 0; i < len(sinks); i++ {
txChs[i] = make(chan core.NewTxsEvent, 1024) txChs[i] = make(chan core.NewTxsEvent, 1024)
sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i]) sub := sinks[i].txpool.SubscribeTransactions(txChs[i], false)
defer sub.Unsubscribe() defer sub.Unsubscribe()
} }
// Fill the source pool with transactions and wait for them at the sinks // Fill the source pool with transactions and wait for them at the sinks
@ -490,8 +486,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
defer sourcePipe.Close() defer sourcePipe.Close()
defer sinkPipe.Close() defer sinkPipe.Close()
sourcePeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
sinkPeer := eth.NewPeer(eth.ETH66, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
defer sourcePeer.Close() defer sourcePeer.Close()
defer sinkPeer.Close() defer sinkPeer.Close()
@ -543,7 +539,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
// Tests that a propagated malformed block (uncles or transactions don't match // Tests that a propagated malformed block (uncles or transactions don't match
// with the hashes in the header) gets discarded and not broadcast forward. // with the hashes in the header) gets discarded and not broadcast forward.
func TestBroadcastMalformedBlock66(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH66) }
func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) } func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) }
func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) } func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) }

View File

@ -113,15 +113,17 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.Lazy
Time: tx.Time(), Time: tx.Time(),
GasFeeCap: tx.GasFeeCap(), GasFeeCap: tx.GasFeeCap(),
GasTipCap: tx.GasTipCap(), GasTipCap: tx.GasTipCap(),
Gas: tx.Gas(),
BlobGas: tx.BlobGas(),
}) })
} }
} }
return pending return pending
} }
// SubscribeNewTxsEvent should return an event subscription of NewTxsEvent and // SubscribeTransactions should return an event subscription of NewTxsEvent and
// send events to the given channel. // send events to the given channel.
func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { func (p *testTxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription {
return p.txFeed.Subscribe(ch) return p.txFeed.Subscribe(ch)
} }

View File

@ -18,6 +18,7 @@ package eth
import ( import (
"errors" "errors"
"fmt"
"math/big" "math/big"
"sync" "sync"
@ -74,7 +75,7 @@ func (ps *peerSet) registerSnapExtension(peer *snap.Peer) error {
// Reject the peer if it advertises `snap` without `eth` as `snap` is only a // Reject the peer if it advertises `snap` without `eth` as `snap` is only a
// satellite protocol meaningful with the chain selection of `eth` // satellite protocol meaningful with the chain selection of `eth`
if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) { if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) {
return errSnapWithoutEth return fmt.Errorf("%w: have %v", errSnapWithoutEth, peer.Caps())
} }
// Ensure nobody can double connect // Ensure nobody can double connect
ps.lock.Lock() ps.lock.Lock()

View File

@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
@ -45,10 +44,6 @@ const (
// nowadays, the practical limit will always be softResponseLimit. // nowadays, the practical limit will always be softResponseLimit.
maxBodiesServe = 1024 maxBodiesServe = 1024
// maxNodeDataServe is the maximum number of state trie nodes to serve. This
// number is there to limit the number of disk lookups.
maxNodeDataServe = 1024
// maxReceiptsServe is the maximum number of block receipts to serve. This // maxReceiptsServe is the maximum number of block receipts to serve. This
// number is mostly there to limit the number of disk lookups. With block // number is mostly there to limit the number of disk lookups. With block
// containing 200+ transactions nowadays, the practical limit will always // containing 200+ transactions nowadays, the practical limit will always
@ -98,12 +93,12 @@ type TxPool interface {
func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol { func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
protocols := make([]p2p.Protocol, 0, len(ProtocolVersions)) protocols := make([]p2p.Protocol, 0, len(ProtocolVersions))
for _, version := range ProtocolVersions { for _, version := range ProtocolVersions {
version := version // Closure // Blob transactions require eth/68 announcements, disable everything else
if version <= ETH67 && backend.Chain().Config().CancunTime != nil {
// Path scheme does not support GetNodeData, don't advertise eth66 on it
if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme {
continue continue
} }
version := version // Closure
protocols = append(protocols, p2p.Protocol{ protocols = append(protocols, p2p.Protocol{
Name: ProtocolName, Name: ProtocolName,
Version: version, Version: version,
@ -171,36 +166,19 @@ type Decoder interface {
Time() time.Time Time() time.Time
} }
var eth66 = map[uint64]msgHandler{
NewBlockHashesMsg: handleNewBlockhashes,
NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions,
NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66,
GetBlockHeadersMsg: handleGetBlockHeaders66,
BlockHeadersMsg: handleBlockHeaders66,
GetBlockBodiesMsg: handleGetBlockBodies66,
BlockBodiesMsg: handleBlockBodies66,
GetNodeDataMsg: handleGetNodeData66,
NodeDataMsg: handleNodeData66,
GetReceiptsMsg: handleGetReceipts66,
ReceiptsMsg: handleReceipts66,
GetPooledTransactionsMsg: handleGetPooledTransactions66,
PooledTransactionsMsg: handlePooledTransactions66,
}
var eth67 = map[uint64]msgHandler{ var eth67 = map[uint64]msgHandler{
NewBlockHashesMsg: handleNewBlockhashes, NewBlockHashesMsg: handleNewBlockhashes,
NewBlockMsg: handleNewBlock, NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions, TransactionsMsg: handleTransactions,
NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes66, NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67,
GetBlockHeadersMsg: handleGetBlockHeaders66, GetBlockHeadersMsg: handleGetBlockHeaders,
BlockHeadersMsg: handleBlockHeaders66, BlockHeadersMsg: handleBlockHeaders,
GetBlockBodiesMsg: handleGetBlockBodies66, GetBlockBodiesMsg: handleGetBlockBodies,
BlockBodiesMsg: handleBlockBodies66, BlockBodiesMsg: handleBlockBodies,
GetReceiptsMsg: handleGetReceipts66, GetReceiptsMsg: handleGetReceipts,
ReceiptsMsg: handleReceipts66, ReceiptsMsg: handleReceipts,
GetPooledTransactionsMsg: handleGetPooledTransactions66, GetPooledTransactionsMsg: handleGetPooledTransactions,
PooledTransactionsMsg: handlePooledTransactions66, PooledTransactionsMsg: handlePooledTransactions,
} }
var eth68 = map[uint64]msgHandler{ var eth68 = map[uint64]msgHandler{
@ -208,14 +186,14 @@ var eth68 = map[uint64]msgHandler{
NewBlockMsg: handleNewBlock, NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions, TransactionsMsg: handleTransactions,
NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68, NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68,
GetBlockHeadersMsg: handleGetBlockHeaders66, GetBlockHeadersMsg: handleGetBlockHeaders,
BlockHeadersMsg: handleBlockHeaders66, BlockHeadersMsg: handleBlockHeaders,
GetBlockBodiesMsg: handleGetBlockBodies66, GetBlockBodiesMsg: handleGetBlockBodies,
BlockBodiesMsg: handleBlockBodies66, BlockBodiesMsg: handleBlockBodies,
GetReceiptsMsg: handleGetReceipts66, GetReceiptsMsg: handleGetReceipts,
ReceiptsMsg: handleReceipts66, ReceiptsMsg: handleReceipts,
GetPooledTransactionsMsg: handleGetPooledTransactions66, GetPooledTransactionsMsg: handleGetPooledTransactions,
PooledTransactionsMsg: handlePooledTransactions66, PooledTransactionsMsg: handlePooledTransactions,
} }
// handleMessage is invoked whenever an inbound message is received from a remote // handleMessage is invoked whenever an inbound message is received from a remote
@ -231,14 +209,10 @@ func handleMessage(backend Backend, peer *Peer) error {
} }
defer msg.Discard() defer msg.Discard()
var handlers = eth66 var handlers = eth67
if peer.Version() == ETH67 {
handlers = eth67
}
if peer.Version() >= ETH68 { if peer.Version() >= ETH68 {
handlers = eth68 handlers = eth68
} }
// Track the amount of time it takes to serve the request and run the handler // Track the amount of time it takes to serve the request and run the handler
if metrics.Enabled { if metrics.Enabled {
h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)

View File

@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -151,7 +150,6 @@ func (b *testBackend) Handle(*Peer, Packet) error {
} }
// Tests that block headers can be retrieved from a remote chain based on user queries. // Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeaders66(t *testing.T) { testGetBlockHeaders(t, ETH66) }
func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) } func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) }
func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) } func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) }
@ -178,29 +176,29 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
// Create a batch of tests for various scenarios // Create a batch of tests for various scenarios
limit := uint64(maxHeadersServe) limit := uint64(maxHeadersServe)
tests := []struct { tests := []struct {
query *GetBlockHeadersPacket // The query to execute for header retrieval query *GetBlockHeadersRequest // The query to execute for header retrieval
expect []common.Hash // The hashes of the block whose headers are expected expect []common.Hash // The hashes of the block whose headers are expected
}{ }{
// A single random block should be retrievable by hash // A single random block should be retrievable by hash
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
}, },
// A single random block should be retrievable by number // A single random block should be retrievable by number
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},
}, },
// Multiple headers should be retrievable in both directions // Multiple headers should be retrievable in both directions
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 1).Hash(), backend.chain.GetBlockByNumber(limit/2 + 1).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 2).Hash(), backend.chain.GetBlockByNumber(limit/2 + 2).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 - 1).Hash(), backend.chain.GetBlockByNumber(limit/2 - 1).Hash(),
@ -209,14 +207,14 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// Multiple headers with skip lists should be retrievable // Multiple headers with skip lists should be retrievable
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 4).Hash(), backend.chain.GetBlockByNumber(limit/2 + 4).Hash(),
backend.chain.GetBlockByNumber(limit/2 + 8).Hash(), backend.chain.GetBlockByNumber(limit/2 + 8).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(limit / 2).Hash(), backend.chain.GetBlockByNumber(limit / 2).Hash(),
backend.chain.GetBlockByNumber(limit/2 - 4).Hash(), backend.chain.GetBlockByNumber(limit/2 - 4).Hash(),
@ -225,31 +223,31 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// The chain endpoints should be retrievable // The chain endpoints should be retrievable
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1},
[]common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, []common.Hash{backend.chain.GetBlockByNumber(0).Hash()},
}, },
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
[]common.Hash{backend.chain.CurrentBlock().Hash()}, []common.Hash{backend.chain.CurrentBlock().Hash()},
}, },
{ // If the peer requests a bit into the future, we deliver what we have { // If the peer requests a bit into the future, we deliver what we have
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
[]common.Hash{backend.chain.CurrentBlock().Hash()}, []common.Hash{backend.chain.CurrentBlock().Hash()},
}, },
// Ensure protocol limits are honored // Ensure protocol limits are honored
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit), getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit),
}, },
// Check that requesting more than available is handled gracefully // Check that requesting more than available is handled gracefully
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(4).Hash(),
backend.chain.GetBlockByNumber(0).Hash(), backend.chain.GetBlockByNumber(0).Hash(),
@ -257,13 +255,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// Check that requesting more than available is handled gracefully, even if mid-skip // Check that requesting more than available is handled gracefully, even if mid-skip
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(4).Hash(), backend.chain.GetBlockByNumber(4).Hash(),
backend.chain.GetBlockByNumber(1).Hash(), backend.chain.GetBlockByNumber(1).Hash(),
@ -271,7 +269,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// Check a corner case where requesting more can iterate past the endpoints // Check a corner case where requesting more can iterate past the endpoints
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(2).Hash(), backend.chain.GetBlockByNumber(2).Hash(),
backend.chain.GetBlockByNumber(1).Hash(), backend.chain.GetBlockByNumber(1).Hash(),
@ -280,24 +278,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// Check a corner case where skipping overflow loops back into the chain start // Check a corner case where skipping overflow loops back into the chain start
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(3).Hash(), backend.chain.GetBlockByNumber(3).Hash(),
}, },
}, },
// Check a corner case where skipping overflow loops back to the same header // Check a corner case where skipping overflow loops back to the same header
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(1).Hash(), backend.chain.GetBlockByNumber(1).Hash(),
}, },
}, },
// Check that non-existent headers aren't returned // Check that non-existent headers aren't returned
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
[]common.Hash{}, []common.Hash{},
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
[]common.Hash{}, []common.Hash{},
}, },
} }
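The skip and overflow cases above all exercise the same traversal arithmetic: each step advances the origin by skip+1 blocks, and a skip near math.MaxUint64 would wrap the counter back into the chain, which the handler must refuse. A minimal sketch of that arithmetic, purely illustrative (collectNumbers and its bounds handling are this editor's assumptions, not the handler under test):

package main

import "fmt"

// collectNumbers sketches the skip-list traversal a server performs for a
// GetBlockHeadersRequest-style query: advance by skip+1 per step, stopping
// at the chain head, at genesis, or when the next step would overflow uint64.
func collectNumbers(origin, amount, skip, head uint64, reverse bool) []uint64 {
	var out []uint64
	current := origin
	for uint64(len(out)) < amount && current <= head {
		out = append(out, current)
		if reverse {
			if current < skip+1 {
				break // walked past genesis
			}
			current -= skip + 1
		} else {
			next := current + skip + 1 // wraps silently on overflow
			if next <= current {
				break // skip overflow would loop back into the chain
			}
			current = next
		}
	}
	return out
}

func main() {
	// Mirrors the math.MaxUint64-1 corner case above: only the origin is served.
	fmt.Println(collectNumbers(3, 2, ^uint64(0)-1, 64, false)) // [3]
}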
@ -309,13 +307,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
headers = append(headers, backend.chain.GetBlockByHash(hash).Header()) headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
} }
// Send the hash request and verify the response // Send the hash request and verify the response
p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
RequestId: 123, RequestId: 123,
GetBlockHeadersPacket: tt.query, GetBlockHeadersRequest: tt.query,
}) })
if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{ if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{
RequestId: 123, RequestId: 123,
BlockHeadersPacket: headers, BlockHeadersRequest: headers,
}); err != nil { }); err != nil {
t.Errorf("test %d: headers mismatch: %v", i, err) t.Errorf("test %d: headers mismatch: %v", i, err)
} }
@ -324,11 +322,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0
p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{
RequestId: 456, RequestId: 456,
GetBlockHeadersPacket: tt.query, GetBlockHeadersRequest: tt.query,
}) })
expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers} expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: headers}
if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil { if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil {
t.Errorf("test %d by hash: headers mismatch: %v", i, err) t.Errorf("test %d by hash: headers mismatch: %v", i, err)
} }
@ -338,7 +336,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
} }
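For context on the renames in this hunk: with eth/66 gone, the request-id envelope is the only wire format, so the 66-suffixed wrappers take the plain names and the inner payloads become *Request/*Response. A sketch of the resulting shapes, simplified from what the diff shows (field comments are this editor's; the exact definitions live in eth/protocols/eth):

package main

import "github.com/ethereum/go-ethereum/common"

// HashOrNumber addresses the origin block by hash or by number (one of the two).
type HashOrNumber struct {
	Hash   common.Hash
	Number uint64
}

// GetBlockHeadersRequest is the bare query payload.
type GetBlockHeadersRequest struct {
	Origin  HashOrNumber // block from which to retrieve headers
	Amount  uint64       // maximum number of headers to retrieve
	Skip    uint64       // blocks to skip between consecutive headers
	Reverse bool         // query direction (false rises towards the head)
}

// GetBlockHeadersPacket is the request plus its id, formerly
// GetBlockHeadersPacket66 wrapping a GetBlockHeadersPacket.
type GetBlockHeadersPacket struct {
	RequestId uint64
	*GetBlockHeadersRequest
}

func main() {}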
// Tests that block contents can be retrieved from a remote chain based on their hashes. // Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodies66(t *testing.T) { testGetBlockBodies(t, ETH66) }
func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) } func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) }
func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) } func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) }
@ -420,139 +417,20 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
} }
// Send the hash request and verify the response // Send the hash request and verify the response
p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{ p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{
RequestId: 123, RequestId: 123,
GetBlockBodiesPacket: hashes, GetBlockBodiesRequest: hashes,
}) })
if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{ if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{
RequestId: 123, RequestId: 123,
BlockBodiesPacket: bodies, BlockBodiesResponse: bodies,
}); err != nil { }); err != nil {
t.Fatalf("test %d: bodies mismatch: %v", i, err) t.Fatalf("test %d: bodies mismatch: %v", i, err)
} }
} }
} }
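The bodies exchange follows the same envelope pattern, and its request payload is nothing more than a list of block hashes. A parallel sketch, again simplified, with the layout assumed from the names in the diff:

package main

import "github.com/ethereum/go-ethereum/common"

// GetBlockBodiesRequest lists the blocks whose bodies are wanted.
type GetBlockBodiesRequest []common.Hash

// GetBlockBodiesPacket adds the request id, mirroring the headers envelope.
type GetBlockBodiesPacket struct {
	RequestId uint64
	GetBlockBodiesRequest
}

func main() {}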
// Tests that the state trie nodes can be retrieved based on hashes.
func TestGetNodeData66(t *testing.T) { testGetNodeData(t, ETH66, false) }
func TestGetNodeData67(t *testing.T) { testGetNodeData(t, ETH67, true) }
func TestGetNodeData68(t *testing.T) { testGetNodeData(t, ETH68, true) }
func testGetNodeData(t *testing.T, protocol uint, drop bool) {
t.Parallel()
// Define three accounts to simulate transactions with
acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)
acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)
signer := types.HomesteadSigner{}
// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_makers_test)
generator := func(i int, block *core.BlockGen) {
switch i {
case 0:
// In block 1, the test bank sends account #1 some ether.
tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
block.AddTx(tx)
case 1:
// In block 2, the test bank sends some more ether to account #1.
// acc1Addr passes it on to account #2.
tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey)
tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key)
block.AddTx(tx1)
block.AddTx(tx2)
case 2:
// Block 3 is empty but was mined by account #2.
block.SetCoinbase(acc2Addr)
block.SetExtra([]byte("yeehaw"))
case 3:
// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
b2 := block.PrevBlock(1).Header()
b2.Extra = []byte("foo")
block.AddUncle(b2)
b3 := block.PrevBlock(2).Header()
b3.Extra = []byte("foo")
block.AddUncle(b3)
}
}
// Assemble the test environment
backend := newTestBackendWithGenerator(4, false, generator)
defer backend.close()
peer, _ := newTestPeer("peer", protocol, backend)
defer peer.close()
// Collect all state tree hashes.
var hashes []common.Hash
it := backend.db.NewIterator(nil, nil)
for it.Next() {
if key := it.Key(); len(key) == common.HashLength {
hashes = append(hashes, common.BytesToHash(key))
}
}
it.Release()
// Request all hashes.
p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{
RequestId: 123,
GetNodeDataPacket: hashes,
})
msg, err := peer.app.ReadMsg()
if !drop {
if err != nil {
t.Fatalf("failed to read node data response: %v", err)
}
} else {
if err != nil {
return
}
t.Fatalf("unexpectedly read node data response on non-supporting protocol: %v", msg)
}
if msg.Code != NodeDataMsg {
t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg)
}
var res NodeDataPacket66
if err := msg.Decode(&res); err != nil {
t.Fatalf("failed to decode response node data: %v", err)
}
// Verify that all hashes correspond to the requested data.
data := res.NodeDataPacket
for i, want := range hashes {
if hash := crypto.Keccak256Hash(data[i]); hash != want {
t.Errorf("data hash mismatch: have %x, want %x", hash, want)
}
}
// Reconstruct state tree from the received data.
reconstructDB := rawdb.NewMemoryDatabase()
for i := 0; i < len(data); i++ {
rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i])
}
// Sanity check whether all state matches.
accounts := []common.Address{testAddr, acc1Addr, acc2Addr}
for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ {
root := backend.chain.GetBlockByNumber(i).Root()
reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil)
for j, acc := range accounts {
state, _ := backend.chain.StateAt(root)
bw := state.GetBalance(acc)
bh := reconstructed.GetBalance(acc)
if (bw == nil) != (bh == nil) {
t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
}
if bw != nil && bh != nil && bw.Cmp(bh) != 0 {
t.Errorf("block %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw)
}
}
}
}
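The deletion above removes the last eth/66-only handler test: GetNodeData is gone from eth/67 onward, with state served over the snap protocol instead, which is why the 67/68 variants asserted that the connection drops rather than answers. The invariant the deleted test verified for eth/66 is worth restating: every returned trie node must hash back to the key it was requested under. A minimal restatement of that check (byte-slice keys assumed here instead of common.Hash):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// verifyNodeData restates the deleted test's core check: each returned
// node's keccak256 hash must equal the hash it was requested by.
func verifyNodeData(hashes, data [][]byte) error {
	if len(hashes) != len(data) {
		return fmt.Errorf("length mismatch: %d hashes, %d nodes", len(hashes), len(data))
	}
	for i := range hashes {
		if h := crypto.Keccak256(data[i]); !bytes.Equal(h, hashes[i]) {
			return fmt.Errorf("node %d: hash mismatch: have %x, want %x", i, h, hashes[i])
		}
	}
	return nil
}

func main() {}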
// Tests that the transaction receipts can be retrieved based on hashes. // Tests that the transaction receipts can be retrieved based on hashes.
func TestGetBlockReceipts66(t *testing.T) { testGetBlockReceipts(t, ETH66) }
func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) } func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) }
func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) } func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) }
@ -613,13 +491,13 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash())) receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
} }
// Send the hash request and verify the response // Send the hash request and verify the response
p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{ p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{
RequestId: 123, RequestId: 123,
GetReceiptsPacket: hashes, GetReceiptsRequest: hashes,
}) })
if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{ if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{
RequestId: 123, RequestId: 123,
ReceiptsPacket: receipts, ReceiptsResponse: receipts,
}); err != nil { }); err != nil {
t.Errorf("receipts mismatch: %v", err) t.Errorf("receipts mismatch: %v", err)
} }

Some files were not shown because too many files have changed in this diff