Merge pull request #54 from openrelayxyz/feature/merge-v1.10.22

Feature/merge v1.10.22
This commit is contained in:
AusIV 2022-08-22 13:57:01 -05:00 committed by GitHub
commit 79609688a5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
233 changed files with 3592 additions and 2488 deletions

View File

@ -14,7 +14,7 @@ COPY go.sum /go-ethereum/
RUN cd /go-ethereum && go mod download RUN cd /go-ethereum && go mod download
ADD . /go-ethereum ADD . /go-ethereum
RUN cd /go-ethereum && go run build/ci.go install ./cmd/geth RUN cd /go-ethereum && go run build/ci.go install -static ./cmd/geth
# Pull Geth into a second stage deploy alpine container # Pull Geth into a second stage deploy alpine container
FROM alpine:latest FROM alpine:latest

View File

@ -14,7 +14,7 @@ COPY go.sum /go-ethereum/
RUN cd /go-ethereum && go mod download RUN cd /go-ethereum && go mod download
ADD . /go-ethereum ADD . /go-ethereum
RUN cd /go-ethereum && go run build/ci.go install RUN cd /go-ethereum && go run build/ci.go install -static
# Pull all binaries into a second stage deploy alpine container # Pull all binaries into a second stage deploy alpine container
FROM alpine:latest FROM alpine:latest

View File

@ -95,7 +95,7 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
args = event.Inputs args = event.Inputs
} }
if args == nil { if args == nil {
return nil, errors.New("abi: could not locate named method or event") return nil, fmt.Errorf("abi: could not locate named method or event: %s", name)
} }
return args, nil return args, nil
} }

View File

@ -68,7 +68,8 @@ type SimulatedBackend struct {
pendingState *state.StateDB // Currently pending state that will be the active on request pendingState *state.StateDB // Currently pending state that will be the active on request
pendingReceipts types.Receipts // Currently receipts for the pending block pendingReceipts types.Receipts // Currently receipts for the pending block
events *filters.EventSystem // Event system for filtering log events live events *filters.EventSystem // for filtering log events live
filterSystem *filters.FilterSystem // for filtering database logs
config *params.ChainConfig config *params.ChainConfig
} }
@ -86,7 +87,11 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis
blockchain: blockchain, blockchain: blockchain,
config: genesis.Config, config: genesis.Config,
} }
backend.events = filters.NewEventSystem(&filterBackend{database, blockchain, backend}, false)
filterBackend := &filterBackend{database, blockchain, backend}
backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{})
backend.events = filters.NewEventSystem(backend.filterSystem, false)
backend.rollback(blockchain.CurrentBlock()) backend.rollback(blockchain.CurrentBlock())
return backend return backend
} }
@ -609,7 +614,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
// User specified the legacy gas field, convert to 1559 gas typing // User specified the legacy gas field, convert to 1559 gas typing
call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice call.GasFeeCap, call.GasTipCap = call.GasPrice, call.GasPrice
} else { } else {
// User specified 1559 gas feilds (or none), use those // User specified 1559 gas fields (or none), use those
if call.GasFeeCap == nil { if call.GasFeeCap == nil {
call.GasFeeCap = new(big.Int) call.GasFeeCap = new(big.Int)
} }
@ -689,7 +694,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
var filter *filters.Filter var filter *filters.Filter
if query.BlockHash != nil { if query.BlockHash != nil {
// Block filter requested, construct a single-shot filter // Block filter requested, construct a single-shot filter
filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain, b}, *query.BlockHash, query.Addresses, query.Topics) filter = b.filterSystem.NewBlockFilter(*query.BlockHash, query.Addresses, query.Topics)
} else { } else {
// Initialize unset filter boundaries to run from genesis to chain head // Initialize unset filter boundaries to run from genesis to chain head
from := int64(0) from := int64(0)
@ -701,7 +706,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
to = query.ToBlock.Int64() to = query.ToBlock.Int64()
} }
// Construct the range filter // Construct the range filter
filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain, b}, from, to, query.Addresses, query.Topics) filter = b.filterSystem.NewRangeFilter(from, to, query.Addresses, query.Topics)
} }
// Run the filter and return all the logs // Run the filter and return all the logs
logs, err := filter.Logs(ctx) logs, err := filter.Logs(ctx)
@ -828,6 +833,7 @@ type filterBackend struct {
} }
func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db } func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") } func (fb *filterBackend) EventMux() *event.TypeMux { panic("not supported") }
func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) { func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumber) (*types.Header, error) {
@ -853,19 +859,8 @@ func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (typ
return rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config()), nil return rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config()), nil
} }
func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
number := rawdb.ReadHeaderNumber(fb.db, hash) logs := rawdb.ReadLogs(fb.db, hash, number, fb.bc.Config())
if number == nil {
return nil, nil
}
receipts := rawdb.ReadReceipts(fb.db, hash, *number, fb.bc.Config())
if receipts == nil {
return nil, nil
}
logs := make([][]*types.Log, len(receipts))
for i, receipt := range receipts {
logs[i] = receipt.Logs
}
return logs, nil return logs, nil
} }

View File

@ -99,7 +99,7 @@ func mustArrayToByteSlice(value reflect.Value) reflect.Value {
func set(dst, src reflect.Value) error { func set(dst, src reflect.Value) error {
dstType, srcType := dst.Type(), src.Type() dstType, srcType := dst.Type(), src.Type()
switch { switch {
case dstType.Kind() == reflect.Interface && dst.Elem().IsValid(): case dstType.Kind() == reflect.Interface && dst.Elem().IsValid() && (dst.Elem().Type().Kind() == reflect.Ptr || dst.Elem().CanSet()):
return set(dst.Elem(), src) return set(dst.Elem(), src)
case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}): case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}):
return set(dst.Elem(), src) return set(dst.Elem(), src)

View File

@ -32,7 +32,7 @@ type reflectTest struct {
var reflectTests = []reflectTest{ var reflectTests = []reflectTest{
{ {
name: "OneToOneCorrespondance", name: "OneToOneCorrespondence",
args: []string{"fieldA"}, args: []string{"fieldA"},
struc: struct { struc: struct {
FieldA int `abi:"fieldA"` FieldA int `abi:"fieldA"`

View File

@ -352,6 +352,11 @@ func TestMethodMultiReturn(t *testing.T) {
&[]interface{}{&expected.Int, &expected.String}, &[]interface{}{&expected.Int, &expected.String},
"", "",
"Can unpack into a slice", "Can unpack into a slice",
}, {
&[]interface{}{&bigint, ""},
&[]interface{}{&expected.Int, expected.String},
"",
"Can unpack into a slice without indirection",
}, { }, {
&[2]interface{}{&bigint, new(string)}, &[2]interface{}{&bigint, new(string)},
&[2]interface{}{&expected.Int, &expected.String}, &[2]interface{}{&expected.Int, &expected.String},

View File

@ -318,7 +318,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
func TestUpdatedKeyfileContents(t *testing.T) { func TestUpdatedKeyfileContents(t *testing.T) {
t.Parallel() t.Parallel()
// Create a temporary kesytore to test with // Create a temporary keystore to test with
rand.Seed(time.Now().UnixNano()) rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int())) dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP) ks := NewKeyStore(dir, LightScryptN, LightScryptP)

View File

@ -39,7 +39,7 @@ type fileCache struct {
func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) { func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) {
t0 := time.Now() t0 := time.Now()
// List all the failes from the keystore folder // List all the files from the keystore folder
files, err := os.ReadDir(keyDir) files, err := os.ReadDir(keyDir)
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
@ -61,7 +61,7 @@ func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, er
log.Trace("Ignoring file on account scan", "path", path) log.Trace("Ignoring file on account scan", "path", path)
continue continue
} }
// Gather the set of all and fresly modified files // Gather the set of all and freshly modified files
all.Add(path) all.Add(path)
info, err := fi.Info() info, err := fi.Info()

View File

@ -214,7 +214,7 @@ func TestSignRace(t *testing.T) {
// Tests that the wallet notifier loop starts and stops correctly based on the // Tests that the wallet notifier loop starts and stops correctly based on the
// addition and removal of wallet event subscriptions. // addition and removal of wallet event subscriptions.
func TestWalletNotifierLifecycle(t *testing.T) { func TestWalletNotifierLifecycle(t *testing.T) {
// Create a temporary kesytore to test with // Create a temporary keystore to test with
_, ks := tmpKeyStore(t, false) _, ks := tmpKeyStore(t, false)
// Ensure that the notification updater is not running yet // Ensure that the notification updater is not running yet

View File

@ -196,10 +196,10 @@ func (w *trezorDriver) trezorDerive(derivationPath []uint32) (common.Address, er
if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil { if _, err := w.trezorExchange(&trezor.EthereumGetAddress{AddressN: derivationPath}, address); err != nil {
return common.Address{}, err return common.Address{}, err
} }
if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary fomats if addr := address.GetAddressBin(); len(addr) > 0 { // Older firmwares use binary formats
return common.BytesToAddress(addr), nil return common.BytesToAddress(addr), nil
} }
if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal fomats if addr := address.GetAddressHex(); len(addr) > 0 { // Newer firmwares use hexadecimal formats
return common.HexToAddress(addr), nil return common.HexToAddress(addr), nil
} }
return common.Address{}, errors.New("missing derived address") return common.Address{}, errors.New("missing derived address")

View File

@ -380,7 +380,7 @@ func (w *wallet) selfDerive() {
// of legacy-ledger, the first account on the legacy-path will // of legacy-ledger, the first account on the legacy-path will
// be shown to the user, even if we don't actively track it // be shown to the user, even if we don't actively track it
if i < len(nextAddrs)-1 { if i < len(nextAddrs)-1 {
w.log.Info("Skipping trakcking first account on legacy path, use personal.deriveAccount(<url>,<path>, false) to track", w.log.Info("Skipping tracking first account on legacy path, use personal.deriveAccount(<url>,<path>, false) to track",
"path", path, "address", nextAddrs[i]) "path", path, "address", nextAddrs[i])
break break
} }

View File

@ -1,19 +1,19 @@
# This file contains sha256 checksums of optional build dependencies. # This file contains sha256 checksums of optional build dependencies.
4525aa6b0e3cecb57845f4060a7075aafc9ab752bb7b6b4cf8a212d43078e1e4 go1.18.4.src.tar.gz 9920d3306a1ac536cdd2c796d6cb3c54bc559c226fc3cc39c32f1e0bd7f50d2a go1.18.5.src.tar.gz
315e1a2b21a827c68da1b7f492b5dcbe81d8df8a79ebe50922df9588893f87f0 go1.18.4.darwin-amd64.tar.gz 828eeca8b5abea3e56921df8fa4b1101380a5ebcfee10acbc8ffe7ec0bf5876b go1.18.5.darwin-amd64.tar.gz
04eed623d5143ffa44965b618b509e0beccccfd3a4a1bfebc0cdbcf906046769 go1.18.4.darwin-arm64.tar.gz 923a377c6fc9a2c789f5db61c24b8f64133f7889056897449891f256af34065f go1.18.5.darwin-arm64.tar.gz
e5244fdcd6b6eaf785dbd8c6e02b4804a4d00409e7edecc63cd59fc8f37c34c5 go1.18.4.freebsd-386.tar.gz c3d90264a706e2d88cfb44126dc6f0d008a48f00732e04bc377cea1a2b716a7c go1.18.5.freebsd-386.tar.gz
fb00f8aaffcc80e0a2bd39db1d8e8e21ef0a691c564f7b7601383dd6adad4042 go1.18.4.freebsd-amd64.tar.gz 0de23843c568d388bc0f0e390a8966938cccaae0d74b698325f7175bac04e0c6 go1.18.5.freebsd-amd64.tar.gz
418232d905e18ece6cb13c4884bb1c68963d7d3b4d889671b3e5be8bd4059862 go1.18.4.linux-386.tar.gz 0c44f85d146c6f98c34e8ff436a42af22e90e36fe232d3d9d3101f23fd61362b go1.18.5.linux-386.tar.gz
c9b099b68d93f5c5c8a8844a89f8db07eaa58270e3a1e01804f17f4cf8df02f5 go1.18.4.linux-amd64.tar.gz 9e5de37f9c49942c601b191ac5fba404b868bfc21d446d6960acc12283d6e5f2 go1.18.5.linux-amd64.tar.gz
35014d92b50d97da41dade965df7ebeb9a715da600206aa59ce1b2d05527421f go1.18.4.linux-arm64.tar.gz 006f6622718212363fa1ff004a6ab4d87bbbe772ec5631bab7cac10be346e4f1 go1.18.5.linux-arm64.tar.gz
7dfeab572e49638b0f3d9901457f0622c27b73301c2b99db9f5e9568ff40460c go1.18.4.linux-armv6l.tar.gz d5ac34ac5f060a5274319aa04b7b11e41b123bd7887d64efb5f44ead236957af go1.18.5.linux-armv6l.tar.gz
f80acc4dc054ddc89ccc4869664e331bf16e0ac6e07830e94554162e66f66961 go1.18.4.linux-ppc64le.tar.gz 2e37fb9c7cbaedd4e729492d658aa4cde821fc94117391a8105c13b25ca1c84b go1.18.5.linux-ppc64le.tar.gz
7e932f36e8f347feea2e706dcd32c1a464b1e5767ab2928ae460a37a975fe4a3 go1.18.4.linux-s390x.tar.gz e3d536e7873639f85353e892444f83b14cb6670603961f215986ae8e28e8e07a go1.18.5.linux-s390x.tar.gz
6343010a13ab783e553786b3cc3b4d63080128f61cf1e963505139c71ca66a0d go1.18.4.windows-386.zip 7b3142ec0c5db991e7f73a231662a92429b90ee151fe47557acb566d8d9ae4d3 go1.18.5.windows-386.zip
dfb93c517e050ba0cfc066802b38a8e7cda2ef666efd634859356b33f543cc49 go1.18.4.windows-amd64.zip 73753620602d4b4469770040c53db55e5dd6af2ad07ecc18f71f164c3224eaad go1.18.5.windows-amd64.zip
7d0d7b73592019d276f2bd44ee3cda0d8bd99356fdbf04fdb40c263518108ae4 go1.18.4.windows-arm64.zip 4d154626affff12ef73ea1017af0e5b52dbc839ef92f6f9e76cf4f71278a5744 go1.18.5.windows-arm64.zip
658078aaaf7608693f37c4cf1380b2af418ab8b2d23fdb33e7e2d4339328590e golangci-lint-1.46.2-darwin-amd64.tar.gz 658078aaaf7608693f37c4cf1380b2af418ab8b2d23fdb33e7e2d4339328590e golangci-lint-1.46.2-darwin-amd64.tar.gz
81f9b4afd62ec5e612ef8bc3b1d612a88b56ff289874831845cdad394427385f golangci-lint-1.46.2-darwin-arm64.tar.gz 81f9b4afd62ec5e612ef8bc3b1d612a88b56ff289874831845cdad394427385f golangci-lint-1.46.2-darwin-arm64.tar.gz

View File

@ -149,7 +149,7 @@ var (
// This is the version of go that will be downloaded by // This is the version of go that will be downloaded by
// //
// go run ci.go install -dlgo // go run ci.go install -dlgo
dlgoVersion = "1.18.4" dlgoVersion = "1.18.5"
) )
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -203,6 +203,7 @@ func doInstall(cmdline []string) {
dlgo = flag.Bool("dlgo", false, "Download Go and build with it") dlgo = flag.Bool("dlgo", false, "Download Go and build with it")
arch = flag.String("arch", "", "Architecture to cross build for") arch = flag.String("arch", "", "Architecture to cross build for")
cc = flag.String("cc", "", "C compiler to cross build with") cc = flag.String("cc", "", "C compiler to cross build with")
staticlink = flag.Bool("static", false, "Create statically-linked executable")
) )
flag.CommandLine.Parse(cmdline) flag.CommandLine.Parse(cmdline)
@ -213,9 +214,12 @@ func doInstall(cmdline []string) {
tc.Root = build.DownloadGo(csdb, dlgoVersion) tc.Root = build.DownloadGo(csdb, dlgoVersion)
} }
// Disable CLI markdown doc generation in release builds.
buildTags := []string{"urfave_cli_no_docs"}
// Configure the build. // Configure the build.
env := build.Env() env := build.Env()
gobuild := tc.Go("build", buildFlags(env)...) gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...)
// arm64 CI builders are memory-constrained and can't handle concurrent builds, // arm64 CI builders are memory-constrained and can't handle concurrent builds,
// better disable it. This check isn't the best, it should probably // better disable it. This check isn't the best, it should probably
@ -224,9 +228,6 @@ func doInstall(cmdline []string) {
gobuild.Args = append(gobuild.Args, "-p", "1") gobuild.Args = append(gobuild.Args, "-p", "1")
} }
// Disable CLI markdown doc generation in release builds.
gobuild.Args = append(gobuild.Args, "-tags", "urfave_cli_no_docs")
// We use -trimpath to avoid leaking local paths into the built executables. // We use -trimpath to avoid leaking local paths into the built executables.
gobuild.Args = append(gobuild.Args, "-trimpath") gobuild.Args = append(gobuild.Args, "-trimpath")
@ -251,7 +252,7 @@ func doInstall(cmdline []string) {
} }
// buildFlags returns the go tool flags for building. // buildFlags returns the go tool flags for building.
func buildFlags(env build.Environment) (flags []string) { func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (flags []string) {
var ld []string var ld []string
if env.Commit != "" { if env.Commit != "" {
ld = append(ld, "-X", "main.gitCommit="+env.Commit) ld = append(ld, "-X", "main.gitCommit="+env.Commit)
@ -262,14 +263,24 @@ func buildFlags(env build.Environment) (flags []string) {
if runtime.GOOS == "darwin" { if runtime.GOOS == "darwin" {
ld = append(ld, "-s") ld = append(ld, "-s")
} }
if runtime.GOOS == "linux" {
// Enforce the stacksize to 8M, which is the case on most platforms apart from // Enforce the stacksize to 8M, which is the case on most platforms apart from
// alpine Linux. // alpine Linux.
if runtime.GOOS == "linux" { extld := []string{"-Wl,-z,stack-size=0x800000"}
ld = append(ld, "-extldflags", "-Wl,-z,stack-size=0x800000") if staticLinking {
extld = append(extld, "-static")
// Under static linking, use of certain glibc features must be
// disabled to avoid shared library dependencies.
buildTags = append(buildTags, "osusergo", "netgo")
}
ld = append(ld, "-extldflags", "'"+strings.Join(extld, " ")+"'")
} }
if len(ld) > 0 { if len(ld) > 0 {
flags = append(flags, "-ldflags", strings.Join(ld, " ")) flags = append(flags, "-ldflags", strings.Join(ld, " "))
} }
if len(buildTags) > 0 {
flags = append(flags, "-tags", strings.Join(buildTags, ","))
}
return flags return flags
} }
@ -597,7 +608,7 @@ func doDocker(cmdline []string) {
} }
if mismatch { if mismatch {
// Build numbers mismatching, retry in a short time to // Build numbers mismatching, retry in a short time to
// avoid concurrent failes in both publisher images. If // avoid concurrent fails in both publisher images. If
// however the retry failed too, it means the concurrent // however the retry failed too, it means the concurrent
// builder is still crunching, let that do the publish. // builder is still crunching, let that do the publish.
if i == 0 { if i == 0 {

View File

@ -96,12 +96,12 @@ func (c *Chain) Head() *types.Block {
return c.blocks[c.Len()-1] return c.blocks[c.Len()-1]
} }
func (c *Chain) GetHeaders(req GetBlockHeaders) (BlockHeaders, error) { func (c *Chain) GetHeaders(req *GetBlockHeaders) ([]*types.Header, error) {
if req.Amount < 1 { if req.Amount < 1 {
return nil, fmt.Errorf("no block headers requested") return nil, fmt.Errorf("no block headers requested")
} }
headers := make(BlockHeaders, req.Amount) headers := make([]*types.Header, req.Amount)
var blockNumber uint64 var blockNumber uint64
// range over blocks to check if our chain has the requested header // range over blocks to check if our chain has the requested header
@ -139,7 +139,7 @@ func loadChain(chainfile string, genesis string) (*Chain, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
gblock := gen.ToBlock(nil) gblock := gen.ToBlock()
blocks, err := blocksFromFile(chainfile, gblock) blocks, err := blocksFromFile(chainfile, gblock)
if err != nil { if err != nil {

View File

@ -21,6 +21,7 @@ import (
"strconv" "strconv"
"testing" "testing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -140,18 +141,18 @@ func TestChain_GetHeaders(t *testing.T) {
var tests = []struct { var tests = []struct {
req GetBlockHeaders req GetBlockHeaders
expected BlockHeaders expected []*types.Header
}{ }{
{ {
req: GetBlockHeaders{ req: GetBlockHeaders{
Origin: eth.HashOrNumber{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Number: uint64(2), Origin: eth.HashOrNumber{Number: uint64(2)},
},
Amount: uint64(5), Amount: uint64(5),
Skip: 1, Skip: 1,
Reverse: false, Reverse: false,
}, },
expected: BlockHeaders{ },
expected: []*types.Header{
chain.blocks[2].Header(), chain.blocks[2].Header(),
chain.blocks[4].Header(), chain.blocks[4].Header(),
chain.blocks[6].Header(), chain.blocks[6].Header(),
@ -161,14 +162,14 @@ func TestChain_GetHeaders(t *testing.T) {
}, },
{ {
req: GetBlockHeaders{ req: GetBlockHeaders{
Origin: eth.HashOrNumber{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Number: uint64(chain.Len() - 1), Origin: eth.HashOrNumber{Number: uint64(chain.Len() - 1)},
},
Amount: uint64(3), Amount: uint64(3),
Skip: 0, Skip: 0,
Reverse: true, Reverse: true,
}, },
expected: BlockHeaders{ },
expected: []*types.Header{
chain.blocks[chain.Len()-1].Header(), chain.blocks[chain.Len()-1].Header(),
chain.blocks[chain.Len()-2].Header(), chain.blocks[chain.Len()-2].Header(),
chain.blocks[chain.Len()-3].Header(), chain.blocks[chain.Len()-3].Header(),
@ -176,14 +177,14 @@ func TestChain_GetHeaders(t *testing.T) {
}, },
{ {
req: GetBlockHeaders{ req: GetBlockHeaders{
Origin: eth.HashOrNumber{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Hash: chain.Head().Hash(), Origin: eth.HashOrNumber{Hash: chain.Head().Hash()},
},
Amount: uint64(1), Amount: uint64(1),
Skip: 0, Skip: 0,
Reverse: false, Reverse: false,
}, },
expected: BlockHeaders{ },
expected: []*types.Header{
chain.Head().Header(), chain.Head().Header(),
}, },
}, },
@ -191,7 +192,7 @@ func TestChain_GetHeaders(t *testing.T) {
for i, tt := range tests { for i, tt := range tests {
t.Run(strconv.Itoa(i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
headers, err := chain.GetHeaders(tt.req) headers, err := chain.GetHeaders(&tt.req)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -43,21 +43,6 @@ var (
timeout = 20 * time.Second timeout = 20 * time.Second
) )
// Is_66 checks if the node supports the eth66 protocol version,
// and if not, exists the test suite
func (s *Suite) Is_66(t *utesting.T) {
conn, err := s.dial66()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
if err := conn.handshake(); err != nil {
t.Fatalf("handshake failed: %v", err)
}
if conn.negotiatedProtoVersion < 66 {
t.Fail()
}
}
// dial attempts to dial the given node and perform a handshake, // dial attempts to dial the given node and perform a handshake,
// returning the created Conn if successful. // returning the created Conn if successful.
func (s *Suite) dial() (*Conn, error) { func (s *Suite) dial() (*Conn, error) {
@ -76,31 +61,16 @@ func (s *Suite) dial() (*Conn, error) {
} }
// set default p2p capabilities // set default p2p capabilities
conn.caps = []p2p.Cap{ conn.caps = []p2p.Cap{
{Name: "eth", Version: 64}, {Name: "eth", Version: 66},
{Name: "eth", Version: 65}, {Name: "eth", Version: 67},
} }
conn.ourHighestProtoVersion = 65 conn.ourHighestProtoVersion = 67
return &conn, nil return &conn, nil
} }
// dial66 attempts to dial the given node and perform a handshake, // dialSnap creates a connection with snap/1 capability.
// returning the created Conn with additional eth66 capabilities if
// successful
func (s *Suite) dial66() (*Conn, error) {
conn, err := s.dial()
if err != nil {
return nil, fmt.Errorf("dial failed: %v", err)
}
conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66})
conn.ourHighestProtoVersion = 66
return conn, nil
}
// dial66 attempts to dial the given node and perform a handshake,
// returning the created Conn with additional snap/1 capabilities if
// successful.
func (s *Suite) dialSnap() (*Conn, error) { func (s *Suite) dialSnap() (*Conn, error) {
conn, err := s.dial66() conn, err := s.dial()
if err != nil { if err != nil {
return nil, fmt.Errorf("dial failed: %v", err) return nil, fmt.Errorf("dial failed: %v", err)
} }
@ -235,60 +205,40 @@ loop:
// createSendAndRecvConns creates two connections, one for sending messages to the // createSendAndRecvConns creates two connections, one for sending messages to the
// node, and one for receiving messages from the node. // node, and one for receiving messages from the node.
func (s *Suite) createSendAndRecvConns(isEth66 bool) (*Conn, *Conn, error) { func (s *Suite) createSendAndRecvConns() (*Conn, *Conn, error) {
var ( sendConn, err := s.dial()
sendConn *Conn
recvConn *Conn
err error
)
if isEth66 {
sendConn, err = s.dial66()
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("dial failed: %v", err) return nil, nil, fmt.Errorf("dial failed: %v", err)
} }
recvConn, err = s.dial66() recvConn, err := s.dial()
if err != nil { if err != nil {
sendConn.Close() sendConn.Close()
return nil, nil, fmt.Errorf("dial failed: %v", err) return nil, nil, fmt.Errorf("dial failed: %v", err)
} }
} else {
sendConn, err = s.dial()
if err != nil {
return nil, nil, fmt.Errorf("dial failed: %v", err)
}
recvConn, err = s.dial()
if err != nil {
sendConn.Close()
return nil, nil, fmt.Errorf("dial failed: %v", err)
}
}
return sendConn, recvConn, nil return sendConn, recvConn, nil
} }
func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
if c.negotiatedProtoVersion == 66 {
_, msg := c.readAndServe66(chain, timeout)
return msg
}
return c.readAndServe65(chain, timeout)
}
// readAndServe serves GetBlockHeaders requests while waiting // readAndServe serves GetBlockHeaders requests while waiting
// on another message from the node. // on another message from the node.
func (c *Conn) readAndServe65(chain *Chain, timeout time.Duration) Message { func (c *Conn) readAndServe(chain *Chain, timeout time.Duration) Message {
start := time.Now() start := time.Now()
for time.Since(start) < timeout { for time.Since(start) < timeout {
c.SetReadDeadline(time.Now().Add(5 * time.Second)) c.SetReadDeadline(time.Now().Add(10 * time.Second))
switch msg := c.Read().(type) {
msg := c.Read()
switch msg := msg.(type) {
case *Ping: case *Ping:
c.Write(&Pong{}) c.Write(&Pong{})
case *GetBlockHeaders: case *GetBlockHeaders:
req := *msg headers, err := chain.GetHeaders(msg)
headers, err := chain.GetHeaders(req)
if err != nil { if err != nil {
return errorf("could not get headers for inbound header request: %v", err) return errorf("could not get headers for inbound header request: %v", err)
} }
if err := c.Write(headers); err != nil { resp := &BlockHeaders{
RequestId: msg.ReqID(),
BlockHeadersPacket: eth.BlockHeadersPacket(headers),
}
if err := c.Write(resp); err != nil {
return errorf("could not write to connection: %v", err) return errorf("could not write to connection: %v", err)
} }
default: default:
@ -298,54 +248,25 @@ func (c *Conn) readAndServe65(chain *Chain, timeout time.Duration) Message {
return errorf("no message received within %v", timeout) return errorf("no message received within %v", timeout)
} }
// readAndServe66 serves eth66 GetBlockHeaders requests while waiting
// on another message from the node.
func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) {
start := time.Now()
for time.Since(start) < timeout {
c.SetReadDeadline(time.Now().Add(10 * time.Second))
reqID, msg := c.Read66()
switch msg := msg.(type) {
case *Ping:
c.Write(&Pong{})
case GetBlockHeaders:
headers, err := chain.GetHeaders(msg)
if err != nil {
return 0, errorf("could not get headers for inbound header request: %v", err)
}
resp := &eth.BlockHeadersPacket66{
RequestId: reqID,
BlockHeadersPacket: eth.BlockHeadersPacket(headers),
}
if err := c.Write66(resp, BlockHeaders{}.Code()); err != nil {
return 0, errorf("could not write to connection: %v", err)
}
default:
return reqID, msg
}
}
return 0, errorf("no message received within %v", timeout)
}
// headersRequest executes the given `GetBlockHeaders` request. // headersRequest executes the given `GetBlockHeaders` request.
func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bool, reqID uint64) (BlockHeaders, error) { func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, reqID uint64) ([]*types.Header, error) {
defer c.SetReadDeadline(time.Time{}) defer c.SetReadDeadline(time.Time{})
c.SetReadDeadline(time.Now().Add(20 * time.Second)) c.SetReadDeadline(time.Now().Add(20 * time.Second))
// if on eth66 connection, perform eth66 GetBlockHeaders request
if isEth66 { // write request
return getBlockHeaders66(chain, c, request, reqID) request.RequestId = reqID
}
if err := c.Write(request); err != nil { if err := c.Write(request); err != nil {
return nil, err return nil, fmt.Errorf("could not write to connection: %v", err)
} }
switch msg := c.readAndServe(chain, timeout).(type) {
case *BlockHeaders: // wait for response
return *msg, nil msg := c.waitForResponse(chain, timeout, request.RequestId)
default: resp, ok := msg.(*BlockHeaders)
return nil, fmt.Errorf("invalid message: %s", pretty.Sdump(msg)) if !ok {
return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
} }
headers := []*types.Header(resp.BlockHeadersPacket)
return headers, nil
} }
func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) { func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
@ -357,28 +278,8 @@ func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error
return c.ReadSnap(id) return c.ReadSnap(id)
} }
// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
// write request
packet := eth.GetBlockHeadersPacket(*request)
req := &eth.GetBlockHeadersPacket66{
RequestId: id,
GetBlockHeadersPacket: &packet,
}
if err := conn.Write66(req, GetBlockHeaders{}.Code()); err != nil {
return nil, fmt.Errorf("could not write to connection: %v", err)
}
// wait for response
msg := conn.waitForResponse(chain, timeout, req.RequestId)
headers, ok := msg.(BlockHeaders)
if !ok {
return nil, fmt.Errorf("unexpected message received: %s", pretty.Sdump(msg))
}
return headers, nil
}
// headersMatch returns whether the received headers match the given request // headersMatch returns whether the received headers match the given request
func headersMatch(expected BlockHeaders, headers BlockHeaders) bool { func headersMatch(expected []*types.Header, headers []*types.Header) bool {
return reflect.DeepEqual(expected, headers) return reflect.DeepEqual(expected, headers)
} }
@ -386,8 +287,8 @@ func headersMatch(expected BlockHeaders, headers BlockHeaders) bool {
// request ID is received. // request ID is received.
func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message { func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID uint64) Message {
for { for {
id, msg := c.readAndServe66(chain, timeout) msg := c.readAndServe(chain, timeout)
if id == requestID { if msg.ReqID() == requestID {
return msg return msg
} }
} }
@ -395,9 +296,9 @@ func (c *Conn) waitForResponse(chain *Chain, timeout time.Duration, requestID ui
// sendNextBlock broadcasts the next block in the chain and waits // sendNextBlock broadcasts the next block in the chain and waits
// for the node to propagate the block and import it into its chain. // for the node to propagate the block and import it into its chain.
func (s *Suite) sendNextBlock(isEth66 bool) error { func (s *Suite) sendNextBlock() error {
// set up sending and receiving connections // set up sending and receiving connections
sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) sendConn, recvConn, err := s.createSendAndRecvConns()
if err != nil { if err != nil {
return err return err
} }
@ -420,7 +321,7 @@ func (s *Suite) sendNextBlock(isEth66 bool) error {
return fmt.Errorf("failed to announce block: %v", err) return fmt.Errorf("failed to announce block: %v", err)
} }
// wait for client to update its chain // wait for client to update its chain
if err = s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil { if err = s.waitForBlockImport(recvConn, nextBlock); err != nil {
return fmt.Errorf("failed to receive confirmation of block import: %v", err) return fmt.Errorf("failed to receive confirmation of block import: %v", err)
} }
// update test suite chain // update test suite chain
@ -465,29 +366,22 @@ func (s *Suite) waitAnnounce(conn *Conn, blockAnnouncement *NewBlock) error {
} }
} }
func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block, isEth66 bool) error { func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block) error {
defer conn.SetReadDeadline(time.Time{}) defer conn.SetReadDeadline(time.Time{})
conn.SetReadDeadline(time.Now().Add(20 * time.Second)) conn.SetReadDeadline(time.Now().Add(20 * time.Second))
// create request // create request
req := &GetBlockHeaders{ req := &GetBlockHeaders{
Origin: eth.HashOrNumber{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Hash: block.Hash(), Origin: eth.HashOrNumber{Hash: block.Hash()},
},
Amount: 1, Amount: 1,
},
} }
// loop until BlockHeaders response contains desired block, confirming the // loop until BlockHeaders response contains desired block, confirming the
// node imported the block // node imported the block
for { for {
var (
headers BlockHeaders
err error
)
if isEth66 {
requestID := uint64(54) requestID := uint64(54)
headers, err = conn.headersRequest(req, s.chain, eth66, requestID) headers, err := conn.headersRequest(req, s.chain, requestID)
} else {
headers, err = conn.headersRequest(req, s.chain, eth65, 0)
}
if err != nil { if err != nil {
return fmt.Errorf("GetBlockHeader request failed: %v", err) return fmt.Errorf("GetBlockHeader request failed: %v", err)
} }
@ -503,8 +397,8 @@ func (s *Suite) waitForBlockImport(conn *Conn, block *types.Block, isEth66 bool)
} }
} }
func (s *Suite) oldAnnounce(isEth66 bool) error { func (s *Suite) oldAnnounce() error {
sendConn, receiveConn, err := s.createSendAndRecvConns(isEth66) sendConn, receiveConn, err := s.createSendAndRecvConns()
if err != nil { if err != nil {
return err return err
} }
@ -550,23 +444,13 @@ func (s *Suite) oldAnnounce(isEth66 bool) error {
return nil return nil
} }
func (s *Suite) maliciousHandshakes(t *utesting.T, isEth66 bool) error { func (s *Suite) maliciousHandshakes(t *utesting.T) error {
var ( conn, err := s.dial()
conn *Conn
err error
)
if isEth66 {
conn, err = s.dial66()
if err != nil { if err != nil {
return fmt.Errorf("dial failed: %v", err) return fmt.Errorf("dial failed: %v", err)
} }
} else {
conn, err = s.dial()
if err != nil {
return fmt.Errorf("dial failed: %v", err)
}
}
defer conn.Close() defer conn.Close()
// write hello to client // write hello to client
pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:]
handshakes := []*Hello{ handshakes := []*Hello{
@ -627,18 +511,11 @@ func (s *Suite) maliciousHandshakes(t *utesting.T, isEth66 bool) error {
} }
} }
// dial for the next round // dial for the next round
if isEth66 {
conn, err = s.dial66()
if err != nil {
return fmt.Errorf("dial failed: %v", err)
}
} else {
conn, err = s.dial() conn, err = s.dial()
if err != nil { if err != nil {
return fmt.Errorf("dial failed: %v", err) return fmt.Errorf("dial failed: %v", err)
} }
} }
}
return nil return nil
} }
@ -654,6 +531,7 @@ func (s *Suite) maliciousStatus(conn *Conn) error {
Genesis: s.chain.blocks[0].Hash(), Genesis: s.chain.blocks[0].Hash(),
ForkID: s.chain.ForkID(), ForkID: s.chain.ForkID(),
} }
// get status // get status
msg, err := conn.statusExchange(s.chain, status) msg, err := conn.statusExchange(s.chain, status)
if err != nil { if err != nil {
@ -664,6 +542,7 @@ func (s *Suite) maliciousStatus(conn *Conn) error {
default: default:
return fmt.Errorf("expected status, got: %#v ", msg) return fmt.Errorf("expected status, got: %#v ", msg)
} }
// wait for disconnect // wait for disconnect
switch msg := conn.readAndServe(s.chain, timeout).(type) { switch msg := conn.readAndServe(s.chain, timeout).(type) {
case *Disconnect: case *Disconnect:
@ -675,9 +554,9 @@ func (s *Suite) maliciousStatus(conn *Conn) error {
} }
} }
func (s *Suite) hashAnnounce(isEth66 bool) error { func (s *Suite) hashAnnounce() error {
// create connections // create connections
sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) sendConn, recvConn, err := s.createSendAndRecvConns()
if err != nil { if err != nil {
return fmt.Errorf("failed to create connections: %v", err) return fmt.Errorf("failed to create connections: %v", err)
} }
@ -689,6 +568,7 @@ func (s *Suite) hashAnnounce(isEth66 bool) error {
if err := recvConn.peer(s.chain, nil); err != nil { if err := recvConn.peer(s.chain, nil); err != nil {
return fmt.Errorf("peering failed: %v", err) return fmt.Errorf("peering failed: %v", err)
} }
// create NewBlockHashes announcement // create NewBlockHashes announcement
type anno struct { type anno struct {
Hash common.Hash // Hash of one particular block being announced Hash common.Hash // Hash of one particular block being announced
@ -700,18 +580,11 @@ func (s *Suite) hashAnnounce(isEth66 bool) error {
if err := sendConn.Write(newBlockHash); err != nil { if err := sendConn.Write(newBlockHash); err != nil {
return fmt.Errorf("failed to write to connection: %v", err) return fmt.Errorf("failed to write to connection: %v", err)
} }
// Announcement sent, now wait for a header request // Announcement sent, now wait for a header request
var ( msg := sendConn.Read()
id uint64 blockHeaderReq, ok := msg.(*GetBlockHeaders)
msg Message if !ok {
blockHeaderReq GetBlockHeaders
)
if isEth66 {
id, msg = sendConn.Read66()
switch msg := msg.(type) {
case GetBlockHeaders:
blockHeaderReq = msg
default:
return fmt.Errorf("unexpected %s", pretty.Sdump(msg)) return fmt.Errorf("unexpected %s", pretty.Sdump(msg))
} }
if blockHeaderReq.Amount != 1 { if blockHeaderReq.Amount != 1 {
@ -722,34 +595,14 @@ func (s *Suite) hashAnnounce(isEth66 bool) error {
pretty.Sdump(announcement), pretty.Sdump(announcement),
pretty.Sdump(blockHeaderReq)) pretty.Sdump(blockHeaderReq))
} }
if err := sendConn.Write66(&eth.BlockHeadersPacket66{ err = sendConn.Write(&BlockHeaders{
RequestId: id, RequestId: blockHeaderReq.ReqID(),
BlockHeadersPacket: eth.BlockHeadersPacket{ BlockHeadersPacket: eth.BlockHeadersPacket{nextBlock.Header()},
nextBlock.Header(), })
}, if err != nil {
}, BlockHeaders{}.Code()); err != nil {
return fmt.Errorf("failed to write to connection: %v", err) return fmt.Errorf("failed to write to connection: %v", err)
} }
} else {
msg = sendConn.Read()
switch msg := msg.(type) {
case *GetBlockHeaders:
blockHeaderReq = *msg
default:
return fmt.Errorf("unexpected %s", pretty.Sdump(msg))
}
if blockHeaderReq.Amount != 1 {
return fmt.Errorf("unexpected number of block headers requested: %v", blockHeaderReq.Amount)
}
if blockHeaderReq.Origin.Hash != announcement.Hash {
return fmt.Errorf("unexpected block header requested. Announced:\n %v\n Remote request:\n%v",
pretty.Sdump(announcement),
pretty.Sdump(blockHeaderReq))
}
if err := sendConn.Write(&BlockHeaders{nextBlock.Header()}); err != nil {
return fmt.Errorf("failed to write to connection: %v", err)
}
}
// wait for block announcement // wait for block announcement
msg = recvConn.readAndServe(s.chain, timeout) msg = recvConn.readAndServe(s.chain, timeout)
switch msg := msg.(type) { switch msg := msg.(type) {
@ -762,6 +615,7 @@ func (s *Suite) hashAnnounce(isEth66 bool) error {
return fmt.Errorf("unexpected block hash announcement, wanted %v, got %v", nextBlock.Hash(), return fmt.Errorf("unexpected block hash announcement, wanted %v, got %v", nextBlock.Hash(),
hashes[0].Hash) hashes[0].Hash)
} }
case *NewBlock: case *NewBlock:
// node should only propagate NewBlock without having requested the body if the body is empty // node should only propagate NewBlock without having requested the body if the body is empty
nextBlockBody := nextBlock.Body() nextBlockBody := nextBlock.Body()
@ -780,7 +634,7 @@ func (s *Suite) hashAnnounce(isEth66 bool) error {
return fmt.Errorf("unexpected: %s", pretty.Sdump(msg)) return fmt.Errorf("unexpected: %s", pretty.Sdump(msg))
} }
// confirm node imported block // confirm node imported block
if err := s.waitForBlockImport(recvConn, nextBlock, isEth66); err != nil { if err := s.waitForBlockImport(recvConn, nextBlock); err != nil {
return fmt.Errorf("error waiting for node to import new block: %v", err) return fmt.Errorf("error waiting for node to import new block: %v", err)
} }
// update the chain // update the chain

View File

@ -21,32 +21,40 @@ import "github.com/ethereum/go-ethereum/eth/protocols/snap"
// GetAccountRange represents an account range query. // GetAccountRange represents an account range query.
type GetAccountRange snap.GetAccountRangePacket type GetAccountRange snap.GetAccountRangePacket
func (g GetAccountRange) Code() int { return 33 } func (msg GetAccountRange) Code() int { return 33 }
func (msg GetAccountRange) ReqID() uint64 { return msg.ID }
type AccountRange snap.AccountRangePacket type AccountRange snap.AccountRangePacket
func (g AccountRange) Code() int { return 34 } func (msg AccountRange) Code() int { return 34 }
func (msg AccountRange) ReqID() uint64 { return msg.ID }
type GetStorageRanges snap.GetStorageRangesPacket type GetStorageRanges snap.GetStorageRangesPacket
func (g GetStorageRanges) Code() int { return 35 } func (msg GetStorageRanges) Code() int { return 35 }
func (msg GetStorageRanges) ReqID() uint64 { return msg.ID }
type StorageRanges snap.StorageRangesPacket type StorageRanges snap.StorageRangesPacket
func (g StorageRanges) Code() int { return 36 } func (msg StorageRanges) Code() int { return 36 }
func (msg StorageRanges) ReqID() uint64 { return msg.ID }
type GetByteCodes snap.GetByteCodesPacket type GetByteCodes snap.GetByteCodesPacket
func (g GetByteCodes) Code() int { return 37 } func (msg GetByteCodes) Code() int { return 37 }
func (msg GetByteCodes) ReqID() uint64 { return msg.ID }
type ByteCodes snap.ByteCodesPacket type ByteCodes snap.ByteCodesPacket
func (g ByteCodes) Code() int { return 38 } func (msg ByteCodes) Code() int { return 38 }
func (msg ByteCodes) ReqID() uint64 { return msg.ID }
type GetTrieNodes snap.GetTrieNodesPacket type GetTrieNodes snap.GetTrieNodesPacket
func (g GetTrieNodes) Code() int { return 39 } func (msg GetTrieNodes) Code() int { return 39 }
func (msg GetTrieNodes) ReqID() uint64 { return msg.ID }
type TrieNodes snap.TrieNodesPacket type TrieNodes snap.TrieNodesPacket
func (g TrieNodes) Code() int { return 40 } func (msg TrieNodes) Code() int { return 40 }
func (msg TrieNodes) ReqID() uint64 { return msg.ID }

View File

@ -49,79 +49,30 @@ func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, e
}, nil }, nil
} }
func (s *Suite) AllEthTests() []utesting.Test {
return []utesting.Test{
// status
{Name: "TestStatus65", Fn: s.TestStatus65},
{Name: "TestStatus66", Fn: s.TestStatus66},
// get block headers
{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65},
{Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66},
{Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66},
{Name: "TestSameRequestID66", Fn: s.TestSameRequestID66},
{Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66},
// get block bodies
{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65},
{Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66},
// broadcast
{Name: "TestBroadcast65", Fn: s.TestBroadcast65},
{Name: "TestBroadcast66", Fn: s.TestBroadcast66},
{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65},
{Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66},
{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65},
{Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66},
{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65},
{Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66},
// malicious handshakes + status
{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65},
{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65},
{Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66},
{Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66},
// test transactions
{Name: "TestTransaction65", Fn: s.TestTransaction65},
{Name: "TestTransaction66", Fn: s.TestTransaction66},
{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65},
{Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66},
{Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66},
{Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66},
}
}
func (s *Suite) EthTests() []utesting.Test { func (s *Suite) EthTests() []utesting.Test {
return []utesting.Test{ return []utesting.Test{
{Name: "TestStatus65", Fn: s.TestStatus65}, // status
{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65}, {Name: "TestStatus", Fn: s.TestStatus},
{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65}, // get block headers
{Name: "TestBroadcast65", Fn: s.TestBroadcast65}, {Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65}, {Name: "TestSimultaneousRequests", Fn: s.TestSimultaneousRequests},
{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65}, {Name: "TestSameRequestID", Fn: s.TestSameRequestID},
{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65}, {Name: "TestZeroRequestID", Fn: s.TestZeroRequestID},
{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65}, // get block bodies
{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65}, {Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
{Name: "TestTransaction65", Fn: s.TestTransaction65}, // broadcast
{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65}, {Name: "TestBroadcast", Fn: s.TestBroadcast},
} {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
} {Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
{Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce},
func (s *Suite) Eth66Tests() []utesting.Test { // malicious handshakes + status
return []utesting.Test{ {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
// only proceed with eth66 test suite if node supports eth 66 protocol {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
{Name: "TestStatus66", Fn: s.TestStatus66}, // test transactions
{Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66}, {Name: "TestTransaction", Fn: s.TestTransaction},
{Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66}, {Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
{Name: "TestSameRequestID66", Fn: s.TestSameRequestID66}, {Name: "TestLargeTxRequest", Fn: s.TestLargeTxRequest},
{Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66}, {Name: "TestNewPooledTxs", Fn: s.TestNewPooledTxs},
{Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66},
{Name: "TestBroadcast66", Fn: s.TestBroadcast66},
{Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66},
{Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66},
{Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66},
{Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66},
{Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66},
{Name: "TestTransaction66", Fn: s.TestTransaction66},
{Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66},
{Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66},
{Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66},
} }
} }
@ -135,14 +86,9 @@ func (s *Suite) SnapTests() []utesting.Test {
} }
} }
var ( // TestStatus attempts to connect to the given node and exchange
eth66 = true // indicates whether suite should negotiate eth66 connection // a status message with it on the eth protocol.
eth65 = false // indicates whether suite should negotiate eth65 connection or below. func (s *Suite) TestStatus(t *utesting.T) {
)
// TestStatus65 attempts to connect to the given node and exchange
// a status message with it.
func (s *Suite) TestStatus65(t *utesting.T) {
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -153,79 +99,32 @@ func (s *Suite) TestStatus65(t *utesting.T) {
} }
} }
// TestStatus66 attempts to connect to the given node and exchange // TestGetBlockHeaders tests whether the given node can respond to
// a status message with it on the eth66 protocol. // an eth `GetBlockHeaders` request and that the response is accurate.
func (s *Suite) TestStatus66(t *utesting.T) { func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
conn, err := s.dial66()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err := conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err)
}
}
// TestGetBlockHeaders65 tests whether the given node can respond to
// a `GetBlockHeaders` request accurately.
func (s *Suite) TestGetBlockHeaders65(t *utesting.T) {
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
} }
defer conn.Close() defer conn.Close()
if err := conn.peer(s.chain, nil); err != nil {
t.Fatalf("handshake(s) failed: %v", err)
}
// write request
req := &GetBlockHeaders{
Origin: eth.HashOrNumber{
Hash: s.chain.blocks[1].Hash(),
},
Amount: 2,
Skip: 1,
Reverse: false,
}
headers, err := conn.headersRequest(req, s.chain, eth65, 0)
if err != nil {
t.Fatalf("GetBlockHeaders request failed: %v", err)
}
// check for correct headers
expected, err := s.chain.GetHeaders(*req)
if err != nil {
t.Fatalf("failed to get headers for given request: %v", err)
}
if !headersMatch(expected, headers) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers)
}
}
// TestGetBlockHeaders66 tests whether the given node can respond to
// an eth66 `GetBlockHeaders` request and that the response is accurate.
func (s *Suite) TestGetBlockHeaders66(t *utesting.T) {
conn, err := s.dial66()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err = conn.peer(s.chain, nil); err != nil { if err = conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err) t.Fatalf("peering failed: %v", err)
} }
// write request // write request
req := &GetBlockHeaders{ req := &GetBlockHeaders{
Origin: eth.HashOrNumber{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Hash: s.chain.blocks[1].Hash(), Origin: eth.HashOrNumber{Hash: s.chain.blocks[1].Hash()},
},
Amount: 2, Amount: 2,
Skip: 1, Skip: 1,
Reverse: false, Reverse: false,
},
} }
headers, err := conn.headersRequest(req, s.chain, eth66, 33) headers, err := conn.headersRequest(req, s.chain, 33)
if err != nil { if err != nil {
t.Fatalf("could not get block headers: %v", err) t.Fatalf("could not get block headers: %v", err)
} }
// check for correct headers // check for correct headers
expected, err := s.chain.GetHeaders(*req) expected, err := s.chain.GetHeaders(req)
if err != nil { if err != nil {
t.Fatalf("failed to get headers for given request: %v", err) t.Fatalf("failed to get headers for given request: %v", err)
} }
@ -234,12 +133,12 @@ func (s *Suite) TestGetBlockHeaders66(t *utesting.T) {
} }
} }
// TestSimultaneousRequests66 sends two simultaneous `GetBlockHeader` requests from // TestSimultaneousRequests sends two simultaneous `GetBlockHeader` requests from
// the same connection with different request IDs and checks to make sure the node // the same connection with different request IDs and checks to make sure the node
// responds with the correct headers per request. // responds with the correct headers per request.
func (s *Suite) TestSimultaneousRequests66(t *utesting.T) { func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
// create a connection // create a connection
conn, err := s.dial66() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
} }
@ -247,8 +146,9 @@ func (s *Suite) TestSimultaneousRequests66(t *utesting.T) {
if err := conn.peer(s.chain, nil); err != nil { if err := conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err) t.Fatalf("peering failed: %v", err)
} }
// create two requests // create two requests
req1 := &eth.GetBlockHeadersPacket66{ req1 := &GetBlockHeaders{
RequestId: uint64(111), RequestId: uint64(111),
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
@ -259,7 +159,7 @@ func (s *Suite) TestSimultaneousRequests66(t *utesting.T) {
Reverse: false, Reverse: false,
}, },
} }
req2 := &eth.GetBlockHeadersPacket66{ req2 := &GetBlockHeaders{
RequestId: uint64(222), RequestId: uint64(222),
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
@ -270,46 +170,49 @@ func (s *Suite) TestSimultaneousRequests66(t *utesting.T) {
Reverse: false, Reverse: false,
}, },
} }
// write the first request // write the first request
if err := conn.Write66(req1, GetBlockHeaders{}.Code()); err != nil { if err := conn.Write(req1); err != nil {
t.Fatalf("failed to write to connection: %v", err) t.Fatalf("failed to write to connection: %v", err)
} }
// write the second request // write the second request
if err := conn.Write66(req2, GetBlockHeaders{}.Code()); err != nil { if err := conn.Write(req2); err != nil {
t.Fatalf("failed to write to connection: %v", err) t.Fatalf("failed to write to connection: %v", err)
} }
// wait for responses // wait for responses
msg := conn.waitForResponse(s.chain, timeout, req1.RequestId) msg := conn.waitForResponse(s.chain, timeout, req1.RequestId)
headers1, ok := msg.(BlockHeaders) headers1, ok := msg.(*BlockHeaders)
if !ok { if !ok {
t.Fatalf("unexpected %s", pretty.Sdump(msg)) t.Fatalf("unexpected %s", pretty.Sdump(msg))
} }
msg = conn.waitForResponse(s.chain, timeout, req2.RequestId) msg = conn.waitForResponse(s.chain, timeout, req2.RequestId)
headers2, ok := msg.(BlockHeaders) headers2, ok := msg.(*BlockHeaders)
if !ok { if !ok {
t.Fatalf("unexpected %s", pretty.Sdump(msg)) t.Fatalf("unexpected %s", pretty.Sdump(msg))
} }
// check received headers for accuracy // check received headers for accuracy
expected1, err := s.chain.GetHeaders(GetBlockHeaders(*req1.GetBlockHeadersPacket)) expected1, err := s.chain.GetHeaders(req1)
if err != nil { if err != nil {
t.Fatalf("failed to get expected headers for request 1: %v", err) t.Fatalf("failed to get expected headers for request 1: %v", err)
} }
expected2, err := s.chain.GetHeaders(GetBlockHeaders(*req2.GetBlockHeadersPacket)) expected2, err := s.chain.GetHeaders(req2)
if err != nil { if err != nil {
t.Fatalf("failed to get expected headers for request 2: %v", err) t.Fatalf("failed to get expected headers for request 2: %v", err)
} }
if !headersMatch(expected1, headers1) { if !headersMatch(expected1, headers1.BlockHeadersPacket) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
} }
if !headersMatch(expected2, headers2) { if !headersMatch(expected2, headers2.BlockHeadersPacket) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
} }
} }
// TestSameRequestID66 sends two requests with the same request ID to a // TestSameRequestID sends two requests with the same request ID to a
// single node. // single node.
func (s *Suite) TestSameRequestID66(t *utesting.T) { func (s *Suite) TestSameRequestID(t *utesting.T) {
conn, err := s.dial66() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
} }
@ -319,7 +222,7 @@ func (s *Suite) TestSameRequestID66(t *utesting.T) {
} }
// create requests // create requests
reqID := uint64(1234) reqID := uint64(1234)
request1 := &eth.GetBlockHeadersPacket66{ request1 := &GetBlockHeaders{
RequestId: reqID, RequestId: reqID,
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
@ -328,7 +231,7 @@ func (s *Suite) TestSameRequestID66(t *utesting.T) {
Amount: 2, Amount: 2,
}, },
} }
request2 := &eth.GetBlockHeadersPacket66{ request2 := &GetBlockHeaders{
RequestId: reqID, RequestId: reqID,
GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Origin: eth.HashOrNumber{ Origin: eth.HashOrNumber{
@ -337,45 +240,48 @@ func (s *Suite) TestSameRequestID66(t *utesting.T) {
Amount: 2, Amount: 2,
}, },
} }
// write the requests // write the requests
if err = conn.Write66(request1, GetBlockHeaders{}.Code()); err != nil { if err = conn.Write(request1); err != nil {
t.Fatalf("failed to write to connection: %v", err) t.Fatalf("failed to write to connection: %v", err)
} }
if err = conn.Write66(request2, GetBlockHeaders{}.Code()); err != nil { if err = conn.Write(request2); err != nil {
t.Fatalf("failed to write to connection: %v", err) t.Fatalf("failed to write to connection: %v", err)
} }
// wait for responses // wait for responses
msg := conn.waitForResponse(s.chain, timeout, reqID) msg := conn.waitForResponse(s.chain, timeout, reqID)
headers1, ok := msg.(BlockHeaders) headers1, ok := msg.(*BlockHeaders)
if !ok { if !ok {
t.Fatalf("unexpected %s", pretty.Sdump(msg)) t.Fatalf("unexpected %s", pretty.Sdump(msg))
} }
msg = conn.waitForResponse(s.chain, timeout, reqID) msg = conn.waitForResponse(s.chain, timeout, reqID)
headers2, ok := msg.(BlockHeaders) headers2, ok := msg.(*BlockHeaders)
if !ok { if !ok {
t.Fatalf("unexpected %s", pretty.Sdump(msg)) t.Fatalf("unexpected %s", pretty.Sdump(msg))
} }
// check if headers match // check if headers match
expected1, err := s.chain.GetHeaders(GetBlockHeaders(*request1.GetBlockHeadersPacket)) expected1, err := s.chain.GetHeaders(request1)
if err != nil { if err != nil {
t.Fatalf("failed to get expected block headers: %v", err) t.Fatalf("failed to get expected block headers: %v", err)
} }
expected2, err := s.chain.GetHeaders(GetBlockHeaders(*request2.GetBlockHeadersPacket)) expected2, err := s.chain.GetHeaders(request2)
if err != nil { if err != nil {
t.Fatalf("failed to get expected block headers: %v", err) t.Fatalf("failed to get expected block headers: %v", err)
} }
if !headersMatch(expected1, headers1) { if !headersMatch(expected1, headers1.BlockHeadersPacket) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1) t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected1, headers1)
} }
if !headersMatch(expected2, headers2) { if !headersMatch(expected2, headers2.BlockHeadersPacket) {
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2) t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected2, headers2)
} }
} }
// TestZeroRequestID_66 checks that a message with a request ID of zero is still handled // TestZeroRequestID checks that a message with a request ID of zero is still handled
// by the node. // by the node.
func (s *Suite) TestZeroRequestID66(t *utesting.T) { func (s *Suite) TestZeroRequestID(t *utesting.T) {
conn, err := s.dial66() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
} }
@ -384,16 +290,16 @@ func (s *Suite) TestZeroRequestID66(t *utesting.T) {
t.Fatalf("peering failed: %v", err) t.Fatalf("peering failed: %v", err)
} }
req := &GetBlockHeaders{ req := &GetBlockHeaders{
Origin: eth.HashOrNumber{ GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
Number: 0, Origin: eth.HashOrNumber{Number: 0},
},
Amount: 2, Amount: 2,
},
} }
headers, err := conn.headersRequest(req, s.chain, eth66, 0) headers, err := conn.headersRequest(req, s.chain, 0)
if err != nil { if err != nil {
t.Fatalf("failed to get block headers: %v", err) t.Fatalf("failed to get block headers: %v", err)
} }
expected, err := s.chain.GetHeaders(*req) expected, err := s.chain.GetHeaders(req)
if err != nil { if err != nil {
t.Fatalf("failed to get expected block headers: %v", err) t.Fatalf("failed to get expected block headers: %v", err)
} }
@ -402,9 +308,9 @@ func (s *Suite) TestZeroRequestID66(t *utesting.T) {
} }
} }
// TestGetBlockBodies65 tests whether the given node can respond to // TestGetBlockBodies tests whether the given node can respond to
// a `GetBlockBodies` request and that the response is accurate. // a `GetBlockBodies` request and that the response is accurate.
func (s *Suite) TestGetBlockBodies65(t *utesting.T) { func (s *Suite) TestGetBlockBodies(t *utesting.T) {
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -415,126 +321,39 @@ func (s *Suite) TestGetBlockBodies65(t *utesting.T) {
} }
// create block bodies request // create block bodies request
req := &GetBlockBodies{ req := &GetBlockBodies{
s.chain.blocks[54].Hash(),
s.chain.blocks[75].Hash(),
}
if err := conn.Write(req); err != nil {
t.Fatalf("could not write to connection: %v", err)
}
// wait for response
switch msg := conn.readAndServe(s.chain, timeout).(type) {
case *BlockBodies:
t.Logf("received %d block bodies", len(*msg))
if len(*msg) != len(*req) {
t.Fatalf("wrong bodies in response: expected %d bodies, "+
"got %d", len(*req), len(*msg))
}
default:
t.Fatalf("unexpected: %s", pretty.Sdump(msg))
}
}
// TestGetBlockBodies66 tests whether the given node can respond to
// a `GetBlockBodies` request and that the response is accurate over
// the eth66 protocol.
func (s *Suite) TestGetBlockBodies66(t *utesting.T) {
conn, err := s.dial66()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err := conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err)
}
// create block bodies request
req := &eth.GetBlockBodiesPacket66{
RequestId: uint64(55), RequestId: uint64(55),
GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ GetBlockBodiesPacket: eth.GetBlockBodiesPacket{
s.chain.blocks[54].Hash(), s.chain.blocks[54].Hash(),
s.chain.blocks[75].Hash(), s.chain.blocks[75].Hash(),
}, },
} }
if err := conn.Write66(req, GetBlockBodies{}.Code()); err != nil { if err := conn.Write(req); err != nil {
t.Fatalf("could not write to connection: %v", err) t.Fatalf("could not write to connection: %v", err)
} }
// wait for block bodies response // wait for block bodies response
msg := conn.waitForResponse(s.chain, timeout, req.RequestId) msg := conn.waitForResponse(s.chain, timeout, req.RequestId)
blockBodies, ok := msg.(BlockBodies) resp, ok := msg.(*BlockBodies)
if !ok { if !ok {
t.Fatalf("unexpected: %s", pretty.Sdump(msg)) t.Fatalf("unexpected: %s", pretty.Sdump(msg))
} }
t.Logf("received %d block bodies", len(blockBodies)) bodies := resp.BlockBodiesPacket
if len(blockBodies) != len(req.GetBlockBodiesPacket) { t.Logf("received %d block bodies", len(bodies))
if len(bodies) != len(req.GetBlockBodiesPacket) {
t.Fatalf("wrong bodies in response: expected %d bodies, "+ t.Fatalf("wrong bodies in response: expected %d bodies, "+
"got %d", len(req.GetBlockBodiesPacket), len(blockBodies)) "got %d", len(req.GetBlockBodiesPacket), len(bodies))
} }
} }
// TestBroadcast65 tests whether a block announcement is correctly // TestBroadcast tests whether a block announcement is correctly
// propagated to the given node's peer(s). // propagated to the node's peers.
func (s *Suite) TestBroadcast65(t *utesting.T) { func (s *Suite) TestBroadcast(t *utesting.T) {
if err := s.sendNextBlock(eth65); err != nil { if err := s.sendNextBlock(); err != nil {
t.Fatalf("block broadcast failed: %v", err) t.Fatalf("block broadcast failed: %v", err)
} }
} }
// TestBroadcast66 tests whether a block announcement is correctly // TestLargeAnnounce tests the announcement mechanism with a large block.
// propagated to the given node's peer(s) on the eth66 protocol. func (s *Suite) TestLargeAnnounce(t *utesting.T) {
func (s *Suite) TestBroadcast66(t *utesting.T) {
if err := s.sendNextBlock(eth66); err != nil {
t.Fatalf("block broadcast failed: %v", err)
}
}
// TestLargeAnnounce65 tests the announcement mechanism with a large block.
func (s *Suite) TestLargeAnnounce65(t *utesting.T) {
nextBlock := len(s.chain.blocks)
blocks := []*NewBlock{
{
Block: largeBlock(),
TD: s.fullChain.TotalDifficultyAt(nextBlock),
},
{
Block: s.fullChain.blocks[nextBlock],
TD: largeNumber(2),
},
{
Block: largeBlock(),
TD: largeNumber(2),
},
}
for i, blockAnnouncement := range blocks {
t.Logf("Testing malicious announcement: %v\n", i)
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
if err = conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err)
}
if err = conn.Write(blockAnnouncement); err != nil {
t.Fatalf("could not write to connection: %v", err)
}
// Invalid announcement, check that peer disconnected
switch msg := conn.readAndServe(s.chain, time.Second*8).(type) {
case *Disconnect:
case *Error:
break
default:
t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg))
}
conn.Close()
}
// Test the last block as a valid block
if err := s.sendNextBlock(eth65); err != nil {
t.Fatalf("failed to broadcast next block: %v", err)
}
}
// TestLargeAnnounce66 tests the announcement mechanism with a large
// block over the eth66 protocol.
func (s *Suite) TestLargeAnnounce66(t *utesting.T) {
nextBlock := len(s.chain.blocks) nextBlock := len(s.chain.blocks)
blocks := []*NewBlock{ blocks := []*NewBlock{
{ {
@ -553,7 +372,7 @@ func (s *Suite) TestLargeAnnounce66(t *utesting.T) {
for i, blockAnnouncement := range blocks[0:3] { for i, blockAnnouncement := range blocks[0:3] {
t.Logf("Testing malicious announcement: %v\n", i) t.Logf("Testing malicious announcement: %v\n", i)
conn, err := s.dial66() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
} }
@ -564,7 +383,7 @@ func (s *Suite) TestLargeAnnounce66(t *utesting.T) {
t.Fatalf("could not write to connection: %v", err) t.Fatalf("could not write to connection: %v", err)
} }
// Invalid announcement, check that peer disconnected // Invalid announcement, check that peer disconnected
switch msg := conn.readAndServe(s.chain, time.Second*8).(type) { switch msg := conn.readAndServe(s.chain, 8*time.Second).(type) {
case *Disconnect: case *Disconnect:
case *Error: case *Error:
break break
@ -574,58 +393,35 @@ func (s *Suite) TestLargeAnnounce66(t *utesting.T) {
conn.Close() conn.Close()
} }
// Test the last block as a valid block // Test the last block as a valid block
if err := s.sendNextBlock(eth66); err != nil { if err := s.sendNextBlock(); err != nil {
t.Fatalf("failed to broadcast next block: %v", err) t.Fatalf("failed to broadcast next block: %v", err)
} }
} }
// TestOldAnnounce65 tests the announcement mechanism with an old block. // TestOldAnnounce tests the announcement mechanism with an old block.
func (s *Suite) TestOldAnnounce65(t *utesting.T) { func (s *Suite) TestOldAnnounce(t *utesting.T) {
if err := s.oldAnnounce(eth65); err != nil { if err := s.oldAnnounce(); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
// TestOldAnnounce66 tests the announcement mechanism with an old block, // TestBlockHashAnnounce sends a new block hash announcement and expects
// over the eth66 protocol.
func (s *Suite) TestOldAnnounce66(t *utesting.T) {
if err := s.oldAnnounce(eth66); err != nil {
t.Fatal(err)
}
}
// TestBlockHashAnnounce65 sends a new block hash announcement and expects
// the node to perform a `GetBlockHeaders` request. // the node to perform a `GetBlockHeaders` request.
func (s *Suite) TestBlockHashAnnounce65(t *utesting.T) { func (s *Suite) TestBlockHashAnnounce(t *utesting.T) {
if err := s.hashAnnounce(eth65); err != nil { if err := s.hashAnnounce(); err != nil {
t.Fatalf("block hash announcement failed: %v", err) t.Fatalf("block hash announcement failed: %v", err)
} }
} }
// TestBlockHashAnnounce66 sends a new block hash announcement and expects // TestMaliciousHandshake tries to send malicious data during the handshake.
// the node to perform a `GetBlockHeaders` request. func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
func (s *Suite) TestBlockHashAnnounce66(t *utesting.T) { if err := s.maliciousHandshakes(t); err != nil {
if err := s.hashAnnounce(eth66); err != nil {
t.Fatalf("block hash announcement failed: %v", err)
}
}
// TestMaliciousHandshake65 tries to send malicious data during the handshake.
func (s *Suite) TestMaliciousHandshake65(t *utesting.T) {
if err := s.maliciousHandshakes(t, eth65); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
// TestMaliciousHandshake66 tries to send malicious data during the handshake. // TestMaliciousStatus sends a status package with a large total difficulty.
func (s *Suite) TestMaliciousHandshake66(t *utesting.T) { func (s *Suite) TestMaliciousStatus(t *utesting.T) {
if err := s.maliciousHandshakes(t, eth66); err != nil {
t.Fatal(err)
}
}
// TestMaliciousStatus65 sends a status package with a large total difficulty.
func (s *Suite) TestMaliciousStatus65(t *utesting.T) {
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -637,58 +433,28 @@ func (s *Suite) TestMaliciousStatus65(t *utesting.T) {
} }
} }
// TestMaliciousStatus66 sends a status package with a large total // TestTransaction sends a valid transaction to the node and
// difficulty over the eth66 protocol.
func (s *Suite) TestMaliciousStatus66(t *utesting.T) {
conn, err := s.dial66()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err := s.maliciousStatus(conn); err != nil {
t.Fatal(err)
}
}
// TestTransaction65 sends a valid transaction to the node and
// checks if the transaction gets propagated. // checks if the transaction gets propagated.
func (s *Suite) TestTransaction65(t *utesting.T) { func (s *Suite) TestTransaction(t *utesting.T) {
if err := s.sendSuccessfulTxs(t, eth65); err != nil { if err := s.sendSuccessfulTxs(t); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
// TestTransaction66 sends a valid transaction to the node and // TestMaliciousTx sends several invalid transactions and tests whether
// checks if the transaction gets propagated.
func (s *Suite) TestTransaction66(t *utesting.T) {
if err := s.sendSuccessfulTxs(t, eth66); err != nil {
t.Fatal(err)
}
}
// TestMaliciousTx65 sends several invalid transactions and tests whether
// the node will propagate them. // the node will propagate them.
func (s *Suite) TestMaliciousTx65(t *utesting.T) { func (s *Suite) TestMaliciousTx(t *utesting.T) {
if err := s.sendMaliciousTxs(t, eth65); err != nil { if err := s.sendMaliciousTxs(t); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
// TestMaliciousTx66 sends several invalid transactions and tests whether // TestLargeTxRequest tests whether a node can fulfill a large GetPooledTransactions
// the node will propagate them.
func (s *Suite) TestMaliciousTx66(t *utesting.T) {
if err := s.sendMaliciousTxs(t, eth66); err != nil {
t.Fatal(err)
}
}
// TestLargeTxRequest66 tests whether a node can fulfill a large GetPooledTransactions
// request. // request.
func (s *Suite) TestLargeTxRequest66(t *utesting.T) { func (s *Suite) TestLargeTxRequest(t *utesting.T) {
// send the next block to ensure the node is no longer syncing and // send the next block to ensure the node is no longer syncing and
// is able to accept txs // is able to accept txs
if err := s.sendNextBlock(eth66); err != nil { if err := s.sendNextBlock(); err != nil {
t.Fatalf("failed to send next block: %v", err) t.Fatalf("failed to send next block: %v", err)
} }
// send 2000 transactions to the node // send 2000 transactions to the node
@ -701,7 +467,7 @@ func (s *Suite) TestLargeTxRequest66(t *utesting.T) {
} }
// set up connection to receive to ensure node is peered with the receiving connection // set up connection to receive to ensure node is peered with the receiving connection
// before tx request is sent // before tx request is sent
conn, err := s.dial66() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
} }
@ -714,17 +480,17 @@ func (s *Suite) TestLargeTxRequest66(t *utesting.T) {
for _, hash := range hashMap { for _, hash := range hashMap {
hashes = append(hashes, hash) hashes = append(hashes, hash)
} }
getTxReq := &eth.GetPooledTransactionsPacket66{ getTxReq := &GetPooledTransactions{
RequestId: 1234, RequestId: 1234,
GetPooledTransactionsPacket: hashes, GetPooledTransactionsPacket: hashes,
} }
if err = conn.Write66(getTxReq, GetPooledTransactions{}.Code()); err != nil { if err = conn.Write(getTxReq); err != nil {
t.Fatalf("could not write to conn: %v", err) t.Fatalf("could not write to conn: %v", err)
} }
// check that all received transactions match those that were sent to node // check that all received transactions match those that were sent to node
switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) { switch msg := conn.waitForResponse(s.chain, timeout, getTxReq.RequestId).(type) {
case PooledTransactions: case *PooledTransactions:
for _, gotTx := range msg { for _, gotTx := range msg.PooledTransactionsPacket {
if _, exists := hashMap[gotTx.Hash()]; !exists { if _, exists := hashMap[gotTx.Hash()]; !exists {
t.Fatalf("unexpected tx received: %v", gotTx.Hash()) t.Fatalf("unexpected tx received: %v", gotTx.Hash())
} }
@ -734,12 +500,12 @@ func (s *Suite) TestLargeTxRequest66(t *utesting.T) {
} }
} }
// TestNewPooledTxs_66 tests whether a node will do a GetPooledTransactions // TestNewPooledTxs tests whether a node will do a GetPooledTransactions
// request upon receiving a NewPooledTransactionHashes announcement. // request upon receiving a NewPooledTransactionHashes announcement.
func (s *Suite) TestNewPooledTxs66(t *utesting.T) { func (s *Suite) TestNewPooledTxs(t *utesting.T) {
// send the next block to ensure the node is no longer syncing and // send the next block to ensure the node is no longer syncing and
// is able to accept txs // is able to accept txs
if err := s.sendNextBlock(eth66); err != nil { if err := s.sendNextBlock(); err != nil {
t.Fatalf("failed to send next block: %v", err) t.Fatalf("failed to send next block: %v", err)
} }
@ -757,7 +523,7 @@ func (s *Suite) TestNewPooledTxs66(t *utesting.T) {
announce := NewPooledTransactionHashes(hashes) announce := NewPooledTransactionHashes(hashes)
// send announcement // send announcement
conn, err := s.dial66() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
} }
@ -771,11 +537,11 @@ func (s *Suite) TestNewPooledTxs66(t *utesting.T) {
// wait for GetPooledTxs request // wait for GetPooledTxs request
for { for {
_, msg := conn.readAndServe66(s.chain, timeout) msg := conn.readAndServe(s.chain, timeout)
switch msg := msg.(type) { switch msg := msg.(type) {
case GetPooledTransactions: case *GetPooledTransactions:
if len(msg) != len(hashes) { if len(msg.GetPooledTransactionsPacket) != len(hashes) {
t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg)) t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsPacket))
} }
return return
// ignore propagated txs from previous tests // ignore propagated txs from previous tests

View File

@ -45,7 +45,7 @@ func TestEthSuite(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("could not create new test suite: %v", err) t.Fatalf("could not create new test suite: %v", err)
} }
for _, test := range suite.Eth66Tests() { for _, test := range suite.EthTests() {
t.Run(test.Name, func(t *testing.T) { t.Run(test.Name, func(t *testing.T) {
result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout) result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
if result[0].Failed { if result[0].Failed {

View File

@ -32,7 +32,7 @@ import (
//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") //var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
func (s *Suite) sendSuccessfulTxs(t *utesting.T, isEth66 bool) error { func (s *Suite) sendSuccessfulTxs(t *utesting.T) error {
tests := []*types.Transaction{ tests := []*types.Transaction{
getNextTxFromChain(s), getNextTxFromChain(s),
unknownTx(s), unknownTx(s),
@ -48,15 +48,15 @@ func (s *Suite) sendSuccessfulTxs(t *utesting.T, isEth66 bool) error {
prevTx = tests[i-1] prevTx = tests[i-1]
} }
// write tx to connection // write tx to connection
if err := sendSuccessfulTx(s, tx, prevTx, isEth66); err != nil { if err := sendSuccessfulTx(s, tx, prevTx); err != nil {
return fmt.Errorf("send successful tx test failed: %v", err) return fmt.Errorf("send successful tx test failed: %v", err)
} }
} }
return nil return nil
} }
func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction, isEth66 bool) error { func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction) error {
sendConn, recvConn, err := s.createSendAndRecvConns(isEth66) sendConn, recvConn, err := s.createSendAndRecvConns()
if err != nil { if err != nil {
return err return err
} }
@ -73,8 +73,10 @@ func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction
if err = recvConn.peer(s.chain, nil); err != nil { if err = recvConn.peer(s.chain, nil); err != nil {
return fmt.Errorf("peering failed: %v", err) return fmt.Errorf("peering failed: %v", err)
} }
// update last nonce seen // update last nonce seen
nonce = tx.Nonce() nonce = tx.Nonce()
// Wait for the transaction announcement // Wait for the transaction announcement
for { for {
switch msg := recvConn.readAndServe(s.chain, timeout).(type) { switch msg := recvConn.readAndServe(s.chain, timeout).(type) {
@ -114,7 +116,7 @@ func sendSuccessfulTx(s *Suite, tx *types.Transaction, prevTx *types.Transaction
} }
} }
func (s *Suite) sendMaliciousTxs(t *utesting.T, isEth66 bool) error { func (s *Suite) sendMaliciousTxs(t *utesting.T) error {
badTxs := []*types.Transaction{ badTxs := []*types.Transaction{
getOldTxFromChain(s), getOldTxFromChain(s),
invalidNonceTx(s), invalidNonceTx(s),
@ -122,16 +124,9 @@ func (s *Suite) sendMaliciousTxs(t *utesting.T, isEth66 bool) error {
hugeGasPrice(s), hugeGasPrice(s),
hugeData(s), hugeData(s),
} }
// setup receiving connection before sending malicious txs // setup receiving connection before sending malicious txs
var ( recvConn, err := s.dial()
recvConn *Conn
err error
)
if isEth66 {
recvConn, err = s.dial66()
} else {
recvConn, err = s.dial()
}
if err != nil { if err != nil {
return fmt.Errorf("dial failed: %v", err) return fmt.Errorf("dial failed: %v", err)
} }
@ -139,9 +134,10 @@ func (s *Suite) sendMaliciousTxs(t *utesting.T, isEth66 bool) error {
if err = recvConn.peer(s.chain, nil); err != nil { if err = recvConn.peer(s.chain, nil); err != nil {
return fmt.Errorf("peering failed: %v", err) return fmt.Errorf("peering failed: %v", err)
} }
for i, tx := range badTxs { for i, tx := range badTxs {
t.Logf("Testing malicious tx propagation: %v\n", i) t.Logf("Testing malicious tx propagation: %v\n", i)
if err = sendMaliciousTx(s, tx, isEth66); err != nil { if err = sendMaliciousTx(s, tx); err != nil {
return fmt.Errorf("malicious tx test failed:\ntx: %v\nerror: %v", tx, err) return fmt.Errorf("malicious tx test failed:\ntx: %v\nerror: %v", tx, err)
} }
} }
@ -149,17 +145,8 @@ func (s *Suite) sendMaliciousTxs(t *utesting.T, isEth66 bool) error {
return checkMaliciousTxPropagation(s, badTxs, recvConn) return checkMaliciousTxPropagation(s, badTxs, recvConn)
} }
func sendMaliciousTx(s *Suite, tx *types.Transaction, isEth66 bool) error { func sendMaliciousTx(s *Suite, tx *types.Transaction) error {
// setup connection conn, err := s.dial()
var (
conn *Conn
err error
)
if isEth66 {
conn, err = s.dial66()
} else {
conn, err = s.dial()
}
if err != nil { if err != nil {
return fmt.Errorf("dial failed: %v", err) return fmt.Errorf("dial failed: %v", err)
} }
@ -167,6 +154,7 @@ func sendMaliciousTx(s *Suite, tx *types.Transaction, isEth66 bool) error {
if err = conn.peer(s.chain, nil); err != nil { if err = conn.peer(s.chain, nil); err != nil {
return fmt.Errorf("peering failed: %v", err) return fmt.Errorf("peering failed: %v", err)
} }
// write malicious tx // write malicious tx
if err = conn.Write(&Transactions{tx}); err != nil { if err = conn.Write(&Transactions{tx}); err != nil {
return fmt.Errorf("failed to write to connection: %v", err) return fmt.Errorf("failed to write to connection: %v", err)
@ -182,7 +170,7 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction
txMsg := Transactions(txs) txMsg := Transactions(txs)
t.Logf("sending %d txs\n", len(txs)) t.Logf("sending %d txs\n", len(txs))
sendConn, recvConn, err := s.createSendAndRecvConns(true) sendConn, recvConn, err := s.createSendAndRecvConns()
if err != nil { if err != nil {
return err return err
} }
@ -194,15 +182,19 @@ func sendMultipleSuccessfulTxs(t *utesting.T, s *Suite, txs []*types.Transaction
if err = recvConn.peer(s.chain, nil); err != nil { if err = recvConn.peer(s.chain, nil); err != nil {
return fmt.Errorf("peering failed: %v", err) return fmt.Errorf("peering failed: %v", err)
} }
// Send the transactions // Send the transactions
if err = sendConn.Write(&txMsg); err != nil { if err = sendConn.Write(&txMsg); err != nil {
return fmt.Errorf("failed to write message to connection: %v", err) return fmt.Errorf("failed to write message to connection: %v", err)
} }
// update nonce // update nonce
nonce = txs[len(txs)-1].Nonce() nonce = txs[len(txs)-1].Nonce()
// Wait for the transaction announcement(s) and make sure all sent txs are being propagated
// Wait for the transaction announcement(s) and make sure all sent txs are being propagated.
// all txs should be announced within 3 announcements.
recvHashes := make([]common.Hash, 0) recvHashes := make([]common.Hash, 0)
// all txs should be announced within 3 announcements
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
switch msg := recvConn.readAndServe(s.chain, timeout).(type) { switch msg := recvConn.readAndServe(s.chain, timeout).(type) {
case *Transactions: case *Transactions:

View File

@ -29,6 +29,7 @@ import (
type Message interface { type Message interface {
Code() int Code() int
ReqID() uint64
} }
type Error struct { type Error struct {
@ -37,9 +38,11 @@ type Error struct {
func (e *Error) Unwrap() error { return e.err } func (e *Error) Unwrap() error { return e.err }
func (e *Error) Error() string { return e.err.Error() } func (e *Error) Error() string { return e.err.Error() }
func (e *Error) Code() int { return -1 }
func (e *Error) String() string { return e.Error() } func (e *Error) String() string { return e.Error() }
func (e *Error) Code() int { return -1 }
func (e *Error) ReqID() uint64 { return 0 }
func errorf(format string, args ...interface{}) *Error { func errorf(format string, args ...interface{}) *Error {
return &Error{fmt.Errorf(format, args...)} return &Error{fmt.Errorf(format, args...)}
} }
@ -56,73 +59,88 @@ type Hello struct {
Rest []rlp.RawValue `rlp:"tail"` Rest []rlp.RawValue `rlp:"tail"`
} }
func (h Hello) Code() int { return 0x00 } func (msg Hello) Code() int { return 0x00 }
func (msg Hello) ReqID() uint64 { return 0 }
// Disconnect is the RLP structure for a disconnect message. // Disconnect is the RLP structure for a disconnect message.
type Disconnect struct { type Disconnect struct {
Reason p2p.DiscReason Reason p2p.DiscReason
} }
func (d Disconnect) Code() int { return 0x01 } func (msg Disconnect) Code() int { return 0x01 }
func (msg Disconnect) ReqID() uint64 { return 0 }
type Ping struct{} type Ping struct{}
func (p Ping) Code() int { return 0x02 } func (msg Ping) Code() int { return 0x02 }
func (msg Ping) ReqID() uint64 { return 0 }
type Pong struct{} type Pong struct{}
func (p Pong) Code() int { return 0x03 } func (msg Pong) Code() int { return 0x03 }
func (msg Pong) ReqID() uint64 { return 0 }
// Status is the network packet for the status message for eth/64 and later. // Status is the network packet for the status message for eth/64 and later.
type Status eth.StatusPacket type Status eth.StatusPacket
func (s Status) Code() int { return 16 } func (msg Status) Code() int { return 16 }
func (msg Status) ReqID() uint64 { return 0 }
// NewBlockHashes is the network packet for the block announcements. // NewBlockHashes is the network packet for the block announcements.
type NewBlockHashes eth.NewBlockHashesPacket type NewBlockHashes eth.NewBlockHashesPacket
func (nbh NewBlockHashes) Code() int { return 17 } func (msg NewBlockHashes) Code() int { return 17 }
func (msg NewBlockHashes) ReqID() uint64 { return 0 }
type Transactions eth.TransactionsPacket type Transactions eth.TransactionsPacket
func (t Transactions) Code() int { return 18 } func (msg Transactions) Code() int { return 18 }
func (msg Transactions) ReqID() uint64 { return 18 }
// GetBlockHeaders represents a block header query. // GetBlockHeaders represents a block header query.
type GetBlockHeaders eth.GetBlockHeadersPacket type GetBlockHeaders eth.GetBlockHeadersPacket66
func (g GetBlockHeaders) Code() int { return 19 } func (msg GetBlockHeaders) Code() int { return 19 }
func (msg GetBlockHeaders) ReqID() uint64 { return msg.RequestId }
type BlockHeaders eth.BlockHeadersPacket type BlockHeaders eth.BlockHeadersPacket66
func (bh BlockHeaders) Code() int { return 20 } func (msg BlockHeaders) Code() int { return 20 }
func (msg BlockHeaders) ReqID() uint64 { return msg.RequestId }
// GetBlockBodies represents a GetBlockBodies request // GetBlockBodies represents a GetBlockBodies request
type GetBlockBodies eth.GetBlockBodiesPacket type GetBlockBodies eth.GetBlockBodiesPacket66
func (gbb GetBlockBodies) Code() int { return 21 } func (msg GetBlockBodies) Code() int { return 21 }
func (msg GetBlockBodies) ReqID() uint64 { return msg.RequestId }
// BlockBodies is the network packet for block content distribution. // BlockBodies is the network packet for block content distribution.
type BlockBodies eth.BlockBodiesPacket type BlockBodies eth.BlockBodiesPacket66
func (bb BlockBodies) Code() int { return 22 } func (msg BlockBodies) Code() int { return 22 }
func (msg BlockBodies) ReqID() uint64 { return msg.RequestId }
// NewBlock is the network packet for the block propagation message. // NewBlock is the network packet for the block propagation message.
type NewBlock eth.NewBlockPacket type NewBlock eth.NewBlockPacket
func (nb NewBlock) Code() int { return 23 } func (msg NewBlock) Code() int { return 23 }
func (msg NewBlock) ReqID() uint64 { return 0 }
// NewPooledTransactionHashes is the network packet for the tx hash propagation message. // NewPooledTransactionHashes is the network packet for the tx hash propagation message.
type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket
func (nb NewPooledTransactionHashes) Code() int { return 24 } func (msg NewPooledTransactionHashes) Code() int { return 24 }
func (msg NewPooledTransactionHashes) ReqID() uint64 { return 0 }
type GetPooledTransactions eth.GetPooledTransactionsPacket type GetPooledTransactions eth.GetPooledTransactionsPacket66
func (gpt GetPooledTransactions) Code() int { return 25 } func (msg GetPooledTransactions) Code() int { return 25 }
func (msg GetPooledTransactions) ReqID() uint64 { return msg.RequestId }
type PooledTransactions eth.PooledTransactionsPacket type PooledTransactions eth.PooledTransactionsPacket66
func (pt PooledTransactions) Code() int { return 26 } func (msg PooledTransactions) Code() int { return 26 }
func (msg PooledTransactions) ReqID() uint64 { return msg.RequestId }
// Conn represents an individual connection with a peer // Conn represents an individual connection with a peer
type Conn struct { type Conn struct {
@ -135,62 +153,13 @@ type Conn struct {
caps []p2p.Cap caps []p2p.Cap
} }
// Read reads an eth packet from the connection. // Read reads an eth66 packet from the connection.
func (c *Conn) Read() Message { func (c *Conn) Read() Message {
code, rawData, _, err := c.Conn.Read() code, rawData, _, err := c.Conn.Read()
if err != nil { if err != nil {
return errorf("could not read from connection: %v", err) return errorf("could not read from connection: %v", err)
} }
var msg Message
switch int(code) {
case (Hello{}).Code():
msg = new(Hello)
case (Ping{}).Code():
msg = new(Ping)
case (Pong{}).Code():
msg = new(Pong)
case (Disconnect{}).Code():
msg = new(Disconnect)
case (Status{}).Code():
msg = new(Status)
case (GetBlockHeaders{}).Code():
msg = new(GetBlockHeaders)
case (BlockHeaders{}).Code():
msg = new(BlockHeaders)
case (GetBlockBodies{}).Code():
msg = new(GetBlockBodies)
case (BlockBodies{}).Code():
msg = new(BlockBodies)
case (NewBlock{}).Code():
msg = new(NewBlock)
case (NewBlockHashes{}).Code():
msg = new(NewBlockHashes)
case (Transactions{}).Code():
msg = new(Transactions)
case (NewPooledTransactionHashes{}).Code():
msg = new(NewPooledTransactionHashes)
case (GetPooledTransactions{}.Code()):
msg = new(GetPooledTransactions)
case (PooledTransactions{}.Code()):
msg = new(PooledTransactions)
default:
return errorf("invalid message code: %d", code)
}
// if message is devp2p, decode here
if err := rlp.DecodeBytes(rawData, msg); err != nil {
return errorf("could not rlp decode message: %v", err)
}
return msg
}
// Read66 reads an eth66 packet from the connection.
func (c *Conn) Read66() (uint64, Message) {
code, rawData, _, err := c.Conn.Read()
if err != nil {
return 0, errorf("could not read from connection: %v", err)
}
var msg Message var msg Message
switch int(code) { switch int(code) {
case (Hello{}).Code(): case (Hello{}).Code():
@ -206,27 +175,27 @@ func (c *Conn) Read66() (uint64, Message) {
case (GetBlockHeaders{}).Code(): case (GetBlockHeaders{}).Code():
ethMsg := new(eth.GetBlockHeadersPacket66) ethMsg := new(eth.GetBlockHeadersPacket66)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return 0, errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return ethMsg.RequestId, GetBlockHeaders(*ethMsg.GetBlockHeadersPacket) return (*GetBlockHeaders)(ethMsg)
case (BlockHeaders{}).Code(): case (BlockHeaders{}).Code():
ethMsg := new(eth.BlockHeadersPacket66) ethMsg := new(eth.BlockHeadersPacket66)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return 0, errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return ethMsg.RequestId, BlockHeaders(ethMsg.BlockHeadersPacket) return (*BlockHeaders)(ethMsg)
case (GetBlockBodies{}).Code(): case (GetBlockBodies{}).Code():
ethMsg := new(eth.GetBlockBodiesPacket66) ethMsg := new(eth.GetBlockBodiesPacket66)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return 0, errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return ethMsg.RequestId, GetBlockBodies(ethMsg.GetBlockBodiesPacket) return (*GetBlockBodies)(ethMsg)
case (BlockBodies{}).Code(): case (BlockBodies{}).Code():
ethMsg := new(eth.BlockBodiesPacket66) ethMsg := new(eth.BlockBodiesPacket66)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return 0, errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return ethMsg.RequestId, BlockBodies(ethMsg.BlockBodiesPacket) return (*BlockBodies)(ethMsg)
case (NewBlock{}).Code(): case (NewBlock{}).Code():
msg = new(NewBlock) msg = new(NewBlock)
case (NewBlockHashes{}).Code(): case (NewBlockHashes{}).Code():
@ -238,26 +207,26 @@ func (c *Conn) Read66() (uint64, Message) {
case (GetPooledTransactions{}.Code()): case (GetPooledTransactions{}.Code()):
ethMsg := new(eth.GetPooledTransactionsPacket66) ethMsg := new(eth.GetPooledTransactionsPacket66)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return 0, errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return ethMsg.RequestId, GetPooledTransactions(ethMsg.GetPooledTransactionsPacket) return (*GetPooledTransactions)(ethMsg)
case (PooledTransactions{}.Code()): case (PooledTransactions{}.Code()):
ethMsg := new(eth.PooledTransactionsPacket66) ethMsg := new(eth.PooledTransactionsPacket66)
if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { if err := rlp.DecodeBytes(rawData, ethMsg); err != nil {
return 0, errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return ethMsg.RequestId, PooledTransactions(ethMsg.PooledTransactionsPacket) return (*PooledTransactions)(ethMsg)
default: default:
msg = errorf("invalid message code: %d", code) msg = errorf("invalid message code: %d", code)
} }
if msg != nil { if msg != nil {
if err := rlp.DecodeBytes(rawData, msg); err != nil { if err := rlp.DecodeBytes(rawData, msg); err != nil {
return 0, errorf("could not rlp decode message: %v", err) return errorf("could not rlp decode message: %v", err)
} }
return 0, msg return msg
} }
return 0, errorf("invalid message: %s", string(rawData)) return errorf("invalid message: %s", string(rawData))
} }
// Write writes a eth packet to the connection. // Write writes a eth packet to the connection.
@ -270,16 +239,6 @@ func (c *Conn) Write(msg Message) error {
return err return err
} }
// Write66 writes an eth66 packet to the connection.
func (c *Conn) Write66(req eth.Packet, code int) error {
payload, err := rlp.EncodeToBytes(req)
if err != nil {
return err
}
_, err = c.Conn.Write(uint64(code), payload)
return err
}
// ReadSnap reads a snap/1 response with the given id from the connection. // ReadSnap reads a snap/1 response with the given id from the connection.
func (c *Conn) ReadSnap(id uint64) (Message, error) { func (c *Conn) ReadSnap(id uint64) (Message, error) {
respId := id + 1 respId := id + 1

View File

@ -22,7 +22,6 @@ import (
"github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest" "github.com/ethereum/go-ethereum/cmd/devp2p/internal/ethtest"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/utesting"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/rlpx" "github.com/ethereum/go-ethereum/p2p/rlpx"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -110,13 +109,8 @@ func rlpxEthTest(ctx *cli.Context) error {
if err != nil { if err != nil {
exit(err) exit(err)
} }
// check if given node supports eth66, and if so, run eth66 protocol tests as well
is66Failed, _ := utesting.Run(utesting.Test{Name: "Is_66", Fn: suite.Is_66})
if is66Failed {
return runTests(ctx, suite.EthTests()) return runTests(ctx, suite.EthTests())
} }
return runTests(ctx, suite.AllEthTests())
}
// rlpxSnapTest runs the snap protocol test suite. // rlpxSnapTest runs the snap protocol test suite.
func rlpxSnapTest(ctx *cli.Context) error { func rlpxSnapTest(ctx *cli.Context) error {

View File

@ -268,7 +268,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
} }
func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB { func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
sdb := state.NewDatabase(db) sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
statedb, _ := state.New(common.Hash{}, sdb, nil) statedb, _ := state.New(common.Hash{}, sdb, nil)
for addr, a := range accounts { for addr, a := range accounts {
statedb.SetCode(addr, a.Code) statedb.SetCode(addr, a.Code)

View File

@ -138,7 +138,7 @@ func runCmd(ctx *cli.Context) error {
gen := readGenesis(ctx.String(GenesisFlag.Name)) gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen genesisConfig = gen
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
genesis := gen.ToBlock(db) genesis := gen.MustCommit(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil) statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
chainConfig = gen.Config chainConfig = gen.Config
} else { } else {

View File

@ -248,7 +248,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network ui
cfg.SyncMode = downloader.LightSync cfg.SyncMode = downloader.LightSync
cfg.NetworkId = network cfg.NetworkId = network
cfg.Genesis = genesis cfg.Genesis = genesis
utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock(nil).Hash()) utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock().Hash())
lesBackend, err := les.New(stack, &cfg) lesBackend, err := les.New(stack, &cfg)
if err != nil { if err != nil {
@ -709,7 +709,7 @@ func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, c
case tokenV2 != "": case tokenV2 != "":
return authTwitterWithTokenV2(tweetID, tokenV2) return authTwitterWithTokenV2(tweetID, tokenV2)
} }
// Twiter API token isn't provided so we just load the public posts // Twitter API token isn't provided so we just load the public posts
// and scrape it for the Ethereum address and profile URL. We need to load // and scrape it for the Ethereum address and profile URL. We need to load
// the mobile page though since the main page loads tweet contents via JS. // the mobile page though since the main page loads tweet contents via JS.
url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1) url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1)

View File

@ -384,12 +384,12 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash) return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
} }
} else { } else {
number, err := strconv.Atoi(arg) number, err := strconv.ParseUint(arg, 10, 64)
if err != nil { if err != nil {
return nil, nil, common.Hash{}, err return nil, nil, common.Hash{}, err
} }
if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) { if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
header = rawdb.ReadHeader(db, hash, uint64(number)) header = rawdb.ReadHeader(db, hash, number)
} else { } else {
return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number) return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
} }

View File

@ -20,7 +20,6 @@ import (
"bufio" "bufio"
"errors" "errors"
"fmt" "fmt"
"math/big"
"os" "os"
"reflect" "reflect"
"unicode" "unicode"
@ -157,13 +156,16 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
// makeFullNode loads geth configuration and creates the Ethereum backend. // makeFullNode loads geth configuration and creates the Ethereum backend.
func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
stack, cfg := makeConfigNode(ctx) stack, cfg := makeConfigNode(ctx)
if ctx.IsSet(utils.OverrideGrayGlacierFlag.Name) {
cfg.Eth.OverrideGrayGlacier = new(big.Int).SetUint64(ctx.Uint64(utils.OverrideGrayGlacierFlag.Name))
}
if ctx.IsSet(utils.OverrideTerminalTotalDifficulty.Name) { if ctx.IsSet(utils.OverrideTerminalTotalDifficulty.Name) {
cfg.Eth.OverrideTerminalTotalDifficulty = flags.GlobalBig(ctx, utils.OverrideTerminalTotalDifficulty.Name) cfg.Eth.OverrideTerminalTotalDifficulty = flags.GlobalBig(ctx, utils.OverrideTerminalTotalDifficulty.Name)
} }
if ctx.IsSet(utils.OverrideTerminalTotalDifficultyPassed.Name) {
override := ctx.Bool(utils.OverrideTerminalTotalDifficultyPassed.Name)
cfg.Eth.OverrideTerminalTotalDifficultyPassed = &override
}
backend, eth := utils.RegisterEthService(stack, &cfg.Eth) backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Warn users to migrate if they have a legacy freezer format. // Warn users to migrate if they have a legacy freezer format.
if eth != nil && !ctx.IsSet(utils.IgnoreLegacyReceiptsFlag.Name) { if eth != nil && !ctx.IsSet(utils.IgnoreLegacyReceiptsFlag.Name) {
firstIdx := uint64(0) firstIdx := uint64(0)
@ -182,10 +184,14 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
} }
} }
// Configure GraphQL if requested // Configure log filter RPC API.
filterSystem := utils.RegisterFilterAPI(stack, backend, &cfg.Eth)
// Configure GraphQL if requested.
if ctx.IsSet(utils.GraphQLEnabledFlag.Name) { if ctx.IsSet(utils.GraphQLEnabledFlag.Name) {
utils.RegisterGraphQLService(stack, backend, cfg.Node) utils.RegisterGraphQLService(stack, backend, filterSystem, &cfg.Node)
} }
// Add the Ethereum Stats daemon if requested. // Add the Ethereum Stats daemon if requested.
if cfg.Ethstats.URL != "" { if cfg.Ethstats.URL != "" {
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL) utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)

View File

@ -155,7 +155,7 @@ To exit, press ctrl-d or type exit
} }
// trulyRandInt generates a crypto random integer used by the console tests to // trulyRandInt generates a crypto random integer used by the console tests to
// not clash network ports with other tests running cocurrently. // not clash network ports with other tests running concurrently.
func trulyRandInt(lo, hi int) int { func trulyRandInt(lo, hi int) int {
num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo))) num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo)))
return int(num.Int64()) + lo return int(num.Int64()) + lo

View File

@ -22,7 +22,6 @@ import (
"os" "os"
"os/signal" "os/signal"
"path/filepath" "path/filepath"
"sort"
"strconv" "strconv"
"strings" "strings"
"syscall" "syscall"
@ -160,8 +159,8 @@ WARNING: This is a low-level operation which may cause database corruption!`,
dbDumpFreezerIndex = &cli.Command{ dbDumpFreezerIndex = &cli.Command{
Action: freezerInspect, Action: freezerInspect,
Name: "freezer-index", Name: "freezer-index",
Usage: "Dump out the index of a given freezer type", Usage: "Dump out the index of a specific freezer table",
ArgsUsage: "<type> <start (int)> <end (int)>", ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
Flags: flags.Merge([]cli.Flag{ Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag, utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags), }, utils.NetworkFlags, utils.DatabasePathFlags),
@ -275,7 +274,7 @@ func inspect(ctx *cli.Context) error {
start []byte start []byte
) )
if ctx.NArg() > 2 { if ctx.NArg() > 2 {
return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage) return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
} }
if ctx.NArg() >= 1 { if ctx.NArg() >= 1 {
if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil { if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
@ -536,43 +535,35 @@ func dbDumpTrie(ctx *cli.Context) error {
} }
func freezerInspect(ctx *cli.Context) error { func freezerInspect(ctx *cli.Context) error {
var ( if ctx.NArg() < 4 {
start, end int64
disableSnappy bool
err error
)
if ctx.NArg() < 3 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
} }
kind := ctx.Args().Get(0) var (
if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok { freezer = ctx.Args().Get(0)
var options []string table = ctx.Args().Get(1)
for opt := range rawdb.FreezerNoSnappy { )
options = append(options, opt) start, err := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
} if err != nil {
sort.Strings(options) log.Info("Could not read start-param", "err", err)
return fmt.Errorf("Could read freezer-type '%v'. Available options: %v", kind, options)
} else {
disableSnappy = noSnap
}
if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
log.Info("Could read start-param", "error", err)
return err return err
} }
if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil { end, err := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
log.Info("Could read count param", "error", err) if err != nil {
log.Info("Could not read count param", "err", err)
return err return err
} }
stack, _ := makeConfigNode(ctx) stack, _ := makeConfigNode(ctx)
defer stack.Close() defer stack.Close()
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
log.Info("Opening freezer", "location", path, "name", kind) db := utils.MakeChainDatabase(ctx, stack, true)
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil { defer db.Close()
ancient, err := db.AncientDatadir()
if err != nil {
log.Info("Failed to retrieve ancient root", "err", err)
return err return err
} else {
f.DumpIndex(start, end)
} }
return nil return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
} }
func importLDBdata(ctx *cli.Context) error { func importLDBdata(ctx *cli.Context) error {

View File

@ -72,8 +72,8 @@ var (
utils.NoUSBFlag, utils.NoUSBFlag,
utils.USBFlag, utils.USBFlag,
utils.SmartCardDaemonPathFlag, utils.SmartCardDaemonPathFlag,
utils.OverrideGrayGlacierFlag,
utils.OverrideTerminalTotalDifficulty, utils.OverrideTerminalTotalDifficulty,
utils.OverrideTerminalTotalDifficultyPassed,
utils.EthashCacheDirFlag, utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag, utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag, utils.EthashCachesOnDiskFlag,
@ -120,6 +120,7 @@ var (
utils.CacheSnapshotFlag, utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag, utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag, utils.CachePreimagesFlag,
utils.CacheLogSizeFlag,
utils.FDLimitFlag, utils.FDLimitFlag,
utils.ListenPortFlag, utils.ListenPortFlag,
utils.DiscoveryPortFlag, utils.DiscoveryPortFlag,

View File

@ -271,7 +271,7 @@ func traverseState(ctx *cli.Context) error {
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
} }
triedb := trie.NewDatabase(chaindb) triedb := trie.NewDatabase(chaindb)
t, err := trie.NewSecure(common.Hash{}, root, triedb) t, err := trie.NewStateTrie(common.Hash{}, root, triedb)
if err != nil { if err != nil {
log.Error("Failed to open trie", "root", root, "err", err) log.Error("Failed to open trie", "root", root, "err", err)
return err return err
@ -292,7 +292,7 @@ func traverseState(ctx *cli.Context) error {
return err return err
} }
if acc.Root != emptyRoot { if acc.Root != emptyRoot {
storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.Key), acc.Root, triedb) storageTrie, err := trie.NewStateTrie(common.BytesToHash(accIter.Key), acc.Root, triedb)
if err != nil { if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err) log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return err return err
@ -360,7 +360,7 @@ func traverseRawState(ctx *cli.Context) error {
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
} }
triedb := trie.NewDatabase(chaindb) triedb := trie.NewDatabase(chaindb)
t, err := trie.NewSecure(common.Hash{}, root, triedb) t, err := trie.NewStateTrie(common.Hash{}, root, triedb)
if err != nil { if err != nil {
log.Error("Failed to open trie", "root", root, "err", err) log.Error("Failed to open trie", "root", root, "err", err)
return err return err
@ -406,7 +406,7 @@ func traverseRawState(ctx *cli.Context) error {
return errors.New("invalid account") return errors.New("invalid account")
} }
if acc.Root != emptyRoot { if acc.Root != emptyRoot {
storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.LeafKey()), acc.Root, triedb) storageTrie, err := trie.NewStateTrie(common.BytesToHash(accIter.LeafKey()), acc.Root, triedb)
if err != nil { if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err) log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return errors.New("missing storage trie") return errors.New("missing storage trie")

View File

@ -150,3 +150,12 @@ func checkPort(host string, port int) error {
conn.Close() conn.Close()
return nil return nil
} }
// getEthName gets the Ethereum Name from ethstats
func getEthName(s string) string {
n := strings.Index(s, ":")
if n >= 0 {
return s[:n]
}
return s
}

View File

@ -104,7 +104,7 @@ func deployExplorer(client *sshClient, network string, bootnodes []string, confi
"Datadir": config.node.datadir, "Datadir": config.node.datadir,
"DBDir": config.dbdir, "DBDir": config.dbdir,
"EthPort": config.node.port, "EthPort": config.node.port,
"EthName": config.node.ethstats[:strings.Index(config.node.ethstats, ":")], "EthName": getEthName(config.node.ethstats),
"WebPort": config.port, "WebPort": config.port,
"Transformer": transformer, "Transformer": transformer,
}) })

View File

@ -116,7 +116,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
"VHost": config.host, "VHost": config.host,
"ApiPort": config.port, "ApiPort": config.port,
"EthPort": config.node.port, "EthPort": config.node.port,
"EthName": config.node.ethstats[:strings.Index(config.node.ethstats, ":")], "EthName": getEthName(config.node.ethstats),
"CaptchaToken": config.captchaToken, "CaptchaToken": config.captchaToken,
"CaptchaSecret": config.captchaSecret, "CaptchaSecret": config.captchaSecret,
"FaucetAmount": config.amount, "FaucetAmount": config.amount,

View File

@ -123,7 +123,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
"TotalPeers": config.peersTotal, "TotalPeers": config.peersTotal,
"Light": config.peersLight > 0, "Light": config.peersLight > 0,
"LightPeers": config.peersLight, "LightPeers": config.peersLight,
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")], "Ethstats": getEthName(config.ethstats),
"Etherbase": config.etherbase, "Etherbase": config.etherbase,
"GasTarget": config.gasTarget, "GasTarget": config.gasTarget,
"GasLimit": config.gasLimit, "GasLimit": config.gasLimit,

View File

@ -163,7 +163,7 @@ func dial(server string, pubkey []byte) (*sshClient, error) {
return nil return nil
} }
// We have a mismatch, forbid connecting // We have a mismatch, forbid connecting
return errors.New("ssh key mismatch, readd the machine to update") return errors.New("ssh key mismatch, re-add the machine to update")
} }
client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck}) client, err := ssh.Dial("tcp", hostport, &ssh.ClientConfig{User: username, Auth: auths, HostKeyCallback: keycheck})
if err != nil { if err != nil {

View File

@ -43,6 +43,7 @@ import (
ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst" ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
@ -64,6 +65,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
pcsclite "github.com/gballet/go-libpcsclite" pcsclite "github.com/gballet/go-libpcsclite"
gopsutil "github.com/shirou/gopsutil/mem" gopsutil "github.com/shirou/gopsutil/mem"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
@ -91,7 +93,7 @@ var (
} }
AncientFlag = &flags.DirectoryFlag{ AncientFlag = &flags.DirectoryFlag{
Name: "datadir.ancient", Name: "datadir.ancient",
Usage: "Data directory for ancient chain segments (default = inside chaindata)", Usage: "Root directory for ancient data (default = inside chaindata)",
Category: flags.EthCategory, Category: flags.EthCategory,
} }
MinFreeDiskSpaceFlag = &flags.DirectoryFlag{ MinFreeDiskSpaceFlag = &flags.DirectoryFlag{
@ -262,17 +264,16 @@ var (
Value: 2048, Value: 2048,
Category: flags.EthCategory, Category: flags.EthCategory,
} }
OverrideGrayGlacierFlag = &cli.Uint64Flag{
Name: "override.grayglacier",
Usage: "Manually specify Gray Glacier fork-block, overriding the bundled setting",
Category: flags.EthCategory,
}
OverrideTerminalTotalDifficulty = &flags.BigFlag{ OverrideTerminalTotalDifficulty = &flags.BigFlag{
Name: "override.terminaltotaldifficulty", Name: "override.terminaltotaldifficulty",
Usage: "Manually specify TerminalTotalDifficulty, overriding the bundled setting", Usage: "Manually specify TerminalTotalDifficulty, overriding the bundled setting",
Category: flags.EthCategory, Category: flags.EthCategory,
} }
OverrideTerminalTotalDifficultyPassed = &cli.BoolFlag{
Name: "override.terminaltotaldifficultypassed",
Usage: "Manually specify TerminalTotalDifficultyPassed, overriding the bundled setting",
Category: flags.EthCategory,
}
// Light server and client settings // Light server and client settings
LightServeFlag = &cli.IntFlag{ LightServeFlag = &cli.IntFlag{
Name: "light.serve", Name: "light.serve",
@ -492,6 +493,12 @@ var (
Usage: "Enable recording the SHA3/keccak preimages of trie keys", Usage: "Enable recording the SHA3/keccak preimages of trie keys",
Category: flags.PerfCategory, Category: flags.PerfCategory,
} }
CacheLogSizeFlag = &cli.IntFlag{
Name: "cache.blocklogs",
Usage: "Size (in number of blocks) of the log cache for filtering",
Category: flags.PerfCategory,
Value: ethconfig.Defaults.FilterLogCacheSize,
}
FDLimitFlag = &cli.IntFlag{ FDLimitFlag = &cli.IntFlag{
Name: "fdlimit", Name: "fdlimit",
Usage: "Raise the open file descriptor resource limit (default = system fd limit)", Usage: "Raise the open file descriptor resource limit (default = system fd limit)",
@ -1809,6 +1816,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheSnapshotFlag.Name) { if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheSnapshotFlag.Name) {
cfg.SnapshotCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheSnapshotFlag.Name) / 100 cfg.SnapshotCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheSnapshotFlag.Name) / 100
} }
if ctx.IsSet(CacheLogSizeFlag.Name) {
cfg.FilterLogCacheSize = ctx.Int(CacheLogSizeFlag.Name)
}
if !ctx.Bool(SnapshotFlag.Name) { if !ctx.Bool(SnapshotFlag.Name) {
// If snap-sync is requested, this flag is also required // If snap-sync is requested, this flag is also required
if cfg.SyncMode == downloader.SnapSync { if cfg.SyncMode == downloader.SnapSync {
@ -2006,21 +2016,34 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend
return backend.APIBackend, backend return backend.APIBackend, backend
} }
// RegisterEthStatsService configures the Ethereum Stats daemon and adds it to // RegisterEthStatsService configures the Ethereum Stats daemon and adds it to the node.
// the given node.
func RegisterEthStatsService(stack *node.Node, backend ethapi.Backend, url string) { func RegisterEthStatsService(stack *node.Node, backend ethapi.Backend, url string) {
if err := ethstats.New(stack, backend, backend.Engine(), url); err != nil { if err := ethstats.New(stack, backend, backend.Engine(), url); err != nil {
Fatalf("Failed to register the Ethereum Stats service: %v", err) Fatalf("Failed to register the Ethereum Stats service: %v", err)
} }
} }
// RegisterGraphQLService is a utility function to construct a new service and register it against a node. // RegisterGraphQLService adds the GraphQL API to the node.
func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, cfg node.Config) { func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterSystem, cfg *node.Config) {
if err := graphql.New(stack, backend, cfg.GraphQLCors, cfg.GraphQLVirtualHosts); err != nil { err := graphql.New(stack, backend, filterSystem, cfg.GraphQLCors, cfg.GraphQLVirtualHosts)
if err != nil {
Fatalf("Failed to register the GraphQL service: %v", err) Fatalf("Failed to register the GraphQL service: %v", err)
} }
} }
// RegisterFilterAPI adds the eth log filtering RPC API to the node.
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
isLightClient := ethcfg.SyncMode == downloader.LightSync
filterSystem := filters.NewFilterSystem(backend, filters.Config{
LogCacheSize: ethcfg.FilterLogCacheSize,
})
stack.RegisterAPIs([]rpc.API{{
Namespace: "eth",
Service: filters.NewFilterAPI(filterSystem, isLightClient),
}})
return filterSystem
}
func SetupMetrics(ctx *cli.Context) { func SetupMetrics(ctx *cli.Context) {
if metrics.Enabled { if metrics.Enabled {
log.Info("Enabling metrics collection") log.Info("Enabling metrics collection")

View File

@ -66,13 +66,16 @@ func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion strin
contracts := make(map[string]*Contract) contracts := make(map[string]*Contract)
for name, info := range output.Contracts { for name, info := range output.Contracts {
// Parse the individual compilation results. // Parse the individual compilation results.
var abi interface{} var abi, userdoc, devdoc interface{}
if err := json.Unmarshal([]byte(info.Abi), &abi); err != nil { if err := json.Unmarshal([]byte(info.Abi), &abi); err != nil {
return nil, fmt.Errorf("solc: error reading abi definition (%v)", err) return nil, fmt.Errorf("solc: error reading abi definition (%v)", err)
} }
var userdoc, devdoc interface{} if err := json.Unmarshal([]byte(info.Userdoc), &userdoc); err != nil {
json.Unmarshal([]byte(info.Userdoc), &userdoc) return nil, fmt.Errorf("solc: error reading userdoc definition (%v)", err)
json.Unmarshal([]byte(info.Devdoc), &devdoc) }
if err := json.Unmarshal([]byte(info.Devdoc), &devdoc); err != nil {
return nil, fmt.Errorf("solc: error reading devdoc definition (%v)", err)
}
contracts[name] = &Contract{ contracts[name] = &Contract{
Code: "0x" + info.Bin, Code: "0x" + info.Bin,

View File

@ -58,7 +58,7 @@ func (s *Simulated) Run(d time.Duration) {
s.mu.Lock() s.mu.Lock()
s.init() s.init()
end := s.now + AbsTime(d) end := s.now.Add(d)
var do []func() var do []func()
for len(s.scheduled) > 0 && s.scheduled[0].at <= end { for len(s.scheduled) > 0 && s.scheduled[0].at <= end {
ev := heap.Pop(&s.scheduled).(*simTimer) ev := heap.Pop(&s.scheduled).(*simTimer)
@ -134,7 +134,7 @@ func (s *Simulated) AfterFunc(d time.Duration, fn func()) Timer {
func (s *Simulated) schedule(d time.Duration, fn func()) *simTimer { func (s *Simulated) schedule(d time.Duration, fn func()) *simTimer {
s.init() s.init()
at := s.now + AbsTime(d) at := s.now.Add(d)
ev := &simTimer{do: fn, at: at, s: s} ev := &simTimer{do: fn, at: at, s: s}
heap.Push(&s.scheduled, ev) heap.Push(&s.scheduled, ev)
s.cond.Broadcast() s.cond.Broadcast()

View File

@ -87,13 +87,13 @@ func (q *LazyQueue) Refresh() {
// refresh re-evaluates items in the older queue and swaps the two queues // refresh re-evaluates items in the older queue and swaps the two queues
func (q *LazyQueue) refresh(now mclock.AbsTime) { func (q *LazyQueue) refresh(now mclock.AbsTime) {
q.maxUntil = now + mclock.AbsTime(q.period) q.maxUntil = now.Add(q.period)
for q.queue[0].Len() != 0 { for q.queue[0].Len() != 0 {
q.Push(heap.Pop(q.queue[0]).(*item).value) q.Push(heap.Pop(q.queue[0]).(*item).value)
} }
q.queue[0], q.queue[1] = q.queue[1], q.queue[0] q.queue[0], q.queue[1] = q.queue[1], q.queue[0]
q.indexOffset = 1 - q.indexOffset q.indexOffset = 1 - q.indexOffset
q.maxUntil += mclock.AbsTime(q.period) q.maxUntil = q.maxUntil.Add(q.period)
} }
// Push adds an item to the queue // Push adds an item to the queue

View File

@ -41,13 +41,13 @@ func (p *Prque) Push(data interface{}, priority int64) {
heap.Push(p.cont, &item{data, priority}) heap.Push(p.cont, &item{data, priority})
} }
// Peek returns the value with the greates priority but does not pop it off. // Peek returns the value with the greatest priority but does not pop it off.
func (p *Prque) Peek() (interface{}, int64) { func (p *Prque) Peek() (interface{}, int64) {
item := p.cont.blocks[0][0] item := p.cont.blocks[0][0]
return item.value, item.priority return item.value, item.priority
} }
// Pops the value with the greates priority off the stack and returns it. // Pops the value with the greatest priority off the stack and returns it.
// Currently no shrinking is done. // Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, int64) { func (p *Prque) Pop() (interface{}, int64) {
item := heap.Pop(p.cont).(*item) item := heap.Pop(p.cont).(*item)

View File

@ -114,8 +114,16 @@ func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers [
} }
} }
// All the headers have passed the transition point, use new rules.
if len(preHeaders) == 0 { if len(preHeaders) == 0 {
// All the headers are pos headers. Verify that the parent block reached total terminal difficulty.
if reached, _ := IsTTDReached(chain, headers[0].ParentHash, headers[0].Number.Uint64()-1); !reached {
// TTD not reached for the first block, mark subsequent with invalid terminal block
results := make(chan error, len(headers))
for i := 0; i < len(headers); i++ {
results <- consensus.ErrInvalidTerminalBlock
}
return make(chan struct{}), results
}
return beacon.verifyHeaders(chain, headers, nil) return beacon.verifyHeaders(chain, headers, nil)
} }

View File

@ -305,7 +305,7 @@ func TestClique(t *testing.T) {
}, { }, {
// Ensure that pending votes don't survive authorization status changes. This // Ensure that pending votes don't survive authorization status changes. This
// corner case can only appear if a signer is quickly added, removed and then // corner case can only appear if a signer is quickly added, removed and then
// readded (or the inverse), while one of the original voters dropped. If a // re-added (or the inverse), while one of the original voters dropped. If a
// past vote is left cached in the system somewhere, this will interfere with // past vote is left cached in the system somewhere, this will interfere with
// the final signer outcome. // the final signer outcome.
signers: []string{"A", "B", "C", "D", "E"}, signers: []string{"A", "B", "C", "D", "E"},
@ -344,7 +344,7 @@ func TestClique(t *testing.T) {
}, },
failure: errUnauthorizedSigner, failure: errUnauthorizedSigner,
}, { }, {
// An authorized signer that signed recenty should not be able to sign again // An authorized signer that signed recently should not be able to sign again
signers: []string{"A", "B"}, signers: []string{"A", "B"},
votes: []testerVote{ votes: []testerVote{
{signer: "A"}, {signer: "A"},
@ -403,7 +403,7 @@ func TestClique(t *testing.T) {
} }
// Create a pristine blockchain with the genesis injected // Create a pristine blockchain with the genesis injected
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
genesis.Commit(db) genesisBlock := genesis.MustCommit(db)
// Assemble a chain of headers from the cast votes // Assemble a chain of headers from the cast votes
config := *params.TestChainConfig config := *params.TestChainConfig
@ -414,7 +414,7 @@ func TestClique(t *testing.T) {
engine := New(config.Clique, db) engine := New(config.Clique, db)
engine.fakeDiff = true engine.fakeDiff = true
blocks, _ := core.GenerateChain(&config, genesis.ToBlock(db), engine, db, len(tt.votes), func(j int, gen *core.BlockGen) { blocks, _ := core.GenerateChain(&config, genesisBlock, engine, db, len(tt.votes), func(j int, gen *core.BlockGen) {
// Cast the vote contained in this block // Cast the vote contained in this block
gen.SetCoinbase(accounts.address(tt.votes[j].voted)) gen.SetCoinbase(accounts.address(tt.votes[j].voted))
if tt.votes[j].auth { if tt.votes[j].auth {

View File

@ -278,8 +278,11 @@ func (c *cache) generate(dir string, limit int, lock bool, test bool) {
// Iterate over all previous instances and delete old ones // Iterate over all previous instances and delete old ones
for ep := int(c.epoch) - limit; ep >= 0; ep-- { for ep := int(c.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1) seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian)) path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s*", algorithmRevision, seed[:8], endian))
os.Remove(path) files, _ := filepath.Glob(path) // find also the temp files that are generated.
for _, file := range files {
os.Remove(file)
}
} }
}) })
} }

View File

@ -290,7 +290,7 @@ func (c *Console) AutoCompleteInput(line string, pos int) (string, []string, str
if len(line) == 0 || pos == 0 { if len(line) == 0 || pos == 0 {
return "", nil, "" return "", nil, ""
} }
// Chunck data to relevant part for autocompletion // Chunk data to relevant part for autocompletion
// E.g. in case of nested lines eth.getBalance(eth.coinb<tab><tab> // E.g. in case of nested lines eth.getBalance(eth.coinb<tab><tab>
start := pos - 1 start := pos - 1
for ; start > 0; start-- { for ; start > 0; start-- {
@ -407,7 +407,7 @@ func (c *Console) StopInteractive() {
} }
} }
// Interactive starts an interactive user session, where in.put is propted from // Interactive starts an interactive user session, where input is prompted from
// the configured user prompter. // the configured user prompter.
func (c *Console) Interactive() { func (c *Console) Interactive() {
var ( var (
@ -497,7 +497,7 @@ func (c *Console) readLines(input chan<- string, errc chan<- error, prompt <-cha
} }
} }
// countIndents returns the number of identations for the given input. // countIndents returns the number of indentations for the given input.
// In case of invalid input such as var a = } the result can be negative. // In case of invalid input such as var a = } the result can be negative.
func countIndents(input string) int { func countIndents(input string) int {
var ( var (

View File

@ -706,7 +706,7 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
if block == nil { if block == nil {
return fmt.Errorf("non existent block [%x..]", hash[:4]) return fmt.Errorf("non existent block [%x..]", hash[:4])
} }
if _, err := trie.NewSecure(common.Hash{}, block.Root(), bc.stateCache.TrieDB()); err != nil { if _, err := trie.NewStateTrie(common.Hash{}, block.Root(), bc.stateCache.TrieDB()); err != nil {
return err return err
} }
@ -893,6 +893,10 @@ func (bc *BlockChain) Stop() {
log.Error("Dangling trie nodes after full cleanup") log.Error("Dangling trie nodes after full cleanup")
} }
} }
// Flush the collected preimages to disk
if err := bc.stateCache.TrieDB().CommitPreimages(); err != nil {
log.Error("Failed to commit trie preimages", "err", err)
}
// Ensure all live cached entries be saved into disk, so that we can skip // Ensure all live cached entries be saved into disk, so that we can skip
// cache warmup when node restarts. // cache warmup when node restarts.
if bc.cacheConfig.TrieCleanJournal != "" { if bc.cacheConfig.TrieCleanJournal != "" {
@ -1244,7 +1248,7 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
// writeBlockWithState writes block, metadata and corresponding state data to the // writeBlockWithState writes block, metadata and corresponding state data to the
// database. // database.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error { func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) error {
// Calculate the total difficulty of the block // Calculate the total difficulty of the block
ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1) ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
if ptd == nil { if ptd == nil {
@ -1339,7 +1343,7 @@ func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types
// writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead. // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
// This function expects the chain mutex to be held. // This function expects the chain mutex to be held.
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
if err := bc.writeBlockWithState(block, receipts, logs, state); err != nil { if err := bc.writeBlockWithState(block, receipts, state); err != nil {
return NonStatTy, err return NonStatTy, err
} }
currentBlock := bc.CurrentBlock() currentBlock := bc.CurrentBlock()
@ -1380,7 +1384,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
} }
// In theory we should fire a ChainHeadEvent when we inject // In theory we should fire a ChainHeadEvent when we inject
// a canonical block, but sometimes we can insert a batch of // a canonical block, but sometimes we can insert a batch of
// canonicial blocks. Avoid firing too many ChainHeadEvents, // canonical blocks. Avoid firing too many ChainHeadEvents,
// we will fire an accumulated ChainHeadEvent and disable fire // we will fire an accumulated ChainHeadEvent and disable fire
// event here. // event here.
if emitHeadEvent { if emitHeadEvent {
@ -1618,7 +1622,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
// block in the middle. It can only happen in the clique chain. Whenever // block in the middle. It can only happen in the clique chain. Whenever
// we insert blocks via `insertSideChain`, we only commit `td`, `header` // we insert blocks via `insertSideChain`, we only commit `td`, `header`
// and `body` if it's non-existent. Since we don't have receipts without // and `body` if it's non-existent. Since we don't have receipts without
// reexecution, so nothing to commit. But if the sidechain will be adpoted // reexecution, so nothing to commit. But if the sidechain will be adopted
// as the canonical chain eventually, it needs to be reexecuted for missing // as the canonical chain eventually, it needs to be reexecuted for missing
// state, but if it's this special case here(skip reexecution) we will lose // state, but if it's this special case here(skip reexecution) we will lose
// the empty receipt entry. // the empty receipt entry.
@ -1713,7 +1717,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
var status WriteStatus var status WriteStatus
if !setHead { if !setHead {
// Don't set the head, only insert the block // Don't set the head, only insert the block
err = bc.writeBlockWithState(block, receipts, logs, statedb) err = bc.writeBlockWithState(block, receipts, statedb)
} else { } else {
status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false) status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false)
} }

View File

@ -564,7 +564,7 @@ func testShortReorgedSnapSyncingRepair(t *testing.T, snapshots bool) {
// Tests a recovery for a long canonical chain with frozen blocks where a recent // Tests a recovery for a long canonical chain with frozen blocks where a recent
// block - newer than the ancient limit - was already committed to disk and then // block - newer than the ancient limit - was already committed to disk and then
// the process crashed. In this case we expect the chain to be rolled back to the // the process crashed. In this case we expect the chain to be rolled back to the
// committed block, with everything afterwads kept as fast sync data. // committed block, with everything afterwards kept as fast sync data.
func TestLongShallowRepair(t *testing.T) { testLongShallowRepair(t, false) } func TestLongShallowRepair(t *testing.T) { testLongShallowRepair(t, false) }
func TestLongShallowRepairWithSnapshots(t *testing.T) { testLongShallowRepair(t, true) } func TestLongShallowRepairWithSnapshots(t *testing.T) { testLongShallowRepair(t, true) }
@ -609,7 +609,7 @@ func testLongShallowRepair(t *testing.T, snapshots bool) {
// Tests a recovery for a long canonical chain with frozen blocks where a recent // Tests a recovery for a long canonical chain with frozen blocks where a recent
// block - older than the ancient limit - was already committed to disk and then // block - older than the ancient limit - was already committed to disk and then
// the process crashed. In this case we expect the chain to be rolled back to the // the process crashed. In this case we expect the chain to be rolled back to the
// committed block, with everything afterwads deleted. // committed block, with everything afterwards deleted.
func TestLongDeepRepair(t *testing.T) { testLongDeepRepair(t, false) } func TestLongDeepRepair(t *testing.T) { testLongDeepRepair(t, false) }
func TestLongDeepRepairWithSnapshots(t *testing.T) { testLongDeepRepair(t, true) } func TestLongDeepRepairWithSnapshots(t *testing.T) { testLongDeepRepair(t, true) }
@ -653,7 +653,7 @@ func testLongDeepRepair(t *testing.T, snapshots bool) {
// Tests a recovery for a long canonical chain with frozen blocks where the fast // Tests a recovery for a long canonical chain with frozen blocks where the fast
// sync pivot point - newer than the ancient limit - was already committed, after // sync pivot point - newer than the ancient limit - was already committed, after
// which the process crashed. In this case we expect the chain to be rolled back // which the process crashed. In this case we expect the chain to be rolled back
// to the committed block, with everything afterwads kept as fast sync data. // to the committed block, with everything afterwards kept as fast sync data.
func TestLongSnapSyncedShallowRepair(t *testing.T) { func TestLongSnapSyncedShallowRepair(t *testing.T) {
testLongSnapSyncedShallowRepair(t, false) testLongSnapSyncedShallowRepair(t, false)
} }
@ -702,7 +702,7 @@ func testLongSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
// Tests a recovery for a long canonical chain with frozen blocks where the fast // Tests a recovery for a long canonical chain with frozen blocks where the fast
// sync pivot point - older than the ancient limit - was already committed, after // sync pivot point - older than the ancient limit - was already committed, after
// which the process crashed. In this case we expect the chain to be rolled back // which the process crashed. In this case we expect the chain to be rolled back
// to the committed block, with everything afterwads deleted. // to the committed block, with everything afterwards deleted.
func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) } func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) }
func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) } func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) }
@ -843,7 +843,7 @@ func testLongSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
// side chain, where a recent block - newer than the ancient limit - was already // side chain, where a recent block - newer than the ancient limit - was already
// committed to disk and then the process crashed. In this test scenario the side // committed to disk and then the process crashed. In this test scenario the side
// chain is below the committed block. In this case we expect the chain to be // chain is below the committed block. In this case we expect the chain to be
// rolled back to the committed block, with everything afterwads kept as fast // rolled back to the committed block, with everything afterwards kept as fast
// sync data; the side chain completely nuked by the freezer. // sync data; the side chain completely nuked by the freezer.
func TestLongOldForkedShallowRepair(t *testing.T) { func TestLongOldForkedShallowRepair(t *testing.T) {
testLongOldForkedShallowRepair(t, false) testLongOldForkedShallowRepair(t, false)
@ -895,7 +895,7 @@ func testLongOldForkedShallowRepair(t *testing.T, snapshots bool) {
// side chain, where a recent block - older than the ancient limit - was already // side chain, where a recent block - older than the ancient limit - was already
// committed to disk and then the process crashed. In this test scenario the side // committed to disk and then the process crashed. In this test scenario the side
// chain is below the committed block. In this case we expect the canonical chain // chain is below the committed block. In this case we expect the canonical chain
// to be rolled back to the committed block, with everything afterwads deleted; // to be rolled back to the committed block, with everything afterwards deleted;
// the side chain completely nuked by the freezer. // the side chain completely nuked by the freezer.
func TestLongOldForkedDeepRepair(t *testing.T) { testLongOldForkedDeepRepair(t, false) } func TestLongOldForkedDeepRepair(t *testing.T) { testLongOldForkedDeepRepair(t, false) }
func TestLongOldForkedDeepRepairWithSnapshots(t *testing.T) { testLongOldForkedDeepRepair(t, true) } func TestLongOldForkedDeepRepairWithSnapshots(t *testing.T) { testLongOldForkedDeepRepair(t, true) }
@ -942,7 +942,7 @@ func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - newer than the ancient limit - // side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then the process crashed. In this test scenario // was already committed to disk and then the process crashed. In this test scenario
// the side chain is below the committed block. In this case we expect the chain // the side chain is below the committed block. In this case we expect the chain
// to be rolled back to the committed block, with everything afterwads kept as // to be rolled back to the committed block, with everything afterwards kept as
// fast sync data; the side chain completely nuked by the freezer. // fast sync data; the side chain completely nuked by the freezer.
func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) { func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) {
testLongOldForkedSnapSyncedShallowRepair(t, false) testLongOldForkedSnapSyncedShallowRepair(t, false)
@ -994,7 +994,7 @@ func testLongOldForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - older than the ancient limit - // side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then the process crashed. In this test scenario // was already committed to disk and then the process crashed. In this test scenario
// the side chain is below the committed block. In this case we expect the canonical // the side chain is below the committed block. In this case we expect the canonical
// chain to be rolled back to the committed block, with everything afterwads deleted; // chain to be rolled back to the committed block, with everything afterwards deleted;
// the side chain completely nuked by the freezer. // the side chain completely nuked by the freezer.
func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) { func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) {
testLongOldForkedSnapSyncedDeepRepair(t, false) testLongOldForkedSnapSyncedDeepRepair(t, false)
@ -1149,7 +1149,7 @@ func testLongOldForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
// side chain, where a recent block - newer than the ancient limit - was already // side chain, where a recent block - newer than the ancient limit - was already
// committed to disk and then the process crashed. In this test scenario the side // committed to disk and then the process crashed. In this test scenario the side
// chain is above the committed block. In this case we expect the chain to be // chain is above the committed block. In this case we expect the chain to be
// rolled back to the committed block, with everything afterwads kept as fast // rolled back to the committed block, with everything afterwards kept as fast
// sync data; the side chain completely nuked by the freezer. // sync data; the side chain completely nuked by the freezer.
func TestLongNewerForkedShallowRepair(t *testing.T) { func TestLongNewerForkedShallowRepair(t *testing.T) {
testLongNewerForkedShallowRepair(t, false) testLongNewerForkedShallowRepair(t, false)
@ -1201,7 +1201,7 @@ func testLongNewerForkedShallowRepair(t *testing.T, snapshots bool) {
// side chain, where a recent block - older than the ancient limit - was already // side chain, where a recent block - older than the ancient limit - was already
// committed to disk and then the process crashed. In this test scenario the side // committed to disk and then the process crashed. In this test scenario the side
// chain is above the committed block. In this case we expect the canonical chain // chain is above the committed block. In this case we expect the canonical chain
// to be rolled back to the committed block, with everything afterwads deleted; // to be rolled back to the committed block, with everything afterwards deleted;
// the side chain completely nuked by the freezer. // the side chain completely nuked by the freezer.
func TestLongNewerForkedDeepRepair(t *testing.T) { testLongNewerForkedDeepRepair(t, false) } func TestLongNewerForkedDeepRepair(t *testing.T) { testLongNewerForkedDeepRepair(t, false) }
func TestLongNewerForkedDeepRepairWithSnapshots(t *testing.T) { testLongNewerForkedDeepRepair(t, true) } func TestLongNewerForkedDeepRepairWithSnapshots(t *testing.T) { testLongNewerForkedDeepRepair(t, true) }
@ -1248,7 +1248,7 @@ func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - newer than the ancient limit - // side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then the process crashed. In this test scenario // was already committed to disk and then the process crashed. In this test scenario
// the side chain is above the committed block. In this case we expect the chain // the side chain is above the committed block. In this case we expect the chain
// to be rolled back to the committed block, with everything afterwads kept as fast // to be rolled back to the committed block, with everything afterwards kept as fast
// sync data; the side chain completely nuked by the freezer. // sync data; the side chain completely nuked by the freezer.
func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) { func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) {
testLongNewerForkedSnapSyncedShallowRepair(t, false) testLongNewerForkedSnapSyncedShallowRepair(t, false)
@ -1300,7 +1300,7 @@ func testLongNewerForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - older than the ancient limit - // side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then the process crashed. In this test scenario // was already committed to disk and then the process crashed. In this test scenario
// the side chain is above the committed block. In this case we expect the canonical // the side chain is above the committed block. In this case we expect the canonical
// chain to be rolled back to the committed block, with everything afterwads deleted; // chain to be rolled back to the committed block, with everything afterwards deleted;
// the side chain completely nuked by the freezer. // the side chain completely nuked by the freezer.
func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) { func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) {
testLongNewerForkedSnapSyncedDeepRepair(t, false) testLongNewerForkedSnapSyncedDeepRepair(t, false)
@ -1454,7 +1454,7 @@ func testLongNewerForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) {
// Tests a recovery for a long canonical chain with frozen blocks and a longer side // Tests a recovery for a long canonical chain with frozen blocks and a longer side
// chain, where a recent block - newer than the ancient limit - was already committed // chain, where a recent block - newer than the ancient limit - was already committed
// to disk and then the process crashed. In this case we expect the chain to be // to disk and then the process crashed. In this case we expect the chain to be
// rolled back to the committed block, with everything afterwads kept as fast sync // rolled back to the committed block, with everything afterwards kept as fast sync
// data. The side chain completely nuked by the freezer. // data. The side chain completely nuked by the freezer.
func TestLongReorgedShallowRepair(t *testing.T) { testLongReorgedShallowRepair(t, false) } func TestLongReorgedShallowRepair(t *testing.T) { testLongReorgedShallowRepair(t, false) }
func TestLongReorgedShallowRepairWithSnapshots(t *testing.T) { testLongReorgedShallowRepair(t, true) } func TestLongReorgedShallowRepairWithSnapshots(t *testing.T) { testLongReorgedShallowRepair(t, true) }
@ -1501,7 +1501,7 @@ func testLongReorgedShallowRepair(t *testing.T, snapshots bool) {
// Tests a recovery for a long canonical chain with frozen blocks and a longer side // Tests a recovery for a long canonical chain with frozen blocks and a longer side
// chain, where a recent block - older than the ancient limit - was already committed // chain, where a recent block - older than the ancient limit - was already committed
// to disk and then the process crashed. In this case we expect the canonical chains // to disk and then the process crashed. In this case we expect the canonical chains
// to be rolled back to the committed block, with everything afterwads deleted. The // to be rolled back to the committed block, with everything afterwards deleted. The
// side chain completely nuked by the freezer. // side chain completely nuked by the freezer.
func TestLongReorgedDeepRepair(t *testing.T) { testLongReorgedDeepRepair(t, false) } func TestLongReorgedDeepRepair(t *testing.T) { testLongReorgedDeepRepair(t, false) }
func TestLongReorgedDeepRepairWithSnapshots(t *testing.T) { testLongReorgedDeepRepair(t, true) } func TestLongReorgedDeepRepairWithSnapshots(t *testing.T) { testLongReorgedDeepRepair(t, true) }
@ -1548,7 +1548,7 @@ func testLongReorgedDeepRepair(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - newer than the ancient limit - // side chain, where the fast sync pivot point - newer than the ancient limit -
// was already committed to disk and then the process crashed. In this case we // was already committed to disk and then the process crashed. In this case we
// expect the chain to be rolled back to the committed block, with everything // expect the chain to be rolled back to the committed block, with everything
// afterwads kept as fast sync data. The side chain completely nuked by the // afterwards kept as fast sync data. The side chain completely nuked by the
// freezer. // freezer.
func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) { func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) {
testLongReorgedSnapSyncedShallowRepair(t, false) testLongReorgedSnapSyncedShallowRepair(t, false)
@ -1600,7 +1600,7 @@ func testLongReorgedSnapSyncedShallowRepair(t *testing.T, snapshots bool) {
// side chain, where the fast sync pivot point - older than the ancient limit - // side chain, where the fast sync pivot point - older than the ancient limit -
// was already committed to disk and then the process crashed. In this case we // was already committed to disk and then the process crashed. In this case we
// expect the canonical chains to be rolled back to the committed block, with // expect the canonical chains to be rolled back to the committed block, with
// everything afterwads deleted. The side chain completely nuked by the freezer. // everything afterwards deleted. The side chain completely nuked by the freezer.
func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) { func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) {
testLongReorgedSnapSyncedDeepRepair(t, false) testLongReorgedSnapSyncedDeepRepair(t, false)
} }

View File

@ -759,9 +759,9 @@ func TestFastVsFullChains(t *testing.T) {
block.AddTx(tx) block.AddTx(tx)
} }
} }
// If the block number is a multiple of 5, add a few bonus uncles to the block // If the block number is a multiple of 5, add an uncle to the block
if i%5 == 5 { if i%5 == 4 {
block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))}) block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i))})
} }
}) })
// Import the chain as an archive node for the comparison baseline // Import the chain as an archive node for the comparison baseline
@ -1942,7 +1942,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
} }
signer = types.LatestSigner(gspec.Config) signer = types.LatestSigner(gspec.Config)
genesis, _ = gspec.Commit(db) genesis = gspec.MustCommit(db)
) )
// Generate and import the canonical chain // Generate and import the canonical chain
diskdb := rawdb.NewMemoryDatabase() diskdb := rawdb.NewMemoryDatabase()

View File

@ -75,7 +75,7 @@ func (b *BloomIndexer) Process(ctx context.Context, header *types.Header) error
// Commit implements core.ChainIndexerBackend, finalizing the bloom section and // Commit implements core.ChainIndexerBackend, finalizing the bloom section and
// writing it out into the database. // writing it out into the database.
func (b *BloomIndexer) Commit() error { func (b *BloomIndexer) Commit() error {
batch := b.db.NewBatch() batch := b.db.NewBatchWithSize((int(b.size) / 8) * types.BloomBitLength)
for i := 0; i < types.BloomBitLength; i++ { for i := 0; i < types.BloomBitLength; i++ {
bits, err := b.gen.Bitset(uint(i)) bits, err := b.gen.Bitset(uint(i))
if err != nil { if err != nil {

View File

@ -80,10 +80,12 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// flush adds allocated genesis accounts into a fresh new statedb and // deriveHash computes the state root according to the genesis specification.
// commit the state changes into the given database handler. func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
func (ga *GenesisAlloc) flush(db ethdb.Database) (common.Hash, error) { // Create an ephemeral in-memory database for computing hash,
statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil) // all the derived states will be discarded to not pollute disk.
db := state.NewDatabase(rawdb.NewMemoryDatabase())
statedb, err := state.New(common.Hash{}, db, nil)
if err != nil { if err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
@ -95,25 +97,39 @@ func (ga *GenesisAlloc) flush(db ethdb.Database) (common.Hash, error) {
statedb.SetState(addr, key, value) statedb.SetState(addr, key, value)
} }
} }
return statedb.Commit(false)
}
// flush is very similar with deriveHash, but the main difference is
// all the generated states will be persisted into the given database.
// Also, the genesis state specification will be flushed as well.
func (ga *GenesisAlloc) flush(db ethdb.Database) error {
statedb, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
if err != nil {
return err
}
for addr, account := range *ga {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
}
root, err := statedb.Commit(false) root, err := statedb.Commit(false)
if err != nil { if err != nil {
return common.Hash{}, err return err
} }
err = statedb.Database().TrieDB().Commit(root, true, nil) err = statedb.Database().TrieDB().Commit(root, true, nil)
if err != nil { if err != nil {
return common.Hash{}, err return err
} }
return root, nil // Marshal the genesis state specification and persist.
}
// write writes the json marshaled genesis state into database
// with the given block hash as the unique identifier.
func (ga *GenesisAlloc) write(db ethdb.KeyValueWriter, hash common.Hash) error {
blob, err := json.Marshal(ga) blob, err := json.Marshal(ga)
if err != nil { if err != nil {
return err return err
} }
rawdb.WriteGenesisState(db, hash, blob) rawdb.WriteGenesisStateSpec(db, root, blob)
return nil return nil
} }
@ -121,7 +137,7 @@ func (ga *GenesisAlloc) write(db ethdb.KeyValueWriter, hash common.Hash) error {
// hash and commits them into the given database handler. // hash and commits them into the given database handler.
func CommitGenesisState(db ethdb.Database, hash common.Hash) error { func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
var alloc GenesisAlloc var alloc GenesisAlloc
blob := rawdb.ReadGenesisState(db, hash) blob := rawdb.ReadGenesisStateSpec(db, hash)
if len(blob) != 0 { if len(blob) != 0 {
if err := alloc.UnmarshalJSON(blob); err != nil { if err := alloc.UnmarshalJSON(blob); err != nil {
return err return err
@ -151,8 +167,7 @@ func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
return errors.New("not found") return errors.New("not found")
} }
} }
_, err := alloc.flush(db) return alloc.flush(db)
return err
} }
// GenesisAccount is an account in the state of the genesis block. // GenesisAccount is an account in the state of the genesis block.
@ -233,7 +248,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
return SetupGenesisBlockWithOverride(db, genesis, nil, nil) return SetupGenesisBlockWithOverride(db, genesis, nil, nil)
} }
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideGrayGlacier, overrideTerminalTotalDifficulty *big.Int) (*params.ChainConfig, common.Hash, error) { func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideTerminalTotalDifficulty *big.Int, overrideTerminalTotalDifficultyPassed *bool) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil { if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
} }
@ -243,8 +258,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if overrideTerminalTotalDifficulty != nil { if overrideTerminalTotalDifficulty != nil {
config.TerminalTotalDifficulty = overrideTerminalTotalDifficulty config.TerminalTotalDifficulty = overrideTerminalTotalDifficulty
} }
if overrideGrayGlacier != nil { if overrideTerminalTotalDifficultyPassed != nil {
config.GrayGlacierBlock = overrideGrayGlacier config.TerminalTotalDifficultyPassed = *overrideTerminalTotalDifficultyPassed
} }
} }
} }
@ -273,7 +288,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
genesis = DefaultGenesisBlock() genesis = DefaultGenesisBlock()
} }
// Ensure the stored genesis matches with the given one. // Ensure the stored genesis matches with the given one.
hash := genesis.ToBlock(nil).Hash() hash := genesis.ToBlock().Hash()
if hash != stored { if hash != stored {
return genesis.Config, hash, &GenesisMismatchError{stored, hash} return genesis.Config, hash, &GenesisMismatchError{stored, hash}
} }
@ -286,7 +301,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
} }
// Check whether the genesis block is already written. // Check whether the genesis block is already written.
if genesis != nil { if genesis != nil {
hash := genesis.ToBlock(nil).Hash() hash := genesis.ToBlock().Hash()
if hash != stored { if hash != stored {
return genesis.Config, hash, &GenesisMismatchError{stored, hash} return genesis.Config, hash, &GenesisMismatchError{stored, hash}
} }
@ -347,13 +362,9 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
} }
} }
// ToBlock creates the genesis block and writes state of a genesis specification // ToBlock returns the genesis block according to genesis specification.
// to the given database (or discards it if nil). func (g *Genesis) ToBlock() *types.Block {
func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { root, err := g.Alloc.deriveHash()
if db == nil {
db = rawdb.NewMemoryDatabase()
}
root, err := g.Alloc.flush(db)
if err != nil { if err != nil {
panic(err) panic(err)
} }
@ -390,7 +401,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
// Commit writes the block and state of a genesis specification to the database. // Commit writes the block and state of a genesis specification to the database.
// The block is committed as the canonical head block. // The block is committed as the canonical head block.
func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
block := g.ToBlock(db) block := g.ToBlock()
if block.Number().Sign() != 0 { if block.Number().Sign() != 0 {
return nil, errors.New("can't commit genesis block with number > 0") return nil, errors.New("can't commit genesis block with number > 0")
} }
@ -404,7 +415,10 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if config.Clique != nil && len(block.Extra()) < 32+crypto.SignatureLength { if config.Clique != nil && len(block.Extra()) < 32+crypto.SignatureLength {
return nil, errors.New("can't start clique chain without signers") return nil, errors.New("can't start clique chain without signers")
} }
if err := g.Alloc.write(db, block.Hash()); err != nil { // All the checks has passed, flush the states derived from the genesis
// specification as well as the specification itself into the provided
// database.
if err := g.Alloc.flush(db); err != nil {
return nil, err return nil, err
} }
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty()) rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
@ -428,15 +442,6 @@ func (g *Genesis) MustCommit(db ethdb.Database) *types.Block {
return block return block
} }
// GenesisBlockForTesting creates and writes a block in which addr has the given wei balance.
func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int) *types.Block {
g := Genesis{
Alloc: GenesisAlloc{addr: {Balance: balance}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
return g.MustCommit(db)
}
// DefaultGenesisBlock returns the Ethereum main net genesis block. // DefaultGenesisBlock returns the Ethereum main net genesis block.
func DefaultGenesisBlock() *Genesis { func DefaultGenesisBlock() *Genesis {
return &Genesis{ return &Genesis{
@ -498,6 +503,7 @@ func DefaultSepoliaGenesisBlock() *Genesis {
} }
} }
// DefaultKilnGenesisBlock returns the kiln network genesis block.
func DefaultKilnGenesisBlock() *Genesis { func DefaultKilnGenesisBlock() *Genesis {
g := new(Genesis) g := new(Genesis)
reader := strings.NewReader(KilnAllocData) reader := strings.NewReader(KilnAllocData)

File diff suppressed because one or more lines are too long

View File

@ -178,7 +178,7 @@ func TestGenesisHashes(t *testing.T) {
t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex()) t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
} }
// Test via ToBlock // Test via ToBlock
if have := c.genesis.ToBlock(nil).Hash(); have != c.want { if have := c.genesis.ToBlock().Hash(); have != c.want {
t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex()) t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
} }
} }
@ -192,11 +192,7 @@ func TestGenesis_Commit(t *testing.T) {
} }
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
genesisBlock, err := genesis.Commit(db) genesisBlock := genesis.MustCommit(db)
if err != nil {
t.Fatal(err)
}
if genesis.Difficulty != nil { if genesis.Difficulty != nil {
t.Fatalf("assumption wrong") t.Fatalf("assumption wrong")
} }
@ -221,12 +217,12 @@ func TestReadWriteGenesisAlloc(t *testing.T) {
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
{2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}}, {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
} }
hash = common.HexToHash("0xdeadbeef") hash, _ = alloc.deriveHash()
) )
alloc.write(db, hash) alloc.flush(db)
var reload GenesisAlloc var reload GenesisAlloc
err := reload.UnmarshalJSON(rawdb.ReadGenesisState(db, hash)) err := reload.UnmarshalJSON(rawdb.ReadGenesisStateSpec(db, hash))
if err != nil { if err != nil {
t.Fatalf("Failed to load genesis state %v", err) t.Fatalf("Failed to load genesis state %v", err)
} }

View File

@ -37,7 +37,7 @@ import (
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash { func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
var data []byte var data []byte
db.ReadAncients(func(reader ethdb.AncientReaderOp) error { db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
data, _ = reader.Ancient(freezerHashTable, number) data, _ = reader.Ancient(chainFreezerHashTable, number)
if len(data) == 0 { if len(data) == 0 {
// Get it by hash from leveldb // Get it by hash from leveldb
data, _ = db.Get(headerHashKey(number)) data, _ = db.Get(headerHashKey(number))
@ -335,7 +335,7 @@ func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValu
} }
// read remaining from ancients // read remaining from ancients
max := count * 700 max := count * 700
data, err := db.AncientRange(freezerHeaderTable, i+1-count, count, max) data, err := db.AncientRange(chainFreezerHeaderTable, i+1-count, count, max)
if err == nil && uint64(len(data)) == count { if err == nil && uint64(len(data)) == count {
// the data is on the order [h, h+1, .., n] -- reordering needed // the data is on the order [h, h+1, .., n] -- reordering needed
for i := range data { for i := range data {
@ -352,7 +352,7 @@ func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValu
// First try to look up the data in ancient database. Extra hash // First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains // comparison is necessary since ancient database only maintains
// the canonical data. // the canonical data.
data, _ = reader.Ancient(freezerHeaderTable, number) data, _ = reader.Ancient(chainFreezerHeaderTable, number)
if len(data) > 0 && crypto.Keccak256Hash(data) == hash { if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
return nil return nil
} }
@ -428,7 +428,7 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
// isCanon is an internal utility method, to check whether the given number/hash // isCanon is an internal utility method, to check whether the given number/hash
// is part of the ancient (canon) set. // is part of the ancient (canon) set.
func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool { func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool {
h, err := reader.Ancient(freezerHashTable, number) h, err := reader.Ancient(chainFreezerHashTable, number)
if err != nil { if err != nil {
return false return false
} }
@ -444,7 +444,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue
db.ReadAncients(func(reader ethdb.AncientReaderOp) error { db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients // Check if the data is in ancients
if isCanon(reader, number, hash) { if isCanon(reader, number, hash) {
data, _ = reader.Ancient(freezerBodiesTable, number) data, _ = reader.Ancient(chainFreezerBodiesTable, number)
return nil return nil
} }
// If not, try reading from leveldb // If not, try reading from leveldb
@ -459,7 +459,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue { func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
var data []byte var data []byte
db.ReadAncients(func(reader ethdb.AncientReaderOp) error { db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
data, _ = reader.Ancient(freezerBodiesTable, number) data, _ = reader.Ancient(chainFreezerBodiesTable, number)
if len(data) > 0 { if len(data) > 0 {
return nil return nil
} }
@ -527,7 +527,7 @@ func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
db.ReadAncients(func(reader ethdb.AncientReaderOp) error { db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients // Check if the data is in ancients
if isCanon(reader, number, hash) { if isCanon(reader, number, hash) {
data, _ = reader.Ancient(freezerDifficultyTable, number) data, _ = reader.Ancient(chainFreezerDifficultyTable, number)
return nil return nil
} }
// If not, try reading from leveldb // If not, try reading from leveldb
@ -587,7 +587,7 @@ func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawVa
db.ReadAncients(func(reader ethdb.AncientReaderOp) error { db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients // Check if the data is in ancients
if isCanon(reader, number, hash) { if isCanon(reader, number, hash) {
data, _ = reader.Ancient(freezerReceiptTable, number) data, _ = reader.Ancient(chainFreezerReceiptTable, number)
return nil return nil
} }
// If not, try reading from leveldb // If not, try reading from leveldb
@ -819,19 +819,19 @@ func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts
func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error { func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
num := block.NumberU64() num := block.NumberU64()
if err := op.AppendRaw(freezerHashTable, num, block.Hash().Bytes()); err != nil { if err := op.AppendRaw(chainFreezerHashTable, num, block.Hash().Bytes()); err != nil {
return fmt.Errorf("can't add block %d hash: %v", num, err) return fmt.Errorf("can't add block %d hash: %v", num, err)
} }
if err := op.Append(freezerHeaderTable, num, header); err != nil { if err := op.Append(chainFreezerHeaderTable, num, header); err != nil {
return fmt.Errorf("can't append block header %d: %v", num, err) return fmt.Errorf("can't append block header %d: %v", num, err)
} }
if err := op.Append(freezerBodiesTable, num, block.Body()); err != nil { if err := op.Append(chainFreezerBodiesTable, num, block.Body()); err != nil {
return fmt.Errorf("can't append block body %d: %v", num, err) return fmt.Errorf("can't append block body %d: %v", num, err)
} }
if err := op.Append(freezerReceiptTable, num, receipts); err != nil { if err := op.Append(chainFreezerReceiptTable, num, receipts); err != nil {
return fmt.Errorf("can't append block %d receipts: %v", num, err) return fmt.Errorf("can't append block %d receipts: %v", num, err)
} }
if err := op.Append(freezerDifficultyTable, num, td); err != nil { if err := op.Append(chainFreezerDifficultyTable, num, td); err != nil {
return fmt.Errorf("can't append block %d total difficulty: %v", num, err) return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
} }
return nil return nil

View File

@ -285,7 +285,7 @@ func TestTdStorage(t *testing.T) {
func TestCanonicalMappingStorage(t *testing.T) { func TestCanonicalMappingStorage(t *testing.T) {
db := NewMemoryDatabase() db := NewMemoryDatabase()
// Create a test canonical number and assinged hash to move around // Create a test canonical number and assigned hash to move around
hash, number := common.Hash{0: 0xff}, uint64(314) hash, number := common.Hash{0: 0xff}, uint64(314)
if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) { if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
t.Fatalf("Non existent canonical mapping returned: %v", entry) t.Fatalf("Non existent canonical mapping returned: %v", entry)

View File

@ -81,15 +81,16 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
} }
} }
// ReadGenesisState retrieves the genesis state based on the given genesis hash. // ReadGenesisStateSpec retrieves the genesis state specification based on the
func ReadGenesisState(db ethdb.KeyValueReader, hash common.Hash) []byte { // given genesis hash.
data, _ := db.Get(genesisKey(hash)) func ReadGenesisStateSpec(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(genesisStateSpecKey(hash))
return data return data
} }
// WriteGenesisState writes the genesis state into the disk. // WriteGenesisStateSpec writes the genesis state specification into the disk.
func WriteGenesisState(db ethdb.KeyValueWriter, hash common.Hash, data []byte) { func WriteGenesisStateSpec(db ethdb.KeyValueWriter, hash common.Hash, data []byte) {
if err := db.Put(genesisKey(hash), data); err != nil { if err := db.Put(genesisStateSpecKey(hash), data); err != nil {
log.Crit("Failed to store genesis state", "err", err) log.Crit("Failed to store genesis state", "err", err)
} }
} }

View File

@ -0,0 +1,86 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import "fmt"
// The list of table names of chain freezer.
const (
// chainFreezerHeaderTable indicates the name of the freezer header table.
chainFreezerHeaderTable = "headers"
// chainFreezerHashTable indicates the name of the freezer canonical hash table.
chainFreezerHashTable = "hashes"
// chainFreezerBodiesTable indicates the name of the freezer block body table.
chainFreezerBodiesTable = "bodies"
// chainFreezerReceiptTable indicates the name of the freezer receipts table.
chainFreezerReceiptTable = "receipts"
// chainFreezerDifficultyTable indicates the name of the freezer total difficulty table.
chainFreezerDifficultyTable = "diffs"
)
// chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables.
// Hashes and difficulties don't compress well.
var chainFreezerNoSnappy = map[string]bool{
chainFreezerHeaderTable: false,
chainFreezerHashTable: true,
chainFreezerBodiesTable: false,
chainFreezerReceiptTable: false,
chainFreezerDifficultyTable: true,
}
// The list of identifiers of ancient stores.
var (
chainFreezerName = "chain" // the folder name of chain segment ancient store.
)
// freezers the collections of all builtin freezers.
var freezers = []string{chainFreezerName}
// InspectFreezerTable dumps out the index of a specific freezer table. The passed
// ancient indicates the path of root ancient directory where the chain freezer can
// be opened. Start and end specify the range for dumping out indexes.
// Note this function can only be used for debugging purposes.
func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
var (
path string
tables map[string]bool
)
switch freezerName {
case chainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
noSnappy, exist := tables[tableName]
if !exist {
var names []string
for name := range tables {
names = append(names, name)
}
return fmt.Errorf("unknown table, supported ones: %v", names)
}
table, err := newFreezerTable(path, tableName, noSnappy, true)
if err != nil {
return err
}
table.dumpIndexStdout(start, end)
return nil
}

View File

@ -241,7 +241,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
if n := len(ancients); n > 0 { if n := len(ancients); n > 0 {
context = append(context, []interface{}{"hash", ancients[n-1]}...) context = append(context, []interface{}{"hash", ancients[n-1]}...)
} }
log.Info("Deep froze chain segment", context...) log.Debug("Deep froze chain segment", context...)
// Avoid database thrashing with tiny writes // Avoid database thrashing with tiny writes
if frozen-first < freezerBatchLimit { if frozen-first < freezerBatchLimit {
@ -278,19 +278,19 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash
} }
// Write to the batch. // Write to the batch.
if err := op.AppendRaw(freezerHashTable, number, hash[:]); err != nil { if err := op.AppendRaw(chainFreezerHashTable, number, hash[:]); err != nil {
return fmt.Errorf("can't write hash to Freezer: %v", err) return fmt.Errorf("can't write hash to Freezer: %v", err)
} }
if err := op.AppendRaw(freezerHeaderTable, number, header); err != nil { if err := op.AppendRaw(chainFreezerHeaderTable, number, header); err != nil {
return fmt.Errorf("can't write header to Freezer: %v", err) return fmt.Errorf("can't write header to Freezer: %v", err)
} }
if err := op.AppendRaw(freezerBodiesTable, number, body); err != nil { if err := op.AppendRaw(chainFreezerBodiesTable, number, body); err != nil {
return fmt.Errorf("can't write body to Freezer: %v", err) return fmt.Errorf("can't write body to Freezer: %v", err)
} }
if err := op.AppendRaw(freezerReceiptTable, number, receipts); err != nil { if err := op.AppendRaw(chainFreezerReceiptTable, number, receipts); err != nil {
return fmt.Errorf("can't write receipts to Freezer: %v", err) return fmt.Errorf("can't write receipts to Freezer: %v", err)
} }
if err := op.AppendRaw(freezerDifficultyTable, number, td); err != nil { if err := op.AppendRaw(chainFreezerDifficultyTable, number, td); err != nil {
return fmt.Errorf("can't write td to Freezer: %v", err) return fmt.Errorf("can't write td to Freezer: %v", err)
} }

View File

@ -50,7 +50,7 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
if i+count > frozen { if i+count > frozen {
count = frozen - i count = frozen - i
} }
data, err := db.AncientRange(freezerHashTable, i, count, 32*count) data, err := db.AncientRange(chainFreezerHashTable, i, count, 32*count)
if err != nil { if err != nil {
log.Crit("Failed to init database from freezer", "err", err) log.Crit("Failed to init database from freezer", "err", err)
} }

View File

@ -21,6 +21,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"os" "os"
"path"
"sync/atomic" "sync/atomic"
"time" "time"
@ -34,10 +35,16 @@ import (
// freezerdb is a database wrapper that enabled freezer data retrievals. // freezerdb is a database wrapper that enabled freezer data retrievals.
type freezerdb struct { type freezerdb struct {
ancientRoot string
ethdb.KeyValueStore ethdb.KeyValueStore
ethdb.AncientStore ethdb.AncientStore
} }
// AncientDatadir returns the path of root ancient directory.
func (frdb *freezerdb) AncientDatadir() (string, error) {
return frdb.ancientRoot, nil
}
// Close implements io.Closer, closing both the fast key-value store as well as // Close implements io.Closer, closing both the fast key-value store as well as
// the slow ancient tables. // the slow ancient tables.
func (frdb *freezerdb) Close() error { func (frdb *freezerdb) Close() error {
@ -162,12 +169,36 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
return &nofreezedb{KeyValueStore: db} return &nofreezedb{KeyValueStore: db}
} }
// resolveChainFreezerDir is a helper function which resolves the absolute path
// of chain freezer by considering backward compatibility.
func resolveChainFreezerDir(ancient string) string {
// Check if the chain freezer is already present in the specified
// sub folder, if not then two possibilities:
// - chain freezer is not initialized
// - chain freezer exists in legacy location (root ancient folder)
freezer := path.Join(ancient, chainFreezerName)
if !common.FileExist(freezer) {
if !common.FileExist(ancient) {
// The entire ancient store is not initialized, still use the sub
// folder for initialization.
} else {
// Ancient root is already initialized, then we hold the assumption
// that chain freezer is also initialized and located in root folder.
// In this case fallback to legacy location.
freezer = ancient
log.Info("Found legacy ancient chain path", "location", ancient)
}
}
return freezer
}
// NewDatabaseWithFreezer creates a high level database on top of a given key- // NewDatabaseWithFreezer creates a high level database on top of a given key-
// value data store with a freezer moving immutable chain segments into cold // value data store with a freezer moving immutable chain segments into cold
// storage. // storage. The passed ancient indicates the path of root ancient directory
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) { // where the chain freezer can be opened.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
// Create the idle freezer instance // Create the idle freezer instance
frdb, err := newChainFreezer(freezer, namespace, readonly, freezerTableSize, FreezerNoSnappy) frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, freezerTableSize, chainFreezerNoSnappy)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -198,7 +229,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
// If the freezer already contains something, ensure that the genesis blocks // If the freezer already contains something, ensure that the genesis blocks
// match, otherwise we might mix up freezers across chains and destroy both // match, otherwise we might mix up freezers across chains and destroy both
// the freezer and the key-value store. // the freezer and the key-value store.
frgenesis, err := frdb.Ancient(freezerHashTable, 0) frgenesis, err := frdb.Ancient(chainFreezerHashTable, 0)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err) return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
} else if !bytes.Equal(kvgenesis, frgenesis) { } else if !bytes.Equal(kvgenesis, frgenesis) {
@ -229,7 +260,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 { if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path") return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
} }
// Block #1 is still in the database, we're allowed to init a new feezer // Block #1 is still in the database, we're allowed to init a new freezer
} }
// Otherwise, the head header is still the genesis, we're allowed to init a new // Otherwise, the head header is still the genesis, we're allowed to init a new
// freezer. // freezer.
@ -244,6 +275,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
}() }()
} }
return &freezerdb{ return &freezerdb{
ancientRoot: ancient,
KeyValueStore: db, KeyValueStore: db,
AncientStore: frdb, AncientStore: frdb,
}, nil }, nil
@ -273,13 +305,15 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r
} }
// NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a // NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a
// freezer moving immutable chain segments into cold storage. // freezer moving immutable chain segments into cold storage. The passed ancient
func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string, readonly bool) (ethdb.Database, error) { // indicates the path of root ancient directory where the chain freezer can be
// opened.
func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
kvdb, err := leveldb.New(file, cache, handles, namespace, readonly) kvdb, err := leveldb.New(file, cache, handles, namespace, readonly)
if err != nil { if err != nil {
return nil, err return nil, err
} }
frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace, readonly) frdb, err := NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly)
if err != nil { if err != nil {
kvdb.Close() kvdb.Close()
return nil, err return nil, err
@ -441,7 +475,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
} }
// Inspect append-only file store then. // Inspect append-only file store then.
ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize} ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} { for i, category := range []string{chainFreezerHeaderTable, chainFreezerBodiesTable, chainFreezerReceiptTable, chainFreezerHashTable, chainFreezerDifficultyTable} {
if size, err := db.AncientSize(category); err == nil { if size, err := db.AncientSize(category); err == nil {
*ancientSizes[i] += common.StorageSize(size) *ancientSizes[i] += common.StorageSize(size)
total += common.StorageSize(size) total += common.StorageSize(size)

View File

@ -68,8 +68,6 @@ type Freezer struct {
frozen uint64 // Number of blocks already frozen frozen uint64 // Number of blocks already frozen
tail uint64 // Number of the first stored item in the freezer tail uint64 // Number of the first stored item in the freezer
datadir string // Path of root directory of ancient store
// This lock synchronizes writers and the truncate operation, as well as // This lock synchronizes writers and the truncate operation, as well as
// the "atomic" (batched) read operations. // the "atomic" (batched) read operations.
writeLock sync.RWMutex writeLock sync.RWMutex
@ -111,7 +109,6 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
readonly: readonly, readonly: readonly,
tables: make(map[string]*freezerTable), tables: make(map[string]*freezerTable),
instanceLock: lock, instanceLock: lock,
datadir: datadir,
} }
// Create the tables. // Create the tables.
@ -432,7 +429,7 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
// Set up new dir for the migrated table, the content of which // Set up new dir for the migrated table, the content of which
// we'll at the end move over to the ancients dir. // we'll at the end move over to the ancients dir.
migrationPath := filepath.Join(ancientsPath, "migration") migrationPath := filepath.Join(ancientsPath, "migration")
newTable, err := NewFreezerTable(migrationPath, kind, table.noCompression, false) newTable, err := newFreezerTable(migrationPath, kind, table.noCompression, false)
if err != nil { if err != nil {
return err return err
} }
@ -489,11 +486,5 @@ func (f *Freezer) MigrateTable(kind string, convert convertLegacyFn) error {
if err := os.Remove(migrationPath); err != nil { if err := os.Remove(migrationPath); err != nil {
return err return err
} }
return nil return nil
} }
// AncientDatadir returns the root directory path of the ancient store.
func (f *Freezer) AncientDatadir() (string, error) {
return f.datadir, nil
}

View File

@ -123,8 +123,8 @@ type freezerTable struct {
lock sync.RWMutex // Mutex protecting the data file descriptors lock sync.RWMutex // Mutex protecting the data file descriptors
} }
// NewFreezerTable opens the given path as a freezer table. // newFreezerTable opens the given path as a freezer table.
func NewFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) { func newFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly) return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
} }
@ -884,9 +884,7 @@ func (t *freezerTable) Sync() error {
return t.head.Sync() return t.head.Sync()
} }
// DumpIndex is a debug print utility function, mainly for testing. It can also func (t *freezerTable) dumpIndexStdout(start, stop int64) {
// be used to analyse a live freezer table index.
func (t *freezerTable) DumpIndex(start, stop int64) {
t.dumpIndex(os.Stdout, start, stop) t.dumpIndex(os.Stdout, start, stop)
} }

View File

@ -902,7 +902,7 @@ func TestSequentialRead(t *testing.T) {
} }
// Write 15 bytes 30 times // Write 15 bytes 30 times
writeChunks(t, f, 30, 15) writeChunks(t, f, 30, 15)
f.DumpIndex(0, 30) f.dumpIndexStdout(0, 30)
f.Close() f.Close()
} }
{ // Open it, iterate, verify iteration { // Open it, iterate, verify iteration

View File

@ -59,7 +59,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) {
} }
appendAncientFnList := pl.Lookup("AppendAncient", func(item interface{}) bool { appendAncientFnList := pl.Lookup("AppendAncient", func(item interface{}) bool {
_, ok := item.(func(number uint64, hash, header, body, receipts, td []byte)) _, ok := item.(func(number uint64, hash, header, body, receipts, td []byte))
if ok { log.Warn("PlugEth's AppendAncient is deprecated. Please update to ModifyAncients.") } if ok { log.Warn("PluGeth's AppendAncient is deprecated. Please update to ModifyAncients.") }
return ok return ok
}) })
if len(appendAncientFnList) > 0 { if len(appendAncientFnList) > 0 {
@ -70,7 +70,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) {
receipts []byte receipts []byte
td []byte td []byte
) )
if hashi, ok := update[freezerHashTable]; ok { if hashi, ok := update[chainFreezerHashTable]; ok {
switch v := hashi.(type) { switch v := hashi.(type) {
case []byte: case []byte:
hash = v hash = v
@ -78,7 +78,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) {
hash, _ = rlp.EncodeToBytes(v) hash, _ = rlp.EncodeToBytes(v)
} }
} }
if headeri, ok := update[freezerHeaderTable]; ok { if headeri, ok := update[chainFreezerHeaderTable]; ok {
switch v := headeri.(type) { switch v := headeri.(type) {
case []byte: case []byte:
header = v header = v
@ -86,7 +86,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) {
header, _ = rlp.EncodeToBytes(v) header, _ = rlp.EncodeToBytes(v)
} }
} }
if bodyi, ok := update[freezerBodiesTable]; ok { if bodyi, ok := update[chainFreezerBodiesTable]; ok {
switch v := bodyi.(type) { switch v := bodyi.(type) {
case []byte: case []byte:
body = v body = v
@ -94,7 +94,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) {
body, _ = rlp.EncodeToBytes(v) body, _ = rlp.EncodeToBytes(v)
} }
} }
if receiptsi, ok := update[freezerReceiptTable]; ok { if receiptsi, ok := update[chainFreezerReceiptTable]; ok {
switch v := receiptsi.(type) { switch v := receiptsi.(type) {
case []byte: case []byte:
receipts = v receipts = v
@ -102,7 +102,7 @@ func PluginCommitUpdate(pl *plugins.PluginLoader, num uint64) {
receipts, _ = rlp.EncodeToBytes(v) receipts, _ = rlp.EncodeToBytes(v)
} }
} }
if tdi, ok := update[freezerDifficultyTable]; ok { if tdi, ok := update[chainFreezerDifficultyTable]; ok {
switch v := tdi.(type) { switch v := tdi.(type) {
case []byte: case []byte:
td = v td = v

View File

@ -111,33 +111,6 @@ var (
preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil) preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
) )
const (
// freezerHeaderTable indicates the name of the freezer header table.
freezerHeaderTable = "headers"
// freezerHashTable indicates the name of the freezer canonical hash table.
freezerHashTable = "hashes"
// freezerBodiesTable indicates the name of the freezer block body table.
freezerBodiesTable = "bodies"
// freezerReceiptTable indicates the name of the freezer receipts table.
freezerReceiptTable = "receipts"
// freezerDifficultyTable indicates the name of the freezer total difficulty table.
freezerDifficultyTable = "diffs"
)
// FreezerNoSnappy configures whether compression is disabled for the ancient-tables.
// Hashes and difficulties don't compress well.
var FreezerNoSnappy = map[string]bool{
freezerHeaderTable: false,
freezerHashTable: true,
freezerBodiesTable: false,
freezerReceiptTable: false,
freezerDifficultyTable: true,
}
// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary // LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
// fields. // fields.
type LegacyTxLookupEntry struct { type LegacyTxLookupEntry struct {
@ -247,7 +220,7 @@ func configKey(hash common.Hash) []byte {
return append(configPrefix, hash.Bytes()...) return append(configPrefix, hash.Bytes()...)
} }
// genesisKey = genesisPrefix + hash // genesisStateSpecKey = genesisPrefix + hash
func genesisKey(hash common.Hash) []byte { func genesisStateSpecKey(hash common.Hash) []byte {
return append(genesisPrefix, hash.Bytes()...) return append(genesisPrefix, hash.Bytes()...)
} }

View File

@ -63,7 +63,7 @@ type Trie interface {
// GetKey returns the sha3 preimage of a hashed key that was previously used // GetKey returns the sha3 preimage of a hashed key that was previously used
// to store a value. // to store a value.
// //
// TODO(fjl): remove this when SecureTrie is removed // TODO(fjl): remove this when StateTrie is removed
GetKey([]byte) []byte GetKey([]byte) []byte
// TryGet returns the value for key stored in the trie. The value bytes must // TryGet returns the value for key stored in the trie. The value bytes must
@ -71,8 +71,8 @@ type Trie interface {
// trie.MissingNodeError is returned. // trie.MissingNodeError is returned.
TryGet(key []byte) ([]byte, error) TryGet(key []byte) ([]byte, error)
// TryUpdateAccount abstract an account write in the trie. // TryGetAccount abstract an account read from the trie.
TryUpdateAccount(key []byte, account *types.StateAccount) error TryGetAccount(key []byte) (*types.StateAccount, error)
// TryUpdate associates key with value in the trie. If value has length zero, any // TryUpdate associates key with value in the trie. If value has length zero, any
// existing value is deleted from the trie. The value bytes must not be modified // existing value is deleted from the trie. The value bytes must not be modified
@ -80,17 +80,27 @@ type Trie interface {
// database, a trie.MissingNodeError is returned. // database, a trie.MissingNodeError is returned.
TryUpdate(key, value []byte) error TryUpdate(key, value []byte) error
// TryUpdateAccount abstract an account write to the trie.
TryUpdateAccount(key []byte, account *types.StateAccount) error
// TryDelete removes any existing value for key from the trie. If a node was not // TryDelete removes any existing value for key from the trie. If a node was not
// found in the database, a trie.MissingNodeError is returned. // found in the database, a trie.MissingNodeError is returned.
TryDelete(key []byte) error TryDelete(key []byte) error
// TryDeleteAccount abstracts an account deletion from the trie.
TryDeleteAccount(key []byte) error
// Hash returns the root hash of the trie. It does not write to the database and // Hash returns the root hash of the trie. It does not write to the database and
// can be used even if the trie doesn't have one. // can be used even if the trie doesn't have one.
Hash() common.Hash Hash() common.Hash
// Commit writes all nodes to the trie's memory database, tracking the internal // Commit collects all dirty nodes in the trie and replace them with the
// and external (for account tries) references. // corresponding node hash. All collected nodes(including dirty leaves if
Commit(onleaf trie.LeafCallback) (common.Hash, int, error) // collectLeaf is true) will be encapsulated into a nodeset for return.
// The returned nodeset can be nil if the trie is clean(nothing to commit).
// Once the trie is committed, it's not usable anymore. A new trie must
// be created with new root and updated trie database for following usage
Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error)
// NodeIterator returns an iterator that returns nodes of the trie. Iteration // NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key. // starts at the key after the given start key.
@ -133,7 +143,7 @@ type cachingDB struct {
// OpenTrie opens the main account trie at a specific root hash. // OpenTrie opens the main account trie at a specific root hash.
func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
tr, err := trie.NewSecure(common.Hash{}, root, db.db) tr, err := trie.NewStateTrie(common.Hash{}, root, db.db)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -142,7 +152,7 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
// OpenStorageTrie opens the storage trie of an account. // OpenStorageTrie opens the storage trie of an account.
func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) { func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
tr, err := trie.NewSecure(addrHash, root, db.db) tr, err := trie.NewStateTrie(addrHash, root, db.db)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -152,7 +162,7 @@ func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
// CopyTrie returns an independent copy of the given trie. // CopyTrie returns an independent copy of the given trie.
func (db *cachingDB) CopyTrie(t Trie) Trie { func (db *cachingDB) CopyTrie(t Trie) Trie {
switch t := t.(type) { switch t := t.(type) {
case *trie.SecureTrie: case *trie.StateTrie:
return t.Copy() return t.Copy()
default: default:
panic(fmt.Errorf("unknown trie type %T", t)) panic(fmt.Errorf("unknown trie type %T", t))

View File

@ -23,6 +23,6 @@ var (
storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil) storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil) accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil) storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
accountCommittedMeter = metrics.NewRegisteredMeter("state/commit/account", nil) accountTrieCommittedMeter = metrics.NewRegisteredMeter("state/commit/accountnodes", nil)
storageCommittedMeter = metrics.NewRegisteredMeter("state/commit/storage", nil) storageTriesCommittedMeter = metrics.NewRegisteredMeter("state/commit/storagenodes", nil)
) )

View File

@ -410,7 +410,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
if genesis == nil { if genesis == nil {
return errors.New("missing genesis block") return errors.New("missing genesis block")
} }
t, err := trie.NewSecure(common.Hash{}, genesis.Root(), trie.NewDatabase(db)) t, err := trie.NewStateTrie(common.Hash{}, genesis.Root(), trie.NewDatabase(db))
if err != nil { if err != nil {
return err return err
} }
@ -430,7 +430,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
return err return err
} }
if acc.Root != emptyRoot { if acc.Root != emptyRoot {
storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.LeafKey()), acc.Root, trie.NewDatabase(db)) storageTrie, err := trie.NewStateTrie(common.BytesToHash(accIter.LeafKey()), acc.Root, trie.NewDatabase(db))
if err != nil { if err != nil {
return err return err
} }

View File

@ -367,7 +367,10 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, owner common.Hash, roo
for i, key := range result.keys { for i, key := range result.keys {
snapTrie.Update(key, result.vals[i]) snapTrie.Update(key, result.vals[i])
} }
root, _, _ := snapTrie.Commit(nil) root, nodes, _ := snapTrie.Commit(false)
if nodes != nil {
snapTrieDb.Update(trie.NewWithNodeSet(nodes))
}
snapTrieDb.Commit(root, false, nil) snapTrieDb.Commit(root, false, nil)
} }
// Construct the trie for state iteration, reuse the trie // Construct the trie for state iteration, reuse the trie

View File

@ -142,17 +142,19 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
type testHelper struct { type testHelper struct {
diskdb ethdb.Database diskdb ethdb.Database
triedb *trie.Database triedb *trie.Database
accTrie *trie.SecureTrie accTrie *trie.StateTrie
nodes *trie.MergedNodeSet
} }
func newHelper() *testHelper { func newHelper() *testHelper {
diskdb := rawdb.NewMemoryDatabase() diskdb := rawdb.NewMemoryDatabase()
triedb := trie.NewDatabase(diskdb) triedb := trie.NewDatabase(diskdb)
accTrie, _ := trie.NewSecure(common.Hash{}, common.Hash{}, triedb) accTrie, _ := trie.NewStateTrie(common.Hash{}, common.Hash{}, triedb)
return &testHelper{ return &testHelper{
diskdb: diskdb, diskdb: diskdb,
triedb: triedb, triedb: triedb,
accTrie: accTrie, accTrie: accTrie,
nodes: trie.NewMergedNodeSet(),
} }
} }
@ -180,21 +182,26 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string)
} }
func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string, vals []string, commit bool) []byte { func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string, vals []string, commit bool) []byte {
stTrie, _ := trie.NewSecure(owner, common.Hash{}, t.triedb) stTrie, _ := trie.NewStateTrie(owner, common.Hash{}, t.triedb)
for i, k := range keys { for i, k := range keys {
stTrie.Update([]byte(k), []byte(vals[i])) stTrie.Update([]byte(k), []byte(vals[i]))
} }
var root common.Hash
if !commit { if !commit {
root = stTrie.Hash() return stTrie.Hash().Bytes()
} else { }
root, _, _ = stTrie.Commit(nil) root, nodes, _ := stTrie.Commit(false)
if nodes != nil {
t.nodes.Merge(nodes)
} }
return root.Bytes() return root.Bytes()
} }
func (t *testHelper) Commit() common.Hash { func (t *testHelper) Commit() common.Hash {
root, _, _ := t.accTrie.Commit(nil) root, nodes, _ := t.accTrie.Commit(true)
if nodes != nil {
t.nodes.Merge(nodes)
}
t.triedb.Update(t.nodes)
t.triedb.Commit(root, false, nil) t.triedb.Commit(root, false, nil)
return root return root
} }
@ -378,7 +385,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4 helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
root, _, _ := helper.accTrie.Commit(nil) // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978 root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
// Delete an account trie leaf and ensure the generator chokes // Delete an account trie leaf and ensure the generator chokes
helper.triedb.Commit(root, false, nil) helper.triedb.Commit(root, false, nil)
@ -413,18 +420,8 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
root, _, _ := helper.accTrie.Commit(nil)
// We can only corrupt the disk database, so flush the tries out root := helper.Commit()
helper.triedb.Reference(
common.BytesToHash(stRoot),
common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
)
helper.triedb.Reference(
common.BytesToHash(stRoot),
common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
)
helper.triedb.Commit(root, false, nil)
// Delete a storage trie root and ensure the generator chokes // Delete a storage trie root and ensure the generator chokes
helper.diskdb.Delete(stRoot) helper.diskdb.Delete(stRoot)
@ -458,18 +455,7 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
root, _, _ := helper.accTrie.Commit(nil) root := helper.Commit()
// We can only corrupt the disk database, so flush the tries out
helper.triedb.Reference(
common.BytesToHash(stRoot),
common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
)
helper.triedb.Reference(
common.BytesToHash(stRoot),
common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
)
helper.triedb.Commit(root, false, nil)
// Delete a storage trie leaf and ensure the generator chokes // Delete a storage trie leaf and ensure the generator chokes
helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes()) helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
@ -825,10 +811,12 @@ func populateDangling(disk ethdb.KeyValueStore) {
// This test will populate some dangling storages to see if they can be cleaned up. // This test will populate some dangling storages to see if they can be cleaned up.
func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) { func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
var helper = newHelper() var helper = newHelper()
stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
@ -858,10 +846,12 @@ func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) {
// This test will populate some dangling storages to see if they can be cleaned up. // This test will populate some dangling storages to see if they can be cleaned up.
func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) { func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) {
var helper = newHelper() var helper = newHelper()
stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
populateDangling(helper.diskdb) populateDangling(helper.diskdb)

View File

@ -319,7 +319,7 @@ func (fi *fastIterator) Slot() []byte {
} }
// Release iterates over all the remaining live layer iterators and releases each // Release iterates over all the remaining live layer iterators and releases each
// of thme individually. // of them individually.
func (fi *fastIterator) Release() { func (fi *fastIterator) Release() {
for _, it := range fi.iterators { for _, it := range fi.iterators {
it.it.Release() it.it.Release()
@ -327,7 +327,7 @@ func (fi *fastIterator) Release() {
fi.iterators = nil fi.iterators = nil
} }
// Debug is a convencience helper during testing // Debug is a convenience helper during testing
func (fi *fastIterator) Debug() { func (fi *fastIterator) Debug() {
for _, it := range fi.iterators { for _, it := range fi.iterators {
fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0]) fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0])

View File

@ -265,7 +265,7 @@ func TestPostCapBasicDataAccess(t *testing.T) {
snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil) snaps.Update(common.HexToHash("0xa3"), common.HexToHash("0xa2"), nil, setAccount("0xa3"), nil)
snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil) snaps.Update(common.HexToHash("0xb3"), common.HexToHash("0xb2"), nil, setAccount("0xb3"), nil)
// checkExist verifies if an account exiss in a snapshot // checkExist verifies if an account exists in a snapshot
checkExist := func(layer *diffLayer, key string) error { checkExist := func(layer *diffLayer, key string) error {
if data, _ := layer.Account(common.HexToHash(key)); data == nil { if data, _ := layer.Account(common.HexToHash(key)); data == nil {
return fmt.Errorf("expected %x to exist, got nil", common.HexToHash(key)) return fmt.Errorf("expected %x to exist, got nil", common.HexToHash(key))

View File

@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
) )
var emptyCodeHash = crypto.Keccak256(nil) var emptyCodeHash = crypto.Keccak256(nil)
@ -375,23 +376,23 @@ func (s *stateObject) updateRoot(db Database) {
// CommitTrie the storage trie of the object to db. // CommitTrie the storage trie of the object to db.
// This updates the trie root. // This updates the trie root.
func (s *stateObject) CommitTrie(db Database) (int, error) { func (s *stateObject) CommitTrie(db Database) (*trie.NodeSet, error) {
// If nothing changed, don't bother with hashing anything // If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil { if s.updateTrie(db) == nil {
return 0, nil return nil, nil
} }
if s.dbErr != nil { if s.dbErr != nil {
return 0, s.dbErr return nil, s.dbErr
} }
// Track the amount of time wasted on committing the storage trie // Track the amount of time wasted on committing the storage trie
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now()) defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
} }
root, committed, err := s.trie.Commit(nil) root, nodes, err := s.trie.Commit(false)
if err == nil { if err == nil {
s.data.Root = root s.data.Root = root
} }
return committed, err return nodes, err
} }
// AddBalance adds amount to s's balance. // AddBalance adds amount to s's balance.

View File

@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
) )
type stateTest struct { type stateTest struct {
@ -40,7 +41,7 @@ func newStateTest() *stateTest {
func TestDump(t *testing.T) { func TestDump(t *testing.T) {
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
sdb, _ := New(common.Hash{}, NewDatabaseWithConfig(db, nil), nil) sdb, _ := New(common.Hash{}, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
s := &stateTest{db: db, state: sdb} s := &stateTest{db: db, state: sdb}
// generate a few entries // generate a few entries

View File

@ -493,7 +493,7 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
} }
// Delete the account from the trie // Delete the account from the trie
addr := obj.Address() addr := obj.Address()
if err := s.trie.TryDelete(addr[:]); err != nil { if err := s.trie.TryDeleteAccount(addr[:]); err != nil {
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
} }
} }
@ -546,20 +546,16 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
// If snapshot unavailable or reading from it failed, load from the database // If snapshot unavailable or reading from it failed, load from the database
if data == nil { if data == nil {
start := time.Now() start := time.Now()
enc, err := s.trie.TryGet(addr.Bytes()) var err error
data, err = s.trie.TryGetAccount(addr.Bytes())
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
s.AccountReads += time.Since(start) s.AccountReads += time.Since(start)
} }
if err != nil { if err != nil {
s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err)) s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %w", addr.Bytes(), err))
return nil return nil
} }
if len(enc) == 0 { if data == nil {
return nil
}
data = new(types.StateAccount)
if err := rlp.DecodeBytes(enc, data); err != nil {
log.Error("Failed to decode state object", "addr", addr, "err", err)
return nil return nil
} }
} }
@ -783,7 +779,7 @@ func (s *StateDB) GetRefund() uint64 {
return s.refund return s.refund
} }
// Finalise finalises the state by removing the s destructed objects and clears // Finalise finalises the state by removing the destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates // the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that. // into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) { func (s *StateDB) Finalise(deleteEmptyObjects bool) {
@ -805,7 +801,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// If state snapshotting is active, also mark the destruction there. // If state snapshotting is active, also mark the destruction there.
// Note, we can't do this only at the end of a block because multiple // Note, we can't do this only at the end of a block because multiple
// transactions within the same block might self destruct and then // transactions within the same block might self destruct and then
// ressurrect an account; but the snapshotter needs both events. // resurrect an account; but the snapshotter needs both events.
if s.snap != nil { if s.snap != nil {
s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely) s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a ressurrect) delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a ressurrect)
@ -853,7 +849,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// Although naively it makes sense to retrieve the account trie and then do // Although naively it makes sense to retrieve the account trie and then do
// the contract storage and account updates sequentially, that short circuits // the contract storage and account updates sequentially, that short circuits
// the account prefetcher. Instead, let's process all the storage updates // the account prefetcher. Instead, let's process all the storage updates
// first, giving the account prefeches just a few more milliseconds of time // first, giving the account prefetches just a few more milliseconds of time
// to pull useful data from disk. // to pull useful data from disk.
for addr := range s.stateObjectsPending { for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; !obj.deleted { if obj := s.stateObjects[addr]; !obj.deleted {
@ -897,7 +893,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
func (s *StateDB) Prepare(thash common.Hash, ti int) { func (s *StateDB) Prepare(thash common.Hash, ti int) {
s.thash = thash s.thash = thash
s.txIndex = ti s.txIndex = ti
s.accessList = newAccessList()
} }
func (s *StateDB) clearJournalAndRefund() { func (s *StateDB) clearJournalAndRefund() {
@ -905,7 +900,7 @@ func (s *StateDB) clearJournalAndRefund() {
s.journal = newJournal() s.journal = newJournal()
s.refund = 0 s.refund = 0
} }
s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entires s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
} }
// Commit writes the state to the underlying in-memory trie database. // Commit writes the state to the underlying in-memory trie database.
@ -917,23 +912,37 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
s.IntermediateRoot(deleteEmptyObjects) s.IntermediateRoot(deleteEmptyObjects)
// Commit objects to the trie, measuring the elapsed time // Commit objects to the trie, measuring the elapsed time
var storageCommitted int var (
accountTrieNodes int
storageTrieNodes int
nodes = trie.NewMergedNodeSet()
)
// PluGeth injection
codeUpdates := make(map[common.Hash][]byte) codeUpdates := make(map[common.Hash][]byte)
// PluGeth injection
codeWriter := s.db.TrieDB().DiskDB().NewBatch() codeWriter := s.db.TrieDB().DiskDB().NewBatch()
for addr := range s.stateObjectsDirty { for addr := range s.stateObjectsDirty {
if obj := s.stateObjects[addr]; !obj.deleted { if obj := s.stateObjects[addr]; !obj.deleted {
// Write any contract code associated with the state object // Write any contract code associated with the state object
if obj.code != nil && obj.dirtyCode { if obj.code != nil && obj.dirtyCode {
rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
// PluGeth injection
codeUpdates[common.BytesToHash(obj.CodeHash())] = obj.code codeUpdates[common.BytesToHash(obj.CodeHash())] = obj.code
// PluGeth injection
obj.dirtyCode = false obj.dirtyCode = false
} }
// Write any storage changes in the state object to its storage trie // Write any storage changes in the state object to its storage trie
committed, err := obj.CommitTrie(s.db) set, err := obj.CommitTrie(s.db)
if err != nil { if err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
storageCommitted += committed // Merge the dirty nodes of storage trie into global set
if set != nil {
if err := nodes.Merge(set); err != nil {
return common.Hash{}, err
}
storageTrieNodes += set.Len()
}
} }
} }
if len(s.stateObjectsDirty) > 0 { if len(s.stateObjectsDirty) > 0 {
@ -944,26 +953,22 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
log.Crit("Failed to commit dirty codes", "error", err) log.Crit("Failed to commit dirty codes", "error", err)
} }
} }
// Write the account trie changes, measuing the amount of wasted time // Write the account trie changes, measuring the amount of wasted time
var start time.Time var start time.Time
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
start = time.Now() start = time.Now()
} }
// The onleaf func is called _serially_, so we can reuse the same account root, set, err := s.trie.Commit(true)
// for unmarshalling every time.
var account types.StateAccount
root, accountCommitted, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash, _ []byte) error {
if err := rlp.DecodeBytes(leaf, &account); err != nil {
return nil
}
if account.Root != emptyRoot {
s.db.TrieDB().Reference(account.Root, parent)
}
return nil
})
if err != nil { if err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
// Merge the dirty nodes of account trie into global set
if set != nil {
if err := nodes.Merge(set); err != nil {
return common.Hash{}, err
}
accountTrieNodes = set.Len()
}
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
s.AccountCommits += time.Since(start) s.AccountCommits += time.Since(start)
@ -971,8 +976,8 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
storageUpdatedMeter.Mark(int64(s.StorageUpdated)) storageUpdatedMeter.Mark(int64(s.StorageUpdated))
accountDeletedMeter.Mark(int64(s.AccountDeleted)) accountDeletedMeter.Mark(int64(s.AccountDeleted))
storageDeletedMeter.Mark(int64(s.StorageDeleted)) storageDeletedMeter.Mark(int64(s.StorageDeleted))
accountCommittedMeter.Mark(int64(accountCommitted)) accountTrieCommittedMeter.Mark(int64(accountTrieNodes))
storageCommittedMeter.Mark(int64(storageCommitted)) storageTriesCommittedMeter.Mark(int64(storageTrieNodes))
s.AccountUpdated, s.AccountDeleted = 0, 0 s.AccountUpdated, s.AccountDeleted = 0, 0
s.StorageUpdated, s.StorageDeleted = 0, 0 s.StorageUpdated, s.StorageDeleted = 0, 0
} }
@ -1001,6 +1006,9 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
} }
s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
} }
if err := s.db.TrieDB().Update(nodes); err != nil {
return common.Hash{}, err
}
s.originalRoot = root s.originalRoot = root
return root, err return root, err
} }

View File

@ -771,7 +771,7 @@ func TestStateDBAccessList(t *testing.T) {
t.Fatalf("expected %x to be in access list", address) t.Fatalf("expected %x to be in access list", address)
} }
} }
// Check that only the expected addresses are present in the acesslist // Check that only the expected addresses are present in the access list
for address := range state.accessList.addresses { for address := range state.accessList.addresses {
if _, exist := addressMap[address]; !exist { if _, exist := addressMap[address]; !exist {
t.Fatalf("extra address %x in access list", address) t.Fatalf("extra address %x in access list", address)

View File

@ -305,8 +305,8 @@ func TestIterativeDelayedStateSync(t *testing.T) {
} }
for len(nodeElements)+len(codeElements) > 0 { for len(nodeElements)+len(codeElements) > 0 {
// Sync only half of the scheduled nodes // Sync only half of the scheduled nodes
var nodeProcessd int var nodeProcessed int
var codeProcessd int var codeProcessed int
if len(codeElements) > 0 { if len(codeElements) > 0 {
codeResults := make([]trie.CodeSyncResult, len(codeElements)/2+1) codeResults := make([]trie.CodeSyncResult, len(codeElements)/2+1)
for i, element := range codeElements[:len(codeResults)] { for i, element := range codeElements[:len(codeResults)] {
@ -321,7 +321,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
t.Fatalf("failed to process result %v", err) t.Fatalf("failed to process result %v", err)
} }
} }
codeProcessd = len(codeResults) codeProcessed = len(codeResults)
} }
if len(nodeElements) > 0 { if len(nodeElements) > 0 {
nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1) nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1)
@ -337,7 +337,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
t.Fatalf("failed to process result %v", err) t.Fatalf("failed to process result %v", err)
} }
} }
nodeProcessd = len(nodeResults) nodeProcessed = len(nodeResults)
} }
batch := dstDb.NewBatch() batch := dstDb.NewBatch()
if err := sched.Commit(batch); err != nil { if err := sched.Commit(batch); err != nil {
@ -346,7 +346,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
batch.Write() batch.Write()
paths, nodes, codes = sched.Missing(0) paths, nodes, codes = sched.Missing(0)
nodeElements = nodeElements[nodeProcessd:] nodeElements = nodeElements[nodeProcessed:]
for i := 0; i < len(paths); i++ { for i := 0; i < len(paths); i++ {
nodeElements = append(nodeElements, stateElement{ nodeElements = append(nodeElements, stateElement{
path: paths[i], path: paths[i],
@ -354,7 +354,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
syncPath: trie.NewSyncPath([]byte(paths[i])), syncPath: trie.NewSyncPath([]byte(paths[i])),
}) })
} }
codeElements = codeElements[codeProcessd:] codeElements = codeElements[codeProcessed:]
for i := 0; i < len(codes); i++ { for i := 0; i < len(codes); i++ {
codeElements = append(codeElements, stateElement{ codeElements = append(codeElements, stateElement{
code: codes[i], code: codes[i],

View File

@ -212,7 +212,7 @@ type subfetcher struct {
wake chan struct{} // Wake channel if a new task is scheduled wake chan struct{} // Wake channel if a new task is scheduled
stop chan struct{} // Channel to interrupt processing stop chan struct{} // Channel to interrupt processing
term chan struct{} // Channel to signal iterruption term chan struct{} // Channel to signal interruption
copy chan chan Trie // Channel to request a copy of the current trie copy chan chan Trie // Channel to request a copy of the current trie
seen map[string]struct{} // Tracks the entries already loaded seen map[string]struct{} // Tracks the entries already loaded
@ -330,8 +330,12 @@ func (sf *subfetcher) loop() {
// No termination request yet, prefetch the next entry // No termination request yet, prefetch the next entry
if _, ok := sf.seen[string(task)]; ok { if _, ok := sf.seen[string(task)]; ok {
sf.dups++ sf.dups++
} else {
if len(task) == len(common.Address{}) {
sf.trie.TryGetAccount(task)
} else { } else {
sf.trie.TryGet(task) sf.trie.TryGet(task)
}
sf.seen[string(task)] = struct{}{} sf.seen[string(task)] = struct{}{}
} }
} }

View File

@ -91,7 +91,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
statedb.Prepare(tx.Hash(), i) statedb.Prepare(tx.Hash(), i)
pluginPreProcessTransaction(tx, block, i) pluginPreProcessTransaction(tx, block, i)
blockTracer.PreProcessTransaction(tx, block, i) blockTracer.PreProcessTransaction(tx, block, i)
receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv) receipt, err := applyTransaction(msg, p.config, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv)
if err != nil { if err != nil {
pluginBlockProcessingError(tx, block, err) pluginBlockProcessingError(tx, block, err)
blockTracer.BlockProcessingError(tx, block, err) blockTracer.BlockProcessingError(tx, block, err)
@ -110,7 +110,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
return receipts, allLogs, *usedGas, nil return receipts, allLogs, *usedGas, nil
} }
func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { func applyTransaction(msg types.Message, config *params.ChainConfig, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) {
// Create a new context to be used in the EVM environment. // Create a new context to be used in the EVM environment.
txContext := NewEVMTxContext(msg) txContext := NewEVMTxContext(msg)
evm.Reset(txContext, statedb) evm.Reset(txContext, statedb)
@ -167,5 +167,5 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
// Create a new context to be used in the EVM environment // Create a new context to be used in the EVM environment
blockContext := NewEVMBlockContext(header, bc, author) blockContext := NewEVMBlockContext(header, bc, author)
vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg)
return applyTransaction(msg, config, bc, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) return applyTransaction(msg, config, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
} }

View File

@ -19,6 +19,7 @@ package core
import ( import (
"errors" "errors"
"io" "io"
"io/fs"
"os" "os"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -57,12 +58,12 @@ func newTxJournal(path string) *txJournal {
// load parses a transaction journal dump from disk, loading its contents into // load parses a transaction journal dump from disk, loading its contents into
// the specified pool. // the specified pool.
func (journal *txJournal) load(add func([]*types.Transaction) []error) error { func (journal *txJournal) load(add func([]*types.Transaction) []error) error {
// Skip the parsing if the journal file doesn't exist at all
if !common.FileExist(journal.path) {
return nil
}
// Open the journal for loading any past transactions // Open the journal for loading any past transactions
input, err := os.Open(journal.path) input, err := os.Open(journal.path)
if errors.Is(err, fs.ErrNotExist) {
// Skip the parsing if the journal file doesn't exist at all
return nil
}
if err != nil { if err != nil {
return err return err
} }

View File

@ -317,7 +317,7 @@ func (b *Block) Header() *Header { return CopyHeader(b.header) }
func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} } func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} }
// Size returns the true RLP encoded storage size of the block, either by encoding // Size returns the true RLP encoded storage size of the block, either by encoding
// and returning it, or returning a previsouly cached value. // and returning it, or returning a previously cached value.
func (b *Block) Size() common.StorageSize { func (b *Block) Size() common.StorageSize {
if size := b.size.Load(); size != nil { if size := b.size.Load(); size != nil {
return size.(common.StorageSize) return size.(common.StorageSize)

View File

@ -314,7 +314,7 @@ func TestRlpDecodeParentHash(t *testing.T) {
} }
// Also test a very very large header. // Also test a very very large header.
{ {
// The rlp-encoding of the heder belowCauses _total_ length of 65540, // The rlp-encoding of the header belowCauses _total_ length of 65540,
// which is the first to blow the fast-path. // which is the first to blow the fast-path.
h := &Header{ h := &Header{
ParentHash: want, ParentHash: want,

View File

@ -333,7 +333,7 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) cfg.State, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
cfg.GasLimit = gas cfg.GasLimit = gas
if len(tracerCode) > 0 { if len(tracerCode) > 0 {
tracer, err := tracers.New(tracerCode, new(tracers.Context)) tracer, err := tracers.New(tracerCode, new(tracers.Context), nil)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -457,7 +457,7 @@ func BenchmarkSimpleLoop(b *testing.B) {
byte(vm.JUMP), byte(vm.JUMP),
} }
calllRevertingContractWithInput := []byte{ callRevertingContractWithInput := []byte{
byte(vm.JUMPDEST), // byte(vm.JUMPDEST), //
// push args for the call // push args for the call
byte(vm.PUSH1), 0, // out size byte(vm.PUSH1), 0, // out size
@ -485,7 +485,7 @@ func BenchmarkSimpleLoop(b *testing.B) {
benchmarkNonModifyingCode(100000000, loopingCode, "loop-100M", "", b) benchmarkNonModifyingCode(100000000, loopingCode, "loop-100M", "", b)
benchmarkNonModifyingCode(100000000, callInexistant, "call-nonexist-100M", "", b) benchmarkNonModifyingCode(100000000, callInexistant, "call-nonexist-100M", "", b)
benchmarkNonModifyingCode(100000000, callEOA, "call-EOA-100M", "", b) benchmarkNonModifyingCode(100000000, callEOA, "call-EOA-100M", "", b)
benchmarkNonModifyingCode(100000000, calllRevertingContractWithInput, "call-reverting-100M", "", b) benchmarkNonModifyingCode(100000000, callRevertingContractWithInput, "call-reverting-100M", "", b)
//benchmarkNonModifyingCode(10000000, staticCallIdentity, "staticcall-identity-10M", b) //benchmarkNonModifyingCode(10000000, staticCallIdentity, "staticcall-identity-10M", b)
//benchmarkNonModifyingCode(10000000, loopingCode, "loop-10M", b) //benchmarkNonModifyingCode(10000000, loopingCode, "loop-10M", b)
@ -832,7 +832,7 @@ func TestRuntimeJSTracer(t *testing.T) {
statedb.SetCode(common.HexToAddress("0xee"), calleeCode) statedb.SetCode(common.HexToAddress("0xee"), calleeCode)
statedb.SetCode(common.HexToAddress("0xff"), depressedCode) statedb.SetCode(common.HexToAddress("0xff"), depressedCode)
tracer, err := tracers.New(jsTracer, new(tracers.Context)) tracer, err := tracers.New(jsTracer, new(tracers.Context), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -868,7 +868,7 @@ func TestJSTracerCreateTx(t *testing.T) {
code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)} code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)}
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
tracer, err := tracers.New(jsTracer, new(tracers.Context)) tracer, err := tracers.New(jsTracer, new(tracers.Context), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -19,7 +19,7 @@ package bls12381
// isogenyMapG1 applies 11-isogeny map for BLS12-381 G1 defined at draft-irtf-cfrg-hash-to-curve-06. // isogenyMapG1 applies 11-isogeny map for BLS12-381 G1 defined at draft-irtf-cfrg-hash-to-curve-06.
func isogenyMapG1(x, y *fe) { func isogenyMapG1(x, y *fe) {
// https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#appendix-C.2 // https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-06#appendix-C.2
params := isogenyConstansG1 params := isogenyConstantsG1
degree := 15 degree := 15
xNum, xDen, yNum, yDen := new(fe), new(fe), new(fe), new(fe) xNum, xDen, yNum, yDen := new(fe), new(fe), new(fe), new(fe)
xNum.set(params[0][degree]) xNum.set(params[0][degree])
@ -76,7 +76,7 @@ func isogenyMapG2(e *fp2, x, y *fe2) {
y.set(yNum) y.set(yNum)
} }
var isogenyConstansG1 = [4][16]*fe{ var isogenyConstantsG1 = [4][16]*fe{
{ {
{0x4d18b6f3af00131c, 0x19fa219793fee28c, 0x3f2885f1467f19ae, 0x23dcea34f2ffb304, 0xd15b58d2ffc00054, 0x0913be200a20bef4}, {0x4d18b6f3af00131c, 0x19fa219793fee28c, 0x3f2885f1467f19ae, 0x23dcea34f2ffb304, 0xd15b58d2ffc00054, 0x0913be200a20bef4},
{0x898985385cdbbd8b, 0x3c79e43cc7d966aa, 0x1597e193f4cd233a, 0x8637ef1e4d6623ad, 0x11b22deed20d827b, 0x07097bc5998784ad}, {0x898985385cdbbd8b, 0x3c79e43cc7d966aa, 0x1597e193f4cd233a, 0x8637ef1e4d6623ad, 0x11b22deed20d827b, 0x07097bc5998784ad},

View File

@ -157,7 +157,7 @@ func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool
} }
if _, err := os.Stat(file); err == nil { if _, err := os.Stat(file); err == nil {
// File already exists. Allowing overwrite could be a DoS vector, // File already exists. Allowing overwrite could be a DoS vector,
// since the 'file' may point to arbitrary paths on the drive // since the 'file' may point to arbitrary paths on the drive.
return false, errors.New("location would overwrite an existing file") return false, errors.New("location would overwrite an existing file")
} }
// Make sure we can create the file to export into // Make sure we can create the file to export into
@ -506,11 +506,11 @@ func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]c
} }
triedb := api.eth.BlockChain().StateCache().TrieDB() triedb := api.eth.BlockChain().StateCache().TrieDB()
oldTrie, err := trie.NewSecure(common.Hash{}, startBlock.Root(), triedb) oldTrie, err := trie.NewStateTrie(common.Hash{}, startBlock.Root(), triedb)
if err != nil { if err != nil {
return nil, err return nil, err
} }
newTrie, err := trie.NewSecure(common.Hash{}, endBlock.Root(), triedb) newTrie, err := trie.NewStateTrie(common.Hash{}, endBlock.Root(), triedb)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -586,5 +586,5 @@ func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error
return uint64(i), nil return uint64(i), nil
} }
} }
return 0, fmt.Errorf("no state found") return 0, errors.New("no state found")
} }

View File

@ -19,7 +19,6 @@ package eth
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"math/big" "math/big"
"time" "time"
@ -202,17 +201,8 @@ func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (type
return b.eth.blockchain.GetReceiptsByHash(hash), nil return b.eth.blockchain.GetReceiptsByHash(hash), nil
} }
func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
db := b.eth.ChainDb() return rawdb.ReadLogs(b.eth.chainDb, hash, number, b.ChainConfig()), nil
number := rawdb.ReadHeaderNumber(db, hash)
if number == nil {
return nil, fmt.Errorf("failed to get block number for hash %#x", hash)
}
logs := rawdb.ReadLogs(db, hash, *number, b.eth.blockchain.Config())
if logs == nil {
return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", *number, hash.TerminalString())
}
return logs, nil
} }
func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {

View File

@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/trie"
) )
var dumper = spew.ConfigState{Indent: " "} var dumper = spew.ConfigState{Indent: " "}
@ -66,7 +67,7 @@ func TestAccountRange(t *testing.T) {
t.Parallel() t.Parallel()
var ( var (
statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), nil) statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true})
state, _ = state.New(common.Hash{}, statedb, nil) state, _ = state.New(common.Hash{}, statedb, nil)
addrs = [AccountRangeMaxResults * 2]common.Address{} addrs = [AccountRangeMaxResults * 2]common.Address{}
m = map[common.Address]bool{} m = map[common.Address]bool{}

View File

@ -25,7 +25,6 @@ import (
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -41,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/eth/protocols/snap"
@ -137,7 +135,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideGrayGlacier, config.OverrideTerminalTotalDifficulty) chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideTerminalTotalDifficulty, config.OverrideTerminalTotalDifficultyPassed)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr return nil, genesisErr
} }
@ -315,9 +313,6 @@ func (s *Ethereum) APIs() []rpc.API {
}, { }, {
Namespace: "eth", Namespace: "eth",
Service: downloader.NewDownloaderAPI(s.handler.downloader, s.eventMux), Service: downloader.NewDownloaderAPI(s.handler.downloader, s.eventMux),
}, {
Namespace: "eth",
Service: filters.NewFilterAPI(s.APIBackend, false, 5*time.Minute),
}, { }, {
Namespace: "admin", Namespace: "admin",
Service: NewAdminAPI(s), Service: NewAdminAPI(s),

View File

@ -22,6 +22,7 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"math/big"
"sync" "sync"
"time" "time"
@ -59,6 +60,24 @@ const (
// invalidTipsetsCap is the max number of recent block hashes tracked that // invalidTipsetsCap is the max number of recent block hashes tracked that
// have lead to some bad ancestor block. It's just an OOM protection. // have lead to some bad ancestor block. It's just an OOM protection.
invalidTipsetsCap = 512 invalidTipsetsCap = 512
// beaconUpdateStartupTimeout is the time to wait for a beacon client to get
// attached before starting to issue warnings.
beaconUpdateStartupTimeout = 30 * time.Second
// beaconUpdateExchangeTimeout is the max time allowed for a beacon client to
// do a transition config exchange before it's considered offline and the user
// is warned.
beaconUpdateExchangeTimeout = 2 * time.Minute
// beaconUpdateConsensusTimeout is the max time allowed for a beacon client
// to send a consensus update before it's considered offline and the user is
// warned.
beaconUpdateConsensusTimeout = 30 * time.Second
// beaconUpdateWarnFrequency is the frequency at which to warn the user that
// the beacon client is offline.
beaconUpdateWarnFrequency = 5 * time.Minute
) )
type ConsensusAPI struct { type ConsensusAPI struct {
@ -90,7 +109,17 @@ type ConsensusAPI struct {
invalidTipsets map[common.Hash]*types.Header // Ephemeral cache to track invalid tipsets and their bad ancestor invalidTipsets map[common.Hash]*types.Header // Ephemeral cache to track invalid tipsets and their bad ancestor
invalidLock sync.Mutex // Protects the invalid maps from concurrent access invalidLock sync.Mutex // Protects the invalid maps from concurrent access
forkChoiceLock sync.Mutex // Lock for the forkChoiceUpdated method // Geth can appear to be stuck or do strange things if the beacon client is
// offline or is sending us strange data. Stash some update stats away so
// that we can warn the user and not have them open issues on our tracker.
lastTransitionUpdate time.Time
lastTransitionLock sync.Mutex
lastForkchoiceUpdate time.Time
lastForkchoiceLock sync.Mutex
lastNewPayloadUpdate time.Time
lastNewPayloadLock sync.Mutex
forkchoiceLock sync.Mutex // Lock for the forkChoiceUpdated method
} }
// NewConsensusAPI creates a new consensus api for the given backend. // NewConsensusAPI creates a new consensus api for the given backend.
@ -107,6 +136,7 @@ func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
invalidTipsets: make(map[common.Hash]*types.Header), invalidTipsets: make(map[common.Hash]*types.Header),
} }
eth.Downloader().SetBadBlockCallback(api.setInvalidAncestor) eth.Downloader().SetBadBlockCallback(api.setInvalidAncestor)
go api.heartbeat()
return api return api
} }
@ -122,14 +152,18 @@ func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
// If there are payloadAttributes: // If there are payloadAttributes:
// we try to assemble a block with the payloadAttributes and return its payloadID // we try to assemble a block with the payloadAttributes and return its payloadID
func (api *ConsensusAPI) ForkchoiceUpdatedV1(update beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) { func (api *ConsensusAPI) ForkchoiceUpdatedV1(update beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
api.forkChoiceLock.Lock() api.forkchoiceLock.Lock()
defer api.forkChoiceLock.Unlock() defer api.forkchoiceLock.Unlock()
log.Trace("Engine API request received", "method", "ForkchoiceUpdated", "head", update.HeadBlockHash, "finalized", update.FinalizedBlockHash, "safe", update.SafeBlockHash) log.Trace("Engine API request received", "method", "ForkchoiceUpdated", "head", update.HeadBlockHash, "finalized", update.FinalizedBlockHash, "safe", update.SafeBlockHash)
if update.HeadBlockHash == (common.Hash{}) { if update.HeadBlockHash == (common.Hash{}) {
log.Warn("Forkchoice requested update to zero hash") log.Warn("Forkchoice requested update to zero hash")
return beacon.STATUS_INVALID, nil // TODO(karalabe): Why does someone send us this? return beacon.STATUS_INVALID, nil // TODO(karalabe): Why does someone send us this?
} }
// Stash away the last update to warn the user if the beacon client goes offline
api.lastForkchoiceLock.Lock()
api.lastForkchoiceUpdate = time.Now()
api.lastForkchoiceLock.Unlock()
// Check whether we have the block yet in our database or not. If not, we'll // Check whether we have the block yet in our database or not. If not, we'll
// need to either trigger a sync, or to reject this forkchoice update for a // need to either trigger a sync, or to reject this forkchoice update for a
@ -265,15 +299,20 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV1(update beacon.ForkchoiceStateV1, pa
// ExchangeTransitionConfigurationV1 checks the given configuration against // ExchangeTransitionConfigurationV1 checks the given configuration against
// the configuration of the node. // the configuration of the node.
func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config beacon.TransitionConfigurationV1) (*beacon.TransitionConfigurationV1, error) { func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config beacon.TransitionConfigurationV1) (*beacon.TransitionConfigurationV1, error) {
log.Trace("Engine API request received", "method", "ExchangeTransitionConfiguration", "ttd", config.TerminalTotalDifficulty)
if config.TerminalTotalDifficulty == nil { if config.TerminalTotalDifficulty == nil {
return nil, errors.New("invalid terminal total difficulty") return nil, errors.New("invalid terminal total difficulty")
} }
// Stash away the last update to warn the user if the beacon client goes offline
api.lastTransitionLock.Lock()
api.lastTransitionUpdate = time.Now()
api.lastTransitionLock.Unlock()
ttd := api.eth.BlockChain().Config().TerminalTotalDifficulty ttd := api.eth.BlockChain().Config().TerminalTotalDifficulty
if ttd == nil || ttd.Cmp(config.TerminalTotalDifficulty.ToInt()) != 0 { if ttd == nil || ttd.Cmp(config.TerminalTotalDifficulty.ToInt()) != 0 {
log.Warn("Invalid TTD configured", "geth", ttd, "beacon", config.TerminalTotalDifficulty) log.Warn("Invalid TTD configured", "geth", ttd, "beacon", config.TerminalTotalDifficulty)
return nil, fmt.Errorf("invalid ttd: execution %v consensus %v", ttd, config.TerminalTotalDifficulty) return nil, fmt.Errorf("invalid ttd: execution %v consensus %v", ttd, config.TerminalTotalDifficulty)
} }
if config.TerminalBlockHash != (common.Hash{}) { if config.TerminalBlockHash != (common.Hash{}) {
if hash := api.eth.BlockChain().GetCanonicalHash(uint64(config.TerminalBlockNumber)); hash == config.TerminalBlockHash { if hash := api.eth.BlockChain().GetCanonicalHash(uint64(config.TerminalBlockNumber)); hash == config.TerminalBlockHash {
return &beacon.TransitionConfigurationV1{ return &beacon.TransitionConfigurationV1{
@ -305,6 +344,11 @@ func (api *ConsensusAPI) NewPayloadV1(params beacon.ExecutableDataV1) (beacon.Pa
log.Debug("Invalid NewPayload params", "params", params, "error", err) log.Debug("Invalid NewPayload params", "params", params, "error", err)
return beacon.PayloadStatusV1{Status: beacon.INVALIDBLOCKHASH}, nil return beacon.PayloadStatusV1{Status: beacon.INVALIDBLOCKHASH}, nil
} }
// Stash away the last update to warn the user if the beacon client goes offline
api.lastNewPayloadLock.Lock()
api.lastNewPayloadUpdate = time.Now()
api.lastNewPayloadLock.Unlock()
// If we already have the block locally, ignore the entire execution and just // If we already have the block locally, ignore the entire execution and just
// return a fake success. // return a fake success.
if block := api.eth.BlockChain().GetBlockByHash(params.BlockHash); block != nil { if block := api.eth.BlockChain().GetBlockByHash(params.BlockHash); block != nil {
@ -417,8 +461,18 @@ func (api *ConsensusAPI) delayPayloadImport(block *types.Block) (beacon.PayloadS
// payload as non-integratable on top of the existing sync. We'll just // payload as non-integratable on top of the existing sync. We'll just
// have to rely on the beacon client to forcefully update the head with // have to rely on the beacon client to forcefully update the head with
// a forkchoice update request. // a forkchoice update request.
if api.eth.SyncMode() == downloader.FullSync {
// In full sync mode, failure to import a well-formed block can only mean
// that the parent state is missing and the syncer rejected extending the
// current cycle with the new payload.
log.Warn("Ignoring payload with missing parent", "number", block.NumberU64(), "hash", block.Hash(), "parent", block.ParentHash()) log.Warn("Ignoring payload with missing parent", "number", block.NumberU64(), "hash", block.Hash(), "parent", block.ParentHash())
return beacon.PayloadStatusV1{Status: beacon.ACCEPTED}, nil } else {
// In non-full sync mode (i.e. snap sync) all payloads are rejected until
// snap sync terminates as snap sync relies on direct database injections
// and cannot afford concurrent out-if-band modifications via imports.
log.Warn("Ignoring payload while snap syncing", "number", block.NumberU64(), "hash", block.Hash())
}
return beacon.PayloadStatusV1{Status: beacon.SYNCING}, nil
} }
// setInvalidAncestor is a callback for the downloader to notify us if a bad block // setInvalidAncestor is a callback for the downloader to notify us if a bad block
@ -469,10 +523,15 @@ func (api *ConsensusAPI) checkInvalidAncestor(check common.Hash, head common.Has
} }
api.invalidTipsets[head] = invalid api.invalidTipsets[head] = invalid
} }
// If the last valid hash is the terminal pow block, return 0x0 for latest valid hash
lastValid := &invalid.ParentHash
if header := api.eth.BlockChain().GetHeader(invalid.ParentHash, invalid.Number.Uint64()-1); header != nil && header.Difficulty.Sign() != 0 {
lastValid = &common.Hash{}
}
failure := "links to previously rejected block" failure := "links to previously rejected block"
return &beacon.PayloadStatusV1{ return &beacon.PayloadStatusV1{
Status: beacon.INVALID, Status: beacon.INVALID,
LatestValidHash: &invalid.ParentHash, LatestValidHash: lastValid,
ValidationError: &failure, ValidationError: &failure,
} }
} }
@ -492,3 +551,127 @@ func (api *ConsensusAPI) invalid(err error, latestValid *types.Header) beacon.Pa
errorMsg := err.Error() errorMsg := err.Error()
return beacon.PayloadStatusV1{Status: beacon.INVALID, LatestValidHash: &currentHash, ValidationError: &errorMsg} return beacon.PayloadStatusV1{Status: beacon.INVALID, LatestValidHash: &currentHash, ValidationError: &errorMsg}
} }
// heartbeat loops indefinitely, and checks if there have been beacon client updates
// received in the last while. If not - or if they but strange ones - it warns the
// user that something might be off with their consensus node.
//
// TODO(karalabe): Spin this goroutine down somehow
func (api *ConsensusAPI) heartbeat() {
// Sleep a bit on startup since there's obviously no beacon client yet
// attached, so no need to print scary warnings to the user.
time.Sleep(beaconUpdateStartupTimeout)
var (
offlineLogged time.Time
)
for {
// Sleep a bit and retrieve the last known consensus updates
// NOTE(review): fragment of a periodic monitoring loop (the enclosing function,
// the loop header and the `offlineLogged` timestamp are declared above this
// view). Each iteration checks whether a consensus (beacon) client has been
// feeding the engine API recently and logs escalating warnings if not.
time.Sleep(5 * time.Second)
// If the network is not yet merged/merging, don't bother scaring the user
ttd := api.eth.BlockChain().Config().TerminalTotalDifficulty
if ttd == nil {
continue
}
// Snapshot the three "last seen" timestamps; each one is guarded by its own
// mutex, so these are three independent, briefly-held critical sections.
api.lastTransitionLock.Lock()
lastTransitionUpdate := api.lastTransitionUpdate
api.lastTransitionLock.Unlock()
api.lastForkchoiceLock.Lock()
lastForkchoiceUpdate := api.lastForkchoiceUpdate
api.lastForkchoiceLock.Unlock()
api.lastNewPayloadLock.Lock()
lastNewPayloadUpdate := api.lastNewPayloadUpdate
api.lastNewPayloadLock.Unlock()
// If there have been no updates for the past while, warn the user
// that the beacon client is probably offline
if api.eth.BlockChain().Config().TerminalTotalDifficultyPassed || api.eth.Merger().TDDReached() {
// Post-merge path: consensus updates (forkchoice / new payload) are
// expected continuously; their absence means the beacon client is gone.
if time.Since(lastForkchoiceUpdate) > beaconUpdateConsensusTimeout && time.Since(lastNewPayloadUpdate) > beaconUpdateConsensusTimeout {
// No exchange either: the beacon client has likely never connected,
// or has dropped off entirely. Rate-limit the warning via offlineLogged.
if time.Since(lastTransitionUpdate) > beaconUpdateExchangeTimeout {
if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
if lastTransitionUpdate.IsZero() {
log.Warn("Post-merge network, but no beacon client seen. Please launch one to follow the chain!")
} else {
log.Warn("Previously seen beacon client is offline. Please ensure it is operational to follow the chain!")
}
offlineLogged = time.Now()
}
continue
}
// Exchange traffic is seen but no consensus updates: the client is
// connected yet not driving the chain.
if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
if lastForkchoiceUpdate.IsZero() && lastNewPayloadUpdate.IsZero() {
log.Warn("Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!")
} else {
log.Warn("Beacon client online, but no consensus updates received in a while. Please fix your beacon client to follow the chain!")
}
offlineLogged = time.Now()
}
continue
} else {
// Updates are flowing again: reset the rate limiter so a future
// outage is reported promptly.
offlineLogged = time.Time{}
}
} else {
// Pre-merge path: only exchange-transition traffic is expected yet.
if time.Since(lastTransitionUpdate) > beaconUpdateExchangeTimeout {
if time.Since(offlineLogged) > beaconUpdateWarnFrequency {
// Retrieve the last few blocks and make a rough estimate as
// to when the merge transition should happen
var (
chain = api.eth.BlockChain()
head = chain.CurrentBlock()
htd = chain.GetTd(head.Hash(), head.NumberU64())
eta time.Duration
)
if head.NumberU64() > 0 && htd.Cmp(ttd) < 0 {
// Accumulate the last 64 difficulties to estimate the growth
var diff float64
block := head
for i := 0; i < 64; i++ {
diff += float64(block.Difficulty().Uint64())
if parent := chain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent == nil {
break
} else {
block = parent
}
}
// Estimate an ETA based on the block times and the difficulty growth
growth := diff / float64(head.Time()-block.Time()+1) // +1 to avoid div by zero
if growth > 0 {
// Use float math when the remaining difficulty fits in a
// uint64, big-integer division otherwise to avoid overflow.
if left := new(big.Int).Sub(ttd, htd); left.IsUint64() {
eta = time.Duration(float64(left.Uint64())/growth) * time.Second
} else {
eta = time.Duration(new(big.Int).Div(left, big.NewInt(int64(growth))).Uint64()) * time.Second
}
}
}
// Pick the warning that matches whether the TTD was already crossed
// and whether a beacon client has ever been seen.
var message string
if htd.Cmp(ttd) > 0 {
if lastTransitionUpdate.IsZero() {
message = "Merge already reached, but no beacon client seen. Please launch one to follow the chain!"
} else {
message = "Merge already reached, but previously seen beacon client is offline. Please ensure it is operational to follow the chain!"
}
} else {
if lastTransitionUpdate.IsZero() {
message = "Merge is configured, but no beacon client seen. Please ensure you have one available before the transition arrives!"
} else {
message = "Merge is configured, but previously seen beacon client is offline. Please ensure it is operational before the transition arrives!"
}
}
if eta == 0 {
log.Warn(message)
} else {
log.Warn(message, "eta", common.PrettyAge(time.Now().Add(-eta))) // weird hack, but duration formatted doesn't handle days
}
offlineLogged = time.Now()
}
continue
} else {
// Beacon client is exchanging again: reset the warning rate limiter.
offlineLogged = time.Time{}
}
}
}
}

View File

@ -69,7 +69,7 @@ func generatePreMergeChain(n int) (*core.Genesis, []*types.Block) {
g.AddTx(tx) g.AddTx(tx)
testNonce++ testNonce++
} }
gblock := genesis.ToBlock(db) gblock := genesis.MustCommit(db)
engine := ethash.NewFaker() engine := ethash.NewFaker()
blocks, _ := core.GenerateChain(config, gblock, engine, db, n, generate) blocks, _ := core.GenerateChain(config, gblock, engine, db, n, generate)
totalDifficulty := big.NewInt(0) totalDifficulty := big.NewInt(0)
@ -662,8 +662,8 @@ func TestEmptyBlocks(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if status.Status != beacon.ACCEPTED { if status.Status != beacon.SYNCING {
t.Errorf("invalid status: expected ACCEPTED got: %v", status.Status) t.Errorf("invalid status: expected SYNCING got: %v", status.Status)
} }
if status.LatestValidHash != nil { if status.LatestValidHash != nil {
t.Fatalf("invalid LVH: got %v wanted nil", status.LatestValidHash) t.Fatalf("invalid LVH: got %v wanted nil", status.LatestValidHash)

View File

@ -125,7 +125,7 @@ type SyncingResult struct {
Status ethereum.SyncProgress `json:"status"` Status ethereum.SyncProgress `json:"status"`
} }
// uninstallSyncSubscriptionRequest uninstalles a syncing subscription in the API event loop. // uninstallSyncSubscriptionRequest uninstalls a syncing subscription in the API event loop.
type uninstallSyncSubscriptionRequest struct { type uninstallSyncSubscriptionRequest struct {
c chan interface{} c chan interface{}
uninstalled chan interface{} uninstalled chan interface{}

View File

@ -236,7 +236,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) {
// Binary search to find the ancestor // Binary search to find the ancestor
start, end := beaconTail.Number.Uint64()-1, number start, end := beaconTail.Number.Uint64()-1, number
if number := beaconHead.Number.Uint64(); end > number { if number := beaconHead.Number.Uint64(); end > number {
// This shouldn't really happen in a healty network, but if the consensus // This shouldn't really happen in a healthy network, but if the consensus
// clients feeds us a shorter chain as the canonical, we should not attempt // clients feeds us a shorter chain as the canonical, we should not attempt
// to access non-existent skeleton items. // to access non-existent skeleton items.
log.Warn("Beacon head lower than local chain", "beacon", number, "local", end) log.Warn("Beacon head lower than local chain", "beacon", number, "local", end)

View File

@ -364,7 +364,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int,
// The beacon header syncer is async. It will start this synchronization and // The beacon header syncer is async. It will start this synchronization and
// will continue doing other tasks. However, if synchronization needs to be // will continue doing other tasks. However, if synchronization needs to be
// cancelled, the syncer needs to know if we reached the startup point (and // cancelled, the syncer needs to know if we reached the startup point (and
// inited the cancel cannel) or not yet. Make sure that we'll signal even in // inited the cancel channel) or not yet. Make sure that we'll signal even in
// case of a failure. // case of a failure.
if beaconPing != nil { if beaconPing != nil {
defer func() { defer func() {
@ -1461,7 +1461,7 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
} }
d.syncStatsLock.Unlock() d.syncStatsLock.Unlock()
// Signal the content downloaders of the availablility of new tasks // Signal the content downloaders of the availability of new tasks
for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
select { select {
case ch <- true: case ch <- true:

View File

@ -68,7 +68,11 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
t.Cleanup(func() { t.Cleanup(func() {
db.Close() db.Close()
}) })
core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000)) gspec := core.Genesis{
Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
gspec.MustCommit(db)
chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil { if err != nil {
@ -356,7 +360,7 @@ func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limi
} }
// RequestStorageRanges fetches a batch of storage slots belonging to one or // RequestStorageRanges fetches a batch of storage slots belonging to one or
// more accounts. If slots from only one accout is requested, an origin marker // more accounts. If slots from only one account is requested, an origin marker
// may also be used to retrieve from there. // may also be used to retrieve from there.
func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {
// Create the request and service it // Create the request and service it
@ -567,8 +571,8 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
assertOwnChain(t, tester, len(chainB.blocks)) assertOwnChain(t, tester, len(chainB.blocks))
} }
// Tests that synchronising against a much shorter but much heavyer fork works // Tests that synchronising against a much shorter but much heavier fork works
// corrently and is not dropped. // currently and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) }
func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) }
func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) }

View File

@ -47,7 +47,7 @@ type typedQueue interface {
// capacity is responsible for calculating how many items of the abstracted // capacity is responsible for calculating how many items of the abstracted
// type a particular peer is estimated to be able to retrieve within the // type a particular peer is estimated to be able to retrieve within the
// alloted round trip time. // allotted round trip time.
capacity(peer *peerConnection, rtt time.Duration) int capacity(peer *peerConnection, rtt time.Duration) int
// updateCapacity is responsible for updating how many items of the abstracted // updateCapacity is responsible for updating how many items of the abstracted
@ -58,7 +58,7 @@ type typedQueue interface {
// from the download queue to the specified peer. // from the download queue to the specified peer.
reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool)
// unreserve is resposible for removing the current retrieval allocation // unreserve is responsible for removing the current retrieval allocation
// assigned to a specific peer and placing it back into the pool to allow // assigned to a specific peer and placing it back into the pool to allow
// reassigning to some other peer. // reassigning to some other peer.
unreserve(peer string) int unreserve(peer string) int
@ -190,7 +190,7 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error {
req, err := queue.request(peer, request, responses) req, err := queue.request(peer, request, responses)
if err != nil { if err != nil {
// Sending the request failed, which generally means the peer // Sending the request failed, which generally means the peer
// was diconnected in between assignment and network send. // was disconnected in between assignment and network send.
// Although all peer removal operations return allocated tasks // Although all peer removal operations return allocated tasks
// to the queue, that is async, and we can do better here by // to the queue, that is async, and we can do better here by
// immediately pushing the unfulfilled requests. // immediately pushing the unfulfilled requests.

View File

@ -41,7 +41,7 @@ func (q *bodyQueue) pending() int {
} }
// capacity is responsible for calculating how many bodies a particular peer is // capacity is responsible for calculating how many bodies a particular peer is
// estimated to be able to retrieve within the alloted round trip time. // estimated to be able to retrieve within the allotted round trip time.
func (q *bodyQueue) capacity(peer *peerConnection, rtt time.Duration) int { func (q *bodyQueue) capacity(peer *peerConnection, rtt time.Duration) int {
return peer.BodyCapacity(rtt) return peer.BodyCapacity(rtt)
} }
@ -58,7 +58,7 @@ func (q *bodyQueue) reserve(peer *peerConnection, items int) (*fetchRequest, boo
return q.queue.ReserveBodies(peer, items) return q.queue.ReserveBodies(peer, items)
} }
// unreserve is resposible for removing the current body retrieval allocation // unreserve is responsible for removing the current body retrieval allocation
// assigned to a specific peer and placing it back into the pool to allow // assigned to a specific peer and placing it back into the pool to allow
// reassigning to some other peer. // reassigning to some other peer.
func (q *bodyQueue) unreserve(peer string) int { func (q *bodyQueue) unreserve(peer string) int {

View File

@ -41,7 +41,7 @@ func (q *headerQueue) pending() int {
} }
// capacity is responsible for calculating how many headers a particular peer is // capacity is responsible for calculating how many headers a particular peer is
// estimated to be able to retrieve within the alloted round trip time. // estimated to be able to retrieve within the allotted round trip time.
func (q *headerQueue) capacity(peer *peerConnection, rtt time.Duration) int { func (q *headerQueue) capacity(peer *peerConnection, rtt time.Duration) int {
return peer.HeaderCapacity(rtt) return peer.HeaderCapacity(rtt)
} }
@ -58,7 +58,7 @@ func (q *headerQueue) reserve(peer *peerConnection, items int) (*fetchRequest, b
return q.queue.ReserveHeaders(peer, items), false, false return q.queue.ReserveHeaders(peer, items), false, false
} }
// unreserve is resposible for removing the current header retrieval allocation // unreserve is responsible for removing the current header retrieval allocation
// assigned to a specific peer and placing it back into the pool to allow // assigned to a specific peer and placing it back into the pool to allow
// reassigning to some other peer. // reassigning to some other peer.
func (q *headerQueue) unreserve(peer string) int { func (q *headerQueue) unreserve(peer string) int {

View File

@ -28,7 +28,7 @@ import (
// concurrent fetcher and the downloader. // concurrent fetcher and the downloader.
type receiptQueue Downloader type receiptQueue Downloader
// waker returns a notification channel that gets pinged in case more reecipt // waker returns a notification channel that gets pinged in case more receipt
// fetches have been queued up, so the fetcher might assign it to idle peers. // fetches have been queued up, so the fetcher might assign it to idle peers.
func (q *receiptQueue) waker() chan bool { func (q *receiptQueue) waker() chan bool {
return q.queue.receiptWakeCh return q.queue.receiptWakeCh
@ -41,7 +41,7 @@ func (q *receiptQueue) pending() int {
} }
// capacity is responsible for calculating how many receipts a particular peer is // capacity is responsible for calculating how many receipts a particular peer is
// estimated to be able to retrieve within the alloted round trip time. // estimated to be able to retrieve within the allotted round trip time.
func (q *receiptQueue) capacity(peer *peerConnection, rtt time.Duration) int { func (q *receiptQueue) capacity(peer *peerConnection, rtt time.Duration) int {
return peer.ReceiptCapacity(rtt) return peer.ReceiptCapacity(rtt)
} }
@ -58,7 +58,7 @@ func (q *receiptQueue) reserve(peer *peerConnection, items int) (*fetchRequest,
return q.queue.ReserveReceipts(peer, items) return q.queue.ReserveReceipts(peer, items)
} }
// unreserve is resposible for removing the current receipt retrieval allocation // unreserve is responsible for removing the current receipt retrieval allocation
// assigned to a specific peer and placing it back into the pool to allow // assigned to a specific peer and placing it back into the pool to allow
// reassigning to some other peer. // reassigning to some other peer.
func (q *receiptQueue) unreserve(peer string) int { func (q *receiptQueue) unreserve(peer string) int {

View File

@ -237,6 +237,7 @@ func (ps *peerSet) Register(p *peerConnection) error {
} }
p.rates = msgrate.NewTracker(ps.rates.MeanCapacities(), ps.rates.MedianRoundTrip()) p.rates = msgrate.NewTracker(ps.rates.MeanCapacities(), ps.rates.MedianRoundTrip())
if err := ps.rates.Track(p.id, p.rates); err != nil { if err := ps.rates.Track(p.id, p.rates); err != nil {
ps.lock.Unlock()
return err return err
} }
ps.peers[p.id] = p ps.peers[p.id] = p

View File

@ -859,7 +859,7 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil { if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {
reconstruct(accepted, res) reconstruct(accepted, res)
} else { } else {
// else: betweeen here and above, some other peer filled this result, // else: between here and above, some other peer filled this result,
// or it was indeed a no-op. This should not happen, but if it does it's // or it was indeed a no-op. This should not happen, but if it does it's
// not something to panic about // not something to panic about
log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err) log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)

Some files were not shown because too many files have changed in this diff Show More