Merge commit '5ed08c473' into merge/geth-v1.11.3

This commit is contained in:
philip-morlier 2023-03-07 10:04:53 -08:00
commit 6e40cd7920
138 changed files with 6361 additions and 855 deletions

View File

@ -5,7 +5,6 @@ jobs:
allow_failures: allow_failures:
- stage: build - stage: build
os: osx os: osx
go: 1.17.x
env: env:
- azure-osx - azure-osx
@ -141,7 +140,7 @@ jobs:
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
- go run build/ci.go test -coverage $TEST_PACKAGES - go run build/ci.go test $TEST_PACKAGES
- stage: build - stage: build
if: type = pull_request if: type = pull_request
@ -152,7 +151,7 @@ jobs:
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
- go run build/ci.go test -coverage $TEST_PACKAGES - go run build/ci.go test $TEST_PACKAGES
- stage: build - stage: build
os: linux os: linux
@ -161,7 +160,7 @@ jobs:
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
- go run build/ci.go test -coverage $TEST_PACKAGES - go run build/ci.go test $TEST_PACKAGES
# This builder does the Azure archive purges to avoid accumulating junk # This builder does the Azure archive purges to avoid accumulating junk
- stage: build - stage: build
@ -186,5 +185,5 @@ jobs:
env: env:
- GO111MODULE=on - GO111MODULE=on
script: script:
- go run build/ci.go test -race -coverage $TEST_PACKAGES - go run build/ci.go test -race $TEST_PACKAGES

View File

@ -16,7 +16,7 @@ archives are published at https://geth.ethereum.org/downloads/.
For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth). For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth).
Building `geth` requires both a Go (version 1.18 or later) and a C compiler. You can install Building `geth` requires both a Go (version 1.19 or later) and a C compiler. You can install
them using your favourite package manager. Once the dependencies are installed, run them using your favourite package manager. Once the dependencies are installed, run
```shell ```shell

View File

@ -95,7 +95,10 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis
backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{}) backend.filterSystem = filters.NewFilterSystem(filterBackend, filters.Config{})
backend.events = filters.NewEventSystem(backend.filterSystem, false) backend.events = filters.NewEventSystem(backend.filterSystem, false)
backend.rollback(blockchain.CurrentBlock()) header := backend.blockchain.CurrentBlock()
block := backend.blockchain.GetBlock(header.Hash(), header.Number.Uint64())
backend.rollback(block)
return backend return backend
} }
@ -135,7 +138,10 @@ func (b *SimulatedBackend) Rollback() {
b.mu.Lock() b.mu.Lock()
defer b.mu.Unlock() defer b.mu.Unlock()
b.rollback(b.blockchain.CurrentBlock()) header := b.blockchain.CurrentBlock()
block := b.blockchain.GetBlock(header.Hash(), header.Number.Uint64())
b.rollback(block)
} }
func (b *SimulatedBackend) rollback(parent *types.Block) { func (b *SimulatedBackend) rollback(parent *types.Block) {
@ -174,7 +180,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error {
// stateByBlockNumber retrieves a state by a given blocknumber. // stateByBlockNumber retrieves a state by a given blocknumber.
func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) { func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) {
if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 { if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number) == 0 {
return b.blockchain.State() return b.blockchain.State()
} }
block, err := b.blockByNumber(ctx, blockNumber) block, err := b.blockByNumber(ctx, blockNumber)
@ -303,7 +309,7 @@ func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (
// (associated with its hash) if found without Lock. // (associated with its hash) if found without Lock.
func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { func (b *SimulatedBackend) blockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 { if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 {
return b.blockchain.CurrentBlock(), nil return b.blockByHash(ctx, b.blockchain.CurrentBlock().Hash())
} }
block := b.blockchain.GetBlockByNumber(uint64(number.Int64())) block := b.blockchain.GetBlockByNumber(uint64(number.Int64()))
@ -431,7 +437,7 @@ func (b *SimulatedBackend) CallContract(ctx context.Context, call ethereum.CallM
b.mu.Lock() b.mu.Lock()
defer b.mu.Unlock() defer b.mu.Unlock()
if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 { if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number) != 0 {
return nil, errBlockNumberUnsupported return nil, errBlockNumberUnsupported
} }
stateDB, err := b.blockchain.State() stateDB, err := b.blockchain.State()
@ -455,7 +461,7 @@ func (b *SimulatedBackend) PendingCallContract(ctx context.Context, call ethereu
defer b.mu.Unlock() defer b.mu.Unlock()
defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot()) defer b.pendingState.RevertToSnapshot(b.pendingState.Snapshot())
res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) res, err := b.callContract(ctx, call, b.pendingBlock.Header(), b.pendingState)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -549,7 +555,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
call.Gas = gas call.Gas = gas
snapshot := b.pendingState.Snapshot() snapshot := b.pendingState.Snapshot()
res, err := b.callContract(ctx, call, b.pendingBlock, b.pendingState) res, err := b.callContract(ctx, call, b.pendingBlock.Header(), b.pendingState)
b.pendingState.RevertToSnapshot(snapshot) b.pendingState.RevertToSnapshot(snapshot)
if err != nil { if err != nil {
@ -599,7 +605,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
// callContract implements common code between normal and pending contract calls. // callContract implements common code between normal and pending contract calls.
// state is modified during execution, make sure to copy it if necessary. // state is modified during execution, make sure to copy it if necessary.
func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, block *types.Block, stateDB *state.StateDB) (*core.ExecutionResult, error) { func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallMsg, header *types.Header, stateDB *state.StateDB) (*core.ExecutionResult, error) {
// Gas prices post 1559 need to be initialized // Gas prices post 1559 need to be initialized
if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) { if call.GasPrice != nil && (call.GasFeeCap != nil || call.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
@ -645,7 +651,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
msg := callMsg{call} msg := callMsg{call}
txContext := core.NewEVMTxContext(msg) txContext := core.NewEVMTxContext(msg)
evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil) evmContext := core.NewEVMBlockContext(header, b.blockchain, nil)
// Create a new environment which holds all relevant information // Create a new environment which holds all relevant information
// about the transaction and calling mechanisms. // about the transaction and calling mechanisms.
vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true}) vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{NoBaseFee: true})
@ -854,15 +860,9 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNum
case rpc.LatestBlockNumber: case rpc.LatestBlockNumber:
return fb.bc.CurrentHeader(), nil return fb.bc.CurrentHeader(), nil
case rpc.FinalizedBlockNumber: case rpc.FinalizedBlockNumber:
if block := fb.bc.CurrentFinalizedBlock(); block != nil { return fb.bc.CurrentFinalBlock(), nil
return block.Header(), nil
}
return nil, errors.New("finalized block not found")
case rpc.SafeBlockNumber: case rpc.SafeBlockNumber:
if block := fb.bc.CurrentSafeBlock(); block != nil { return fb.bc.CurrentSafeBlock(), nil
return block.Header(), nil
}
return nil, errors.New("safe block not found")
default: default:
return fb.bc.GetHeaderByNumber(uint64(number.Int64())), nil return fb.bc.GetHeaderByNumber(uint64(number.Int64())), nil
} }

View File

@ -1189,7 +1189,7 @@ func TestFork(t *testing.T) {
sim.Commit() sim.Commit()
} }
// 3. // 3.
if sim.blockchain.CurrentBlock().NumberU64() != uint64(n) { if sim.blockchain.CurrentBlock().Number.Uint64() != uint64(n) {
t.Error("wrong chain length") t.Error("wrong chain length")
} }
// 4. // 4.
@ -1199,7 +1199,7 @@ func TestFork(t *testing.T) {
sim.Commit() sim.Commit()
} }
// 6. // 6.
if sim.blockchain.CurrentBlock().NumberU64() != uint64(n+1) { if sim.blockchain.CurrentBlock().Number.Uint64() != uint64(n+1) {
t.Error("wrong chain length") t.Error("wrong chain length")
} }
} }
@ -1344,7 +1344,7 @@ func TestCommitReturnValue(t *testing.T) {
sim := simTestBackend(testAddr) sim := simTestBackend(testAddr)
defer sim.Close() defer sim.Close()
startBlockHeight := sim.blockchain.CurrentBlock().NumberU64() startBlockHeight := sim.blockchain.CurrentBlock().Number.Uint64()
// Test if Commit returns the correct block hash // Test if Commit returns the correct block hash
h1 := sim.Commit() h1 := sim.Commit()

View File

@ -59,6 +59,8 @@ const (
ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing ledgerP1InitTransactionData ledgerParam1 = 0x00 // First transaction data block for signing
ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing ledgerP1ContTransactionData ledgerParam1 = 0x80 // Subsequent transaction data block for signing
ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address ledgerP2DiscardAddressChainCode ledgerParam2 = 0x00 // Do not return the chain code along with the address
ledgerEip155Size int = 3 // Size of the EIP-155 chain_id,r,s in unsigned transactions
) )
// errLedgerReplyInvalidHeader is the error message returned by a Ledger data exchange // errLedgerReplyInvalidHeader is the error message returned by a Ledger data exchange
@ -347,9 +349,15 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
op = ledgerP1InitTransactionData op = ledgerP1InitTransactionData
reply []byte reply []byte
) )
// Chunk size selection to mitigate an underlying RLP deserialization issue on the ledger app.
// https://github.com/LedgerHQ/app-ethereum/issues/409
chunk := 255
for ; len(payload)%chunk <= ledgerEip155Size; chunk-- {
}
for len(payload) > 0 { for len(payload) > 0 {
// Calculate the size of the next data chunk // Calculate the size of the next data chunk
chunk := 255
if chunk > len(payload) { if chunk > len(payload) {
chunk = len(payload) chunk = len(payload)
} }

View File

@ -26,7 +26,7 @@ for:
- go run build/ci.go lint - go run build/ci.go lint
- go run build/ci.go install -dlgo - go run build/ci.go install -dlgo
test_script: test_script:
- go run build/ci.go test -dlgo -coverage - go run build/ci.go test -dlgo
# linux/386 is disabled. # linux/386 is disabled.
- matrix: - matrix:
@ -54,4 +54,4 @@ for:
- go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds - go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
test_script: test_script:
- go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage - go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC%

View File

@ -233,5 +233,5 @@ func BlockToExecutableData(block *types.Block, fees *big.Int) *ExecutionPayloadE
// ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1 // ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1
type ExecutionPayloadBodyV1 struct { type ExecutionPayloadBodyV1 struct {
TransactionData []hexutil.Bytes `json:"transactions"` TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` Withdrawals []*types.Withdrawal `json:"withdrawals"`
} }

View File

@ -1,19 +1,19 @@
# This file contains sha256 checksums of optional build dependencies. # This file contains sha256 checksums of optional build dependencies.
b5c1a3af52c385a6d1c76aed5361cf26459023980d0320de7658bae3915831a2 go1.20.1.src.tar.gz 4d0e2850d197b4ddad3bdb0196300179d095bb3aefd4dfbc3b36702c3728f8ab go1.20.2.src.tar.gz
a300a45e801ab459f3008aae5bb9efbe9a6de9bcd12388f5ca9bbd14f70236de go1.20.1.darwin-amd64.tar.gz c93b8ced9517d07e1cd4c362c6e2d5242cb139e29b417a328fbf19aded08764c go1.20.2.darwin-amd64.tar.gz
f1a8e06c7f1ba1c008313577f3f58132eb166a41ceb95ce6e9af30bc5a3efca4 go1.20.1.darwin-arm64.tar.gz 7343c87f19e79c0063532e82e1c4d6f42175a32d99f7a4d15e658e88bf97f885 go1.20.2.darwin-arm64.tar.gz
57d80349dc4fbf692f8cd85a5971f97841aedafcf211e367e59d3ae812292660 go1.20.1.freebsd-386.tar.gz 14f9be2004e042b3a64d0facb0c020756a9084a5c7333e33b0752b393b6016ea go1.20.2.freebsd-386.tar.gz
6e124d54d5850a15fdb15754f782986f06af23c5ddb6690849417b9c74f05f98 go1.20.1.freebsd-amd64.tar.gz b41b67b4f1b56797a7cecf6ee7f47fcf4f93960b2788a3683c07dd009d30b2a4 go1.20.2.freebsd-amd64.tar.gz
3a7345036ebd92455b653e4b4f6aaf4f7e1f91f4ced33b23d7059159cec5f4d7 go1.20.1.linux-386.tar.gz ee240ed33ae57504c41f04c12236aeaa17fbeb6ea9fcd096cd9dc7a89d10d4db go1.20.2.linux-386.tar.gz
000a5b1fca4f75895f78befeb2eecf10bfff3c428597f3f1e69133b63b911b02 go1.20.1.linux-amd64.tar.gz 4eaea32f59cde4dc635fbc42161031d13e1c780b87097f4b4234cfce671f1768 go1.20.2.linux-amd64.tar.gz
5e5e2926733595e6f3c5b5ad1089afac11c1490351855e87849d0e7702b1ec2e go1.20.1.linux-arm64.tar.gz 78d632915bb75e9a6356a47a42625fd1a785c83a64a643fedd8f61e31b1b3bef go1.20.2.linux-arm64.tar.gz
e4edc05558ab3657ba3dddb909209463cee38df9c1996893dd08cde274915003 go1.20.1.linux-armv6l.tar.gz d79d56bafd6b52b8d8cbe3f8e967caaac5383a23d7a4fa9ac0e89778cd16a076 go1.20.2.linux-armv6l.tar.gz
85cfd4b89b48c94030783b6e9e619e35557862358b846064636361421d0b0c52 go1.20.1.linux-ppc64le.tar.gz 850564ddb760cb703db63bf20182dc4407abd2ff090a95fa66d6634d172fd095 go1.20.2.linux-ppc64le.tar.gz
ba3a14381ed4538216dec3ea72b35731750597edd851cece1eb120edf7d60149 go1.20.1.linux-s390x.tar.gz 8da24c5c4205fe8115f594237e5db7bcb1d23df67bc1fa9a999954b1976896e8 go1.20.2.linux-s390x.tar.gz
61259b5a346193e30b7b3c3f8d108062db25bbb80cf290ee251eeb855965f6ee go1.20.1.windows-386.zip 31838b291117495bbb93683603e98d5118bfabd2eb318b4d07540bfd524bab86 go1.20.2.windows-386.zip
3b493969196a6de8d9762d09f5bc5ae7a3e5814b0cfbf9cc26838c2bc1314f9c go1.20.1.windows-amd64.zip fe439f0e438f7555a7f5f7194ddb6f4a07b0de1fa414385d19f2aeb26d9f43db go1.20.2.windows-amd64.zip
62d14ddb44bcda27c9b1f5ad9ffd4463013374ed325d762417e2adefd59a802f go1.20.1.windows-arm64.zip ac5010c8b8b22849228a8dea698d58b9c7be2195d30c6d778cce0f709858fa64 go1.20.2.windows-arm64.zip
fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz 75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz

View File

@ -139,7 +139,7 @@ var (
// This is the version of Go that will be downloaded by // This is the version of Go that will be downloaded by
// //
// go run ci.go install -dlgo // go run ci.go install -dlgo
dlgoVersion = "1.20.1" dlgoVersion = "1.20.2"
// This is the version of Go that will be used to bootstrap the PPA builder. // This is the version of Go that will be used to bootstrap the PPA builder.
// //

View File

@ -17,6 +17,8 @@
package main package main
import ( import (
"sync"
"sync/atomic"
"time" "time"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
@ -34,6 +36,7 @@ type crawler struct {
// settings // settings
revalidateInterval time.Duration revalidateInterval time.Duration
mu sync.RWMutex
} }
const ( const (
@ -67,7 +70,7 @@ func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler
return c return c
} }
func (c *crawler) run(timeout time.Duration) nodeSet { func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet {
var ( var (
timeoutTimer = time.NewTimer(timeout) timeoutTimer = time.NewTimer(timeout)
timeoutCh <-chan time.Time timeoutCh <-chan time.Time
@ -75,35 +78,51 @@ func (c *crawler) run(timeout time.Duration) nodeSet {
doneCh = make(chan enode.Iterator, len(c.iters)) doneCh = make(chan enode.Iterator, len(c.iters))
liveIters = len(c.iters) liveIters = len(c.iters)
) )
if nthreads < 1 {
nthreads = 1
}
defer timeoutTimer.Stop() defer timeoutTimer.Stop()
defer statusTicker.Stop() defer statusTicker.Stop()
for _, it := range c.iters { for _, it := range c.iters {
go c.runIterator(doneCh, it) go c.runIterator(doneCh, it)
} }
var ( var (
added int added uint64
updated int updated uint64
skipped int skipped uint64
recent int recent uint64
removed int removed uint64
wg sync.WaitGroup
) )
wg.Add(nthreads)
for i := 0; i < nthreads; i++ {
go func() {
defer wg.Done()
for {
select {
case n := <-c.ch:
switch c.updateNode(n) {
case nodeSkipIncompat:
atomic.AddUint64(&skipped, 1)
case nodeSkipRecent:
atomic.AddUint64(&recent, 1)
case nodeRemoved:
atomic.AddUint64(&removed, 1)
case nodeAdded:
atomic.AddUint64(&added, 1)
default:
atomic.AddUint64(&updated, 1)
}
case <-c.closed:
return
}
}
}()
}
loop: loop:
for { for {
select { select {
case n := <-c.ch:
switch c.updateNode(n) {
case nodeSkipIncompat:
skipped++
case nodeSkipRecent:
recent++
case nodeRemoved:
removed++
case nodeAdded:
added++
default:
updated++
}
case it := <-doneCh: case it := <-doneCh:
if it == c.inputIter { if it == c.inputIter {
// Enable timeout when we're done revalidating the input nodes. // Enable timeout when we're done revalidating the input nodes.
@ -119,8 +138,11 @@ loop:
break loop break loop
case <-statusTicker.C: case <-statusTicker.C:
log.Info("Crawling in progress", log.Info("Crawling in progress",
"added", added, "updated", updated, "removed", removed, "added", atomic.LoadUint64(&added),
"ignored(recent)", recent, "ignored(incompatible)", skipped) "updated", atomic.LoadUint64(&updated),
"removed", atomic.LoadUint64(&removed),
"ignored(recent)", atomic.LoadUint64(&removed),
"ignored(incompatible)", atomic.LoadUint64(&skipped))
} }
} }
@ -131,6 +153,7 @@ loop:
for ; liveIters > 0; liveIters-- { for ; liveIters > 0; liveIters-- {
<-doneCh <-doneCh
} }
wg.Wait()
return c.output return c.output
} }
@ -148,7 +171,9 @@ func (c *crawler) runIterator(done chan<- enode.Iterator, it enode.Iterator) {
// updateNode updates the info about the given node, and returns a status // updateNode updates the info about the given node, and returns a status
// about what changed // about what changed
func (c *crawler) updateNode(n *enode.Node) int { func (c *crawler) updateNode(n *enode.Node) int {
c.mu.RLock()
node, ok := c.output[n.ID()] node, ok := c.output[n.ID()]
c.mu.RUnlock()
// Skip validation of recently-seen nodes. // Skip validation of recently-seen nodes.
if ok && time.Since(node.LastCheck) < c.revalidateInterval { if ok && time.Since(node.LastCheck) < c.revalidateInterval {
@ -156,10 +181,9 @@ func (c *crawler) updateNode(n *enode.Node) int {
} }
// Request the node record. // Request the node record.
nn, err := c.disc.RequestENR(n)
node.LastCheck = truncNow()
status := nodeUpdated status := nodeUpdated
if err != nil { node.LastCheck = truncNow()
if nn, err := c.disc.RequestENR(n); err != nil {
if node.Score == 0 { if node.Score == 0 {
// Node doesn't implement EIP-868. // Node doesn't implement EIP-868.
log.Debug("Skipping node", "id", n.ID()) log.Debug("Skipping node", "id", n.ID())
@ -176,8 +200,9 @@ func (c *crawler) updateNode(n *enode.Node) int {
} }
node.LastResponse = node.LastCheck node.LastResponse = node.LastCheck
} }
// Store/update node in output set. // Store/update node in output set.
c.mu.Lock()
defer c.mu.Unlock()
if node.Score <= 0 { if node.Score <= 0 {
log.Debug("Removing node", "id", n.ID()) log.Debug("Removing node", "id", n.ID())
delete(c.output, n.ID()) delete(c.output, n.ID())

View File

@ -78,7 +78,7 @@ var (
Name: "crawl", Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT", Usage: "Updates a nodes.json file with random nodes found in the DHT",
Action: discv4Crawl, Action: discv4Crawl,
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag}), Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{crawlTimeoutFlag, crawlParallelismFlag}),
} }
discv4TestCommand = &cli.Command{ discv4TestCommand = &cli.Command{
Name: "test", Name: "test",
@ -120,6 +120,11 @@ var (
Usage: "Time limit for the crawl.", Usage: "Time limit for the crawl.",
Value: 30 * time.Minute, Value: 30 * time.Minute,
} }
crawlParallelismFlag = &cli.IntFlag{
Name: "parallel",
Usage: "How many parallel discoveries to attempt.",
Value: 16,
}
remoteEnodeFlag = &cli.StringFlag{ remoteEnodeFlag = &cli.StringFlag{
Name: "remote", Name: "remote",
Usage: "Enode of the remote node under test", Usage: "Enode of the remote node under test",
@ -195,7 +200,7 @@ func discv4ResolveJSON(ctx *cli.Context) error {
defer disc.Close() defer disc.Close()
c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs)) c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs))
c.revalidateInterval = 0 c.revalidateInterval = 0
output := c.run(0) output := c.run(0, 1)
writeNodesJSON(nodesFile, output) writeNodesJSON(nodesFile, output)
return nil return nil
} }
@ -214,7 +219,7 @@ func discv4Crawl(ctx *cli.Context) error {
defer disc.Close() defer disc.Close()
c := newCrawler(inputSet, disc, disc.RandomNodes()) c := newCrawler(inputSet, disc, disc.RandomNodes())
c.revalidateInterval = 10 * time.Minute c.revalidateInterval = 10 * time.Minute
output := c.run(ctx.Duration(crawlTimeoutFlag.Name)) output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name))
writeNodesJSON(nodesFile, output) writeNodesJSON(nodesFile, output)
return nil return nil
} }

View File

@ -110,7 +110,7 @@ func discv5Crawl(ctx *cli.Context) error {
defer disc.Close() defer disc.Close()
c := newCrawler(inputSet, disc, disc.RandomNodes()) c := newCrawler(inputSet, disc, disc.RandomNodes())
c.revalidateInterval = 10 * time.Minute c.revalidateInterval = 10 * time.Minute
output := c.run(ctx.Duration(crawlTimeoutFlag.Name)) output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name))
writeNodesJSON(nodesFile, output) writeNodesJSON(nodesFile, output)
return nil return nil
} }

View File

@ -126,11 +126,16 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
} }
// Iterate over the new records and inject anything missing. // Iterate over the new records and inject anything missing.
log.Info("Updating DNS entries")
created := 0
updated := 0
skipped := 0
for path, val := range records { for path, val := range records {
old, exists := existing[path] old, exists := existing[path]
if !exists { if !exists {
// Entry is unknown, push a new one to Cloudflare. // Entry is unknown, push a new one to Cloudflare.
log.Info(fmt.Sprintf("Creating %s = %q", path, val)) log.Debug(fmt.Sprintf("Creating %s = %q", path, val))
created++
ttl := rootTTL ttl := rootTTL
if path != name { if path != name {
ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
@ -139,27 +144,33 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record) _, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)
} else if old.Content != val { } else if old.Content != val {
// Entry already exists, only change its content. // Entry already exists, only change its content.
log.Info(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val)) log.Debug(fmt.Sprintf("Updating %s from %q to %q", path, old.Content, val))
updated++
old.Content = val old.Content = val
err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old) err = c.UpdateDNSRecord(context.Background(), c.zoneID, old.ID, old)
} else { } else {
skipped++
log.Debug(fmt.Sprintf("Skipping %s = %q", path, val)) log.Debug(fmt.Sprintf("Skipping %s = %q", path, val))
} }
if err != nil { if err != nil {
return fmt.Errorf("failed to publish %s: %v", path, err) return fmt.Errorf("failed to publish %s: %v", path, err)
} }
} }
log.Info("Updated DNS entries", "new", created, "updated", updated, "untouched", skipped)
// Iterate over the old records and delete anything stale. // Iterate over the old records and delete anything stale.
deleted := 0
log.Info("Deleting stale DNS entries")
for path, entry := range existing { for path, entry := range existing {
if _, ok := records[path]; ok { if _, ok := records[path]; ok {
continue continue
} }
// Stale entry, nuke it. // Stale entry, nuke it.
log.Info(fmt.Sprintf("Deleting %s = %q", path, entry.Content)) log.Debug(fmt.Sprintf("Deleting %s = %q", path, entry.Content))
deleted++
if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil { if err := c.DeleteDNSRecord(context.Background(), c.zoneID, entry.ID); err != nil {
return fmt.Errorf("failed to delete %s: %v", path, err) return fmt.Errorf("failed to delete %s: %v", path, err)
} }
} }
log.Info("Deleted stale DNS entries", "count", deleted)
return nil return nil
} }

View File

@ -329,8 +329,9 @@ func (c *route53Client) collectRecords(name string) (map[string]recordSet, error
var req route53.ListResourceRecordSetsInput var req route53.ListResourceRecordSetsInput
req.HostedZoneId = &c.zoneID req.HostedZoneId = &c.zoneID
existing := make(map[string]recordSet) existing := make(map[string]recordSet)
log.Info("Loading existing TXT records", "name", name, "zone", c.zoneID)
for page := 0; ; page++ { for page := 0; ; page++ {
log.Info("Loading existing TXT records", "name", name, "zone", c.zoneID, "page", page) log.Debug("Loading existing TXT records", "name", name, "zone", c.zoneID, "page", page)
resp, err := c.api.ListResourceRecordSets(context.TODO(), &req) resp, err := c.api.ListResourceRecordSets(context.TODO(), &req)
if err != nil { if err != nil {
return existing, err return existing, err
@ -360,7 +361,7 @@ func (c *route53Client) collectRecords(name string) (map[string]recordSet, error
req.StartRecordName = resp.NextRecordName req.StartRecordName = resp.NextRecordName
req.StartRecordType = resp.NextRecordType req.StartRecordType = resp.NextRecordType
} }
log.Info("Loaded existing TXT records", "name", name, "zone", c.zoneID, "records", len(existing))
return existing, nil return existing, nil
} }

View File

@ -176,8 +176,12 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
vmConfig.Tracer = tracer vmConfig.Tracer = tracer
vmConfig.Debug = (tracer != nil) vmConfig.Debug = (tracer != nil)
statedb.SetTxContext(tx.Hash(), txIndex) statedb.SetTxContext(tx.Hash(), txIndex)
txContext := core.NewEVMTxContext(msg)
snapshot := statedb.Snapshot() var (
txContext = core.NewEVMTxContext(msg)
snapshot = statedb.Snapshot()
prevGas = gaspool.Gas()
)
evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig) evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig)
// (ret []byte, usedGas uint64, failed bool, err error) // (ret []byte, usedGas uint64, failed bool, err error)
@ -186,6 +190,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
statedb.RevertToSnapshot(snapshot) statedb.RevertToSnapshot(snapshot)
log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From(), "error", err) log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From(), "error", err)
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
gaspool.SetGas(prevGas)
continue continue
} }
includedTxs = append(includedTxs, tx) includedTxs = append(includedTxs, tx)

View File

@ -1,7 +1,7 @@
{ {
"currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentDifficulty": "0x20000", "currentDifficulty": "0x20000",
"currentGasTarget": "0x1000000000", "currentGasLimit": "0x1000000000",
"currentBaseFee": "0x3B9ACA00", "currentBaseFee": "0x3B9ACA00",
"currentNumber": "0x1000000", "currentNumber": "0x1000000",
"currentTimestamp": "0x04" "currentTimestamp": "0x04"

View File

@ -44,10 +44,10 @@ $ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.jso
"nonce": "0x1" "nonce": "0x1"
}, },
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": {
"balance": "0xbfc02677a000" "balance": "0x5bb10ddef6e0"
}, },
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0xff104fcfea7800", "balance": "0xff745ee8832120",
"nonce": "0x2" "nonce": "0x2"
} }
} }

View File

@ -349,8 +349,8 @@ func exportChain(ctx *cli.Context) error {
if first < 0 || last < 0 { if first < 0 || last < 0 {
utils.Fatalf("Export error: block number must be greater than 0\n") utils.Fatalf("Export error: block number must be greater than 0\n")
} }
if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() { if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64()) utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
} }
err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last)) err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
} }

View File

@ -222,7 +222,7 @@ func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block
head := chain.CurrentBlock() head := chain.CurrentBlock()
for i, block := range blocks { for i, block := range blocks {
// If we're behind the chain head, only check block, state is available at head // If we're behind the chain head, only check block, state is available at head
if head.NumberU64() > block.NumberU64() { if head.Number.Uint64() > block.NumberU64() {
if !chain.HasBlock(block.Hash(), block.NumberU64()) { if !chain.HasBlock(block.Hash(), block.NumberU64()) {
return blocks[i:] return blocks[i:]
} }

View File

@ -93,7 +93,7 @@ func TestReimportMirroredState(t *testing.T) {
if _, err := chain.InsertChain(blocks[:2]); err != nil { if _, err := chain.InsertChain(blocks[:2]); err != nil {
t.Fatalf("failed to insert initial blocks: %v", err) t.Fatalf("failed to insert initial blocks: %v", err)
} }
if head := chain.CurrentBlock().NumberU64(); head != 2 { if head := chain.CurrentBlock().Number.Uint64(); head != 2 {
t.Fatalf("chain head mismatch: have %d, want %d", head, 2) t.Fatalf("chain head mismatch: have %d, want %d", head, 2)
} }
@ -106,7 +106,7 @@ func TestReimportMirroredState(t *testing.T) {
if _, err := chain.InsertChain(blocks[2:]); err != nil { if _, err := chain.InsertChain(blocks[2:]); err != nil {
t.Fatalf("failed to insert final block: %v", err) t.Fatalf("failed to insert final block: %v", err)
} }
if head := chain.CurrentBlock().NumberU64(); head != 3 { if head := chain.CurrentBlock().Number.Uint64(); head != 3 {
t.Fatalf("chain head mismatch: have %d, want %d", head, 3) t.Fatalf("chain head mismatch: have %d, want %d", head, 3)
} }
} }

View File

@ -199,10 +199,10 @@ type BlockChain struct {
// Readers don't need to take it, they can just read the database. // Readers don't need to take it, they can just read the database.
chainmu *syncx.ClosableMutex chainmu *syncx.ClosableMutex
currentBlock atomic.Value // Current head of the block chain currentBlock atomic.Pointer[types.Header] // Current head of the chain
currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!) currentSnapBlock atomic.Pointer[types.Header] // Current head of snap-sync
currentFinalizedBlock atomic.Value // Current finalized head currentFinalBlock atomic.Pointer[types.Header] // Latest (consensus) finalized block
currentSafeBlock atomic.Value // Current safe head currentSafeBlock atomic.Pointer[types.Header] // Latest (consensus) safe block
bodyCache *lru.Cache[common.Hash, *types.Body] bodyCache *lru.Cache[common.Hash, *types.Body]
bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue] bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue]
@ -289,11 +289,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
return nil, ErrNoGenesis return nil, ErrNoGenesis
} }
var nilBlock *types.Block bc.currentBlock.Store(nil)
bc.currentBlock.Store(nilBlock) bc.currentSnapBlock.Store(nil)
bc.currentFastBlock.Store(nilBlock) bc.currentFinalBlock.Store(nil)
bc.currentFinalizedBlock.Store(nilBlock) bc.currentSafeBlock.Store(nil)
bc.currentSafeBlock.Store(nilBlock)
// If Geth is initialized with an external ancient store, re-initialize the // If Geth is initialized with an external ancient store, re-initialize the
// missing chain indexes and chain flags. This procedure can survive crash // missing chain indexes and chain flags. This procedure can survive crash
@ -307,7 +306,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
} }
// Make sure the state associated with the block is available // Make sure the state associated with the block is available
head := bc.CurrentBlock() head := bc.CurrentBlock()
if !bc.HasState(head.Root()) { if !bc.HasState(head.Root) {
// Head state is missing, before the state recovery, find out the // Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the // disk layer point of snapshot(if it's enabled). Make sure the
// rewound point is lower than disk layer. // rewound point is lower than disk layer.
@ -316,9 +315,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
diskRoot = rawdb.ReadSnapshotRoot(bc.db) diskRoot = rawdb.ReadSnapshotRoot(bc.db)
} }
if diskRoot != (common.Hash{}) { if diskRoot != (common.Hash{}) {
log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot) log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot)
snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), 0, diskRoot, true) snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -327,13 +326,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk) rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
} }
} else { } else {
log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash()) log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
if _, err := bc.setHeadBeyondRoot(head.NumberU64(), 0, common.Hash{}, true); err != nil { if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil {
return nil, err return nil, err
} }
} }
} }
// Ensure that a previous crash in SetHead doesn't leave extra ancients // Ensure that a previous crash in SetHead doesn't leave extra ancients
if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 { if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
var ( var (
@ -344,18 +342,18 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// blockchain repair. If the head full block is even lower than the ancient // blockchain repair. If the head full block is even lower than the ancient
// chain, truncate the ancient store. // chain, truncate the ancient store.
fullBlock := bc.CurrentBlock() fullBlock := bc.CurrentBlock()
if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 { if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.Number.Uint64() < frozen-1 {
needRewind = true needRewind = true
low = fullBlock.NumberU64() low = fullBlock.Number.Uint64()
} }
// In fast sync, it may happen that ancient data has been written to the // In fast sync, it may happen that ancient data has been written to the
// ancient store, but the LastFastBlock has not been updated, truncate the // ancient store, but the LastFastBlock has not been updated, truncate the
// extra data here. // extra data here.
fastBlock := bc.CurrentFastBlock() snapBlock := bc.CurrentSnapBlock()
if fastBlock != nil && fastBlock.NumberU64() < frozen-1 { if snapBlock != nil && snapBlock.Number.Uint64() < frozen-1 {
needRewind = true needRewind = true
if fastBlock.NumberU64() < low || low == 0 { if snapBlock.Number.Uint64() < low || low == 0 {
low = fastBlock.NumberU64() low = snapBlock.Number.Uint64()
} }
} }
if needRewind { if needRewind {
@ -395,8 +393,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
var recover bool var recover bool
head := bc.CurrentBlock() head := bc.CurrentBlock()
if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.NumberU64() { if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.Number.Uint64() {
log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer) log.Warn("Enabling snapshot recovery", "chainhead", head.Number, "diskbase", *layer)
recover = true recover = true
} }
snapconfig := snapshot.Config{ snapconfig := snapshot.Config{
@ -405,7 +403,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
NoBuild: bc.cacheConfig.SnapshotNoBuild, NoBuild: bc.cacheConfig.SnapshotNoBuild,
AsyncBuild: !bc.cacheConfig.SnapshotWait, AsyncBuild: !bc.cacheConfig.SnapshotWait,
} }
bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root()) bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root)
} }
// Start future block processor. // Start future block processor.
@ -469,32 +467,32 @@ func (bc *BlockChain) loadLastState() error {
return bc.Reset() return bc.Reset()
} }
// Make sure the entire head block is available // Make sure the entire head block is available
currentBlock := bc.GetBlockByHash(head) headBlock := bc.GetBlockByHash(head)
if currentBlock == nil { if headBlock == nil {
// Corrupt or empty database, init from scratch // Corrupt or empty database, init from scratch
log.Warn("Head block missing, resetting chain", "hash", head) log.Warn("Head block missing, resetting chain", "hash", head)
return bc.Reset() return bc.Reset()
} }
// Everything seems to be fine, set as the head block // Everything seems to be fine, set as the head block
bc.currentBlock.Store(currentBlock) bc.currentBlock.Store(headBlock.Header())
headBlockGauge.Update(int64(currentBlock.NumberU64())) headBlockGauge.Update(int64(headBlock.NumberU64()))
// Restore the last known head header // Restore the last known head header
currentHeader := currentBlock.Header() headHeader := headBlock.Header()
if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) { if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
if header := bc.GetHeaderByHash(head); header != nil { if header := bc.GetHeaderByHash(head); header != nil {
currentHeader = header headHeader = header
} }
} }
bc.hc.SetCurrentHeader(currentHeader) bc.hc.SetCurrentHeader(headHeader)
// Restore the last known head fast block // Restore the last known head fast block
bc.currentFastBlock.Store(currentBlock) bc.currentSnapBlock.Store(headBlock.Header())
headFastBlockGauge.Update(int64(currentBlock.NumberU64())) headFastBlockGauge.Update(int64(headBlock.NumberU64()))
if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) { if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
if block := bc.GetBlockByHash(head); block != nil { if block := bc.GetBlockByHash(head); block != nil {
bc.currentFastBlock.Store(block) bc.currentSnapBlock.Store(block.Header())
headFastBlockGauge.Update(int64(block.NumberU64())) headFastBlockGauge.Update(int64(block.NumberU64()))
} }
} }
@ -504,27 +502,31 @@ func (bc *BlockChain) loadLastState() error {
// known finalized block on startup // known finalized block on startup
if head := rawdb.ReadFinalizedBlockHash(bc.db); head != (common.Hash{}) { if head := rawdb.ReadFinalizedBlockHash(bc.db); head != (common.Hash{}) {
if block := bc.GetBlockByHash(head); block != nil { if block := bc.GetBlockByHash(head); block != nil {
bc.currentFinalizedBlock.Store(block) bc.currentFinalBlock.Store(block.Header())
headFinalizedBlockGauge.Update(int64(block.NumberU64())) headFinalizedBlockGauge.Update(int64(block.NumberU64()))
bc.currentSafeBlock.Store(block) bc.currentSafeBlock.Store(block.Header())
headSafeBlockGauge.Update(int64(block.NumberU64())) headSafeBlockGauge.Update(int64(block.NumberU64()))
} }
} }
// Issue a status log for the user // Issue a status log for the user
currentFastBlock := bc.CurrentFastBlock() var (
currentFinalizedBlock := bc.CurrentFinalizedBlock() currentSnapBlock = bc.CurrentSnapBlock()
currentFinalBlock = bc.CurrentFinalBlock()
headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()) headerTd = bc.GetTd(headHeader.Hash(), headHeader.Number.Uint64())
blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64()) blockTd = bc.GetTd(headBlock.Hash(), headBlock.NumberU64())
fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()) )
if headHeader.Hash() != headBlock.Hash() {
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0))) log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0)))
log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0))) }
log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0))) log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0)))
if headBlock.Hash() != currentSnapBlock.Hash() {
if currentFinalizedBlock != nil { fastTd := bc.GetTd(currentSnapBlock.Hash(), currentSnapBlock.Number.Uint64())
finalTd := bc.GetTd(currentFinalizedBlock.Hash(), currentFinalizedBlock.NumberU64()) log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0)))
log.Info("Loaded most recent local finalized block", "number", currentFinalizedBlock.Number(), "hash", currentFinalizedBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalizedBlock.Time()), 0))) }
if currentFinalBlock != nil {
finalTd := bc.GetTd(currentFinalBlock.Hash(), currentFinalBlock.Number.Uint64())
log.Info("Loaded most recent local finalized block", "number", currentFinalBlock.Number, "hash", currentFinalBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalBlock.Time), 0)))
} }
if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil { if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
log.Info("Loaded last fast-sync pivot marker", "number", *pivot) log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
@ -540,7 +542,16 @@ func (bc *BlockChain) SetHead(head uint64) error {
return err return err
} }
// Send chain head event to update the transaction pool // Send chain head event to update the transaction pool
bc.chainHeadFeed.Send(ChainHeadEvent{Block: bc.CurrentBlock()}) header := bc.CurrentBlock()
block := bc.GetBlock(header.Hash(), header.Number.Uint64())
if block == nil {
// This should never happen. In practice, previsouly currentBlock
// contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
}
bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
return nil return nil
} }
@ -553,16 +564,25 @@ func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
return err return err
} }
// Send chain head event to update the transaction pool // Send chain head event to update the transaction pool
bc.chainHeadFeed.Send(ChainHeadEvent{Block: bc.CurrentBlock()}) header := bc.CurrentBlock()
block := bc.GetBlock(header.Hash(), header.Number.Uint64())
if block == nil {
// This should never happen. In practice, previsouly currentBlock
// contained the entire block whereas now only a "marker", so there
// is an ever so slight chance for a race we should handle.
log.Error("Current block not found in database", "block", header.Number, "hash", header.Hash())
return fmt.Errorf("current block missing: #%d [%x..]", header.Number, header.Hash().Bytes()[:4])
}
bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
return nil return nil
} }
// SetFinalized sets the finalized block. // SetFinalized sets the finalized block.
func (bc *BlockChain) SetFinalized(block *types.Block) { func (bc *BlockChain) SetFinalized(header *types.Header) {
bc.currentFinalizedBlock.Store(block) bc.currentFinalBlock.Store(header)
if block != nil { if header != nil {
rawdb.WriteFinalizedBlockHash(bc.db, block.Hash()) rawdb.WriteFinalizedBlockHash(bc.db, header.Hash())
headFinalizedBlockGauge.Update(int64(block.NumberU64())) headFinalizedBlockGauge.Update(int64(header.Number.Uint64()))
} else { } else {
rawdb.WriteFinalizedBlockHash(bc.db, common.Hash{}) rawdb.WriteFinalizedBlockHash(bc.db, common.Hash{})
headFinalizedBlockGauge.Update(0) headFinalizedBlockGauge.Update(0)
@ -570,10 +590,10 @@ func (bc *BlockChain) SetFinalized(block *types.Block) {
} }
// SetSafe sets the safe block. // SetSafe sets the safe block.
func (bc *BlockChain) SetSafe(block *types.Block) { func (bc *BlockChain) SetSafe(header *types.Header) {
bc.currentSafeBlock.Store(block) bc.currentSafeBlock.Store(header)
if block != nil { if header != nil {
headSafeBlockGauge.Update(int64(block.NumberU64())) headSafeBlockGauge.Update(int64(header.Number.Uint64()))
} else { } else {
headSafeBlockGauge.Update(0) headSafeBlockGauge.Update(0)
} }
@ -609,7 +629,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// Rewind the blockchain, ensuring we don't end up with a stateless head // Rewind the blockchain, ensuring we don't end up with a stateless head
// block. Note, depth equality is permitted to allow using SetHead as a // block. Note, depth equality is permitted to allow using SetHead as a
// chain reparation mechanism without deleting any data! // chain reparation mechanism without deleting any data!
if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() { if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.Number.Uint64() {
newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
if newHeadBlock == nil { if newHeadBlock == nil {
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
@ -667,27 +687,27 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// In theory we should update all in-memory markers in the // In theory we should update all in-memory markers in the
// last step, however the direction of SetHead is from high // last step, however the direction of SetHead is from high
// to low, so it's safe to update in-memory markers directly. // to low, so it's safe to update in-memory markers directly.
bc.currentBlock.Store(newHeadBlock) bc.currentBlock.Store(newHeadBlock.Header())
headBlockGauge.Update(int64(newHeadBlock.NumberU64())) headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
} }
// Rewind the fast block in a simpleton way to the target head // Rewind the fast block in a simpleton way to the target head
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() { if currentSnapBlock := bc.CurrentSnapBlock(); currentSnapBlock != nil && header.Number.Uint64() < currentSnapBlock.Number.Uint64() {
newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) newHeadSnapBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
// If either blocks reached nil, reset to the genesis state // If either blocks reached nil, reset to the genesis state
if newHeadFastBlock == nil { if newHeadSnapBlock == nil {
newHeadFastBlock = bc.genesisBlock newHeadSnapBlock = bc.genesisBlock
} }
rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash()) rawdb.WriteHeadFastBlockHash(db, newHeadSnapBlock.Hash())
// Degrade the chain markers if they are explicitly reverted. // Degrade the chain markers if they are explicitly reverted.
// In theory we should update all in-memory markers in the // In theory we should update all in-memory markers in the
// last step, however the direction of SetHead is from high // last step, however the direction of SetHead is from high
// to low, so it's safe the update in-memory markers directly. // to low, so it's safe the update in-memory markers directly.
bc.currentFastBlock.Store(newHeadFastBlock) bc.currentSnapBlock.Store(newHeadSnapBlock.Header())
headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64())) headFastBlockGauge.Update(int64(newHeadSnapBlock.NumberU64()))
} }
var ( var (
headHeader = bc.CurrentBlock().Header() headHeader = bc.CurrentBlock()
headNumber = headHeader.Number.Uint64() headNumber = headHeader.Number.Uint64()
) )
// If setHead underflown the freezer threshold and the block processing // If setHead underflown the freezer threshold and the block processing
@ -723,7 +743,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
// If SetHead was only called as a chain reparation method, try to skip // If SetHead was only called as a chain reparation method, try to skip
// touching the header chain altogether, unless the freezer is broken // touching the header chain altogether, unless the freezer is broken
if repair { if repair {
if target, force := updateFn(bc.db, bc.CurrentBlock().Header()); force { if target, force := updateFn(bc.db, bc.CurrentBlock()); force {
bc.hc.SetHead(target.Number.Uint64(), updateFn, delFn) bc.hc.SetHead(target.Number.Uint64(), updateFn, delFn)
} }
} else { } else {
@ -746,15 +766,14 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
bc.futureBlocks.Purge() bc.futureBlocks.Purge()
// Clear safe block, finalized block if needed // Clear safe block, finalized block if needed
if safe := bc.CurrentSafeBlock(); safe != nil && head < safe.NumberU64() { if safe := bc.CurrentSafeBlock(); safe != nil && head < safe.Number.Uint64() {
log.Warn("SetHead invalidated safe block") log.Warn("SetHead invalidated safe block")
bc.SetSafe(nil) bc.SetSafe(nil)
} }
if finalized := bc.CurrentFinalizedBlock(); finalized != nil && head < finalized.NumberU64() { if finalized := bc.CurrentFinalBlock(); finalized != nil && head < finalized.Number.Uint64() {
log.Error("SetHead invalidated finalized block") log.Error("SetHead invalidated finalized block")
bc.SetFinalized(nil) bc.SetFinalized(nil)
} }
return rootNumber, bc.loadLastState() return rootNumber, bc.loadLastState()
} }
@ -774,7 +793,7 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error {
if !bc.chainmu.TryLock() { if !bc.chainmu.TryLock() {
return errChainStopped return errChainStopped
} }
bc.currentBlock.Store(block) bc.currentBlock.Store(block.Header())
headBlockGauge.Update(int64(block.NumberU64())) headBlockGauge.Update(int64(block.NumberU64()))
bc.chainmu.Unlock() bc.chainmu.Unlock()
@ -815,18 +834,18 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
// Last update all in-memory chain markers // Last update all in-memory chain markers
bc.genesisBlock = genesis bc.genesisBlock = genesis
bc.currentBlock.Store(bc.genesisBlock) bc.currentBlock.Store(bc.genesisBlock.Header())
headBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
bc.hc.SetGenesis(bc.genesisBlock.Header()) bc.hc.SetGenesis(bc.genesisBlock.Header())
bc.hc.SetCurrentHeader(bc.genesisBlock.Header()) bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
bc.currentFastBlock.Store(bc.genesisBlock) bc.currentSnapBlock.Store(bc.genesisBlock.Header())
headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64())) headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
return nil return nil
} }
// Export writes the active chain to the given writer. // Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error { func (bc *BlockChain) Export(w io.Writer) error {
return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64()) return bc.ExportN(w, uint64(0), bc.CurrentBlock().Number.Uint64())
} }
// ExportN writes a subset of the active chain to the given writer. // ExportN writes a subset of the active chain to the given writer.
@ -883,10 +902,10 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) {
// Update all in-memory chain markers in the last step // Update all in-memory chain markers in the last step
bc.hc.SetCurrentHeader(block.Header()) bc.hc.SetCurrentHeader(block.Header())
bc.currentFastBlock.Store(block) bc.currentSnapBlock.Store(block.Header())
headFastBlockGauge.Update(int64(block.NumberU64())) headFastBlockGauge.Update(int64(block.NumberU64()))
bc.currentBlock.Store(block) bc.currentBlock.Store(block.Header())
headBlockGauge.Update(int64(block.NumberU64())) headBlockGauge.Update(int64(block.NumberU64()))
} }
@ -927,7 +946,7 @@ func (bc *BlockChain) Stop() {
var snapBase common.Hash var snapBase common.Hash
if bc.snaps != nil { if bc.snaps != nil {
var err error var err error
if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil { if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root); err != nil {
log.Error("Failed to journal state snapshot", "err", err) log.Error("Failed to journal state snapshot", "err", err)
} }
} }
@ -941,7 +960,7 @@ func (bc *BlockChain) Stop() {
triedb := bc.triedb triedb := bc.triedb
for _, offset := range []uint64{0, 1, TriesInMemory - 1} { for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
if number := bc.CurrentBlock().NumberU64(); number > offset { if number := bc.CurrentBlock().Number.Uint64(); number > offset {
recent := bc.GetBlockByNumber(number - offset) recent := bc.GetBlockByNumber(number - offset)
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
@ -1059,7 +1078,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// Rewind may have occurred, skip in that case. // Rewind may have occurred, skip in that case.
if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 { if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 {
reorg, err := bc.forker.ReorgNeeded(bc.CurrentFastBlock().Header(), head.Header()) reorg, err := bc.forker.ReorgNeeded(bc.CurrentSnapBlock(), head.Header())
if err != nil { if err != nil {
log.Warn("Reorg failed", "err", err) log.Warn("Reorg failed", "err", err)
return false return false
@ -1067,13 +1086,12 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return false return false
} }
rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
bc.currentFastBlock.Store(head) bc.currentSnapBlock.Store(head.Header())
headFastBlockGauge.Update(int64(head.NumberU64())) headFastBlockGauge.Update(int64(head.NumberU64()))
return true return true
} }
return false return false
} }
// writeAncient writes blockchain and corresponding receipt chain into ancient store. // writeAncient writes blockchain and corresponding receipt chain into ancient store.
// //
// this function only accepts canonical chain data. All side chain will be reverted // this function only accepts canonical chain data. All side chain will be reverted
@ -1135,8 +1153,8 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 { if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
size += int64(batch.ValueSize()) size += int64(batch.ValueSize())
if err = batch.Write(); err != nil { if err = batch.Write(); err != nil {
fastBlock := bc.CurrentFastBlock().NumberU64() snapBlock := bc.CurrentSnapBlock().Number.Uint64()
if err := bc.db.TruncateHead(fastBlock + 1); err != nil { if err := bc.db.TruncateHead(snapBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err) log.Error("Can't truncate ancient store after failed insert", "err", err)
} }
return 0, err return 0, err
@ -1150,11 +1168,11 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
return 0, err return 0, err
} }
// Update the current fast block because all block data is now present in DB. // Update the current fast block because all block data is now present in DB.
previousFastBlock := bc.CurrentFastBlock().NumberU64() previousSnapBlock := bc.CurrentSnapBlock().Number.Uint64()
if !updateHead(blockChain[len(blockChain)-1]) { if !updateHead(blockChain[len(blockChain)-1]) {
// We end up here if the header chain has reorg'ed, and the blocks/receipts // We end up here if the header chain has reorg'ed, and the blocks/receipts
// don't match the canonical chain. // don't match the canonical chain.
if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil { if err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err) log.Error("Can't truncate ancient store after failed insert", "err", err)
} }
return 0, errSideChainReceipts return 0, errSideChainReceipts
@ -1414,7 +1432,7 @@ func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types
return NonStatTy, err return NonStatTy, err
} }
currentBlock := bc.CurrentBlock() currentBlock := bc.CurrentBlock()
reorg, err := bc.forker.ReorgNeeded(currentBlock.Header(), block.Header()) reorg, err := bc.forker.ReorgNeeded(currentBlock, block.Header())
if err != nil { if err != nil {
return NonStatTy, err return NonStatTy, err
} }
@ -1574,7 +1592,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
current = bc.CurrentBlock() current = bc.CurrentBlock()
) )
for block != nil && bc.skipBlock(err, it) { for block != nil && bc.skipBlock(err, it) {
reorg, err = bc.forker.ReorgNeeded(current.Header(), block.Header()) reorg, err = bc.forker.ReorgNeeded(current, block.Header())
if err != nil { if err != nil {
return it.index, err return it.index, err
} }
@ -1584,7 +1602,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
// In eth2 the forker always returns true for reorg decision (blindly trusting // In eth2 the forker always returns true for reorg decision (blindly trusting
// the external consensus engine), but in order to prevent the unnecessary // the external consensus engine), but in order to prevent the unnecessary
// reorgs when importing known blocks, the special case is handled here. // reorgs when importing known blocks, the special case is handled here.
if block.NumberU64() > current.NumberU64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() { if block.NumberU64() > current.Number.Uint64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() {
break break
} }
} }
@ -1884,7 +1902,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
err := consensus.ErrPrunedAncestor err := consensus.ErrPrunedAncestor
for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() { for ; block != nil && errors.Is(err, consensus.ErrPrunedAncestor); block, err = it.next() {
// Check the canonical state root for that number // Check the canonical state root for that number
if number := block.NumberU64(); current.NumberU64() >= number { if number := block.NumberU64(); current.Number.Uint64() >= number {
canonical := bc.GetBlockByNumber(number) canonical := bc.GetBlockByNumber(number)
if canonical != nil && canonical.Hash() == block.Hash() { if canonical != nil && canonical.Hash() == block.Hash() {
// Not a sidechain block, this is a re-import of a canon block which has it's state pruned // Not a sidechain block, this is a re-import of a canon block which has it's state pruned
@ -1934,12 +1952,12 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// //
// If the externTd was larger than our local TD, we now need to reimport the previous // If the externTd was larger than our local TD, we now need to reimport the previous
// blocks to regenerate the required state // blocks to regenerate the required state
reorg, err := bc.forker.ReorgNeeded(current.Header(), lastBlock.Header()) reorg, err := bc.forker.ReorgNeeded(current, lastBlock.Header())
if err != nil { if err != nil {
return it.index, err return it.index, err
} }
if !reorg { if !reorg {
localTd := bc.GetTd(current.Hash(), current.NumberU64()) localTd := bc.GetTd(current.Hash(), current.Number.Uint64())
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd) log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
return it.index, err return it.index, err
} }
@ -2043,7 +2061,7 @@ func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error)
// the processing of a block. These logs are later announced as deleted or reborn. // the processing of a block. These logs are later announced as deleted or reborn.
func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log { func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64()) receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64())
receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Transactions()) receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.BaseFee(), b.Transactions())
var logs []*types.Log var logs []*types.Log
for _, receipt := range receipts { for _, receipt := range receipts {
@ -2063,7 +2081,7 @@ func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
// potential missing transactions and post an event about them. // potential missing transactions and post an event about them.
// Note the new head block won't be processed here, callers need to handle it // Note the new head block won't be processed here, callers need to handle it
// externally. // externally.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
var ( var (
newChain types.Blocks newChain types.Blocks
oldChain types.Blocks oldChain types.Blocks
@ -2072,6 +2090,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
deletedTxs []common.Hash deletedTxs []common.Hash
addedTxs []common.Hash addedTxs []common.Hash
) )
oldBlock := bc.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
if oldBlock == nil {
return errors.New("current head block missing")
}
newBlock := newHead
// Reduce the longer chain to the same number as the shorter one // Reduce the longer chain to the same number as the shorter one
if oldBlock.NumberU64() > newBlock.NumberU64() { if oldBlock.NumberU64() > newBlock.NumberU64() {
// Old chain is longer, gather all transactions and logs as deleted ones // Old chain is longer, gather all transactions and logs as deleted ones
@ -2088,10 +2112,10 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
} }
} }
if oldBlock == nil { if oldBlock == nil {
return fmt.Errorf("invalid old chain") return errors.New("invalid old chain")
} }
if newBlock == nil { if newBlock == nil {
return fmt.Errorf("invalid new chain") return errors.New("invalid new chain")
} }
// Both sides of the reorg are at the same number, reduce both until the common // Both sides of the reorg are at the same number, reduce both until the common
// ancestor is found // ancestor is found

View File

@ -40,26 +40,26 @@ func (bc *BlockChain) CurrentHeader() *types.Header {
// CurrentBlock retrieves the current head block of the canonical chain. The // CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache. // block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block { func (bc *BlockChain) CurrentBlock() *types.Header {
return bc.currentBlock.Load().(*types.Block) return bc.currentBlock.Load()
} }
// CurrentFastBlock retrieves the current fast-sync head block of the canonical // CurrentSnapBlock retrieves the current snap-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache. // chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block { func (bc *BlockChain) CurrentSnapBlock() *types.Header {
return bc.currentFastBlock.Load().(*types.Block) return bc.currentSnapBlock.Load()
} }
// CurrentFinalizedBlock retrieves the current finalized block of the canonical // CurrentFinalBlock retrieves the current finalized block of the canonical
// chain. The block is retrieved from the blockchain's internal cache. // chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFinalizedBlock() *types.Block { func (bc *BlockChain) CurrentFinalBlock() *types.Header {
return bc.currentFinalizedBlock.Load().(*types.Block) return bc.currentFinalBlock.Load()
} }
// CurrentSafeBlock retrieves the current safe block of the canonical // CurrentSafeBlock retrieves the current safe block of the canonical
// chain. The block is retrieved from the blockchain's internal cache. // chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentSafeBlock() *types.Block { func (bc *BlockChain) CurrentSafeBlock() *types.Header {
return bc.currentSafeBlock.Load().(*types.Block) return bc.currentSafeBlock.Load()
} }
// HasHeader checks if a block header is present in the database or not, caching // HasHeader checks if a block header is present in the database or not, caching
@ -315,7 +315,7 @@ func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
// State returns a new mutable state based on the current HEAD block. // State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) { func (bc *BlockChain) State() (*state.StateDB, error) {
return bc.StateAt(bc.CurrentBlock().Root()) return bc.StateAt(bc.CurrentBlock().Root)
} }
// StateAt returns a new mutable state based on a particular point in time. // StateAt returns a new mutable state based on a particular point in time.
@ -351,7 +351,7 @@ func (bc *BlockChain) StateCache() state.Database {
// GasLimit returns the gas limit of the current HEAD block. // GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 { func (bc *BlockChain) GasLimit() uint64 {
return bc.CurrentBlock().GasLimit() return bc.CurrentBlock().GasLimit
} }
// Genesis retrieves the chain's genesis block. // Genesis retrieves the chain's genesis block.

View File

@ -1857,11 +1857,11 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader { if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader) t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
} }
if head := newChain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock { if head := newChain.CurrentSnapBlock(); head.Number.Uint64() != tt.expHeadFastBlock {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock) t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, tt.expHeadFastBlock)
} }
if head := newChain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock { if head := newChain.CurrentBlock(); head.Number.Uint64() != tt.expHeadBlock {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock) t.Errorf("Head block mismatch: have %d, want %d", head.Number, tt.expHeadBlock)
} }
if frozen, err := db.(freezer).Ancients(); err != nil { if frozen, err := db.(freezer).Ancients(); err != nil {
t.Errorf("Failed to retrieve ancient count: %v\n", err) t.Errorf("Failed to retrieve ancient count: %v\n", err)
@ -1973,11 +1973,11 @@ func TestIssue23496(t *testing.T) {
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) { if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4) t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
} }
if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) { if head := chain.CurrentSnapBlock(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4)) t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, uint64(4))
} }
if head := chain.CurrentBlock(); head.NumberU64() != uint64(1) { if head := chain.CurrentBlock(); head.Number.Uint64() != uint64(1) {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(1)) t.Errorf("Head block mismatch: have %d, want %d", head.Number, uint64(1))
} }
// Reinsert B2-B4 // Reinsert B2-B4
@ -1987,11 +1987,11 @@ func TestIssue23496(t *testing.T) {
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) { if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4) t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
} }
if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) { if head := chain.CurrentSnapBlock(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4)) t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, uint64(4))
} }
if head := chain.CurrentBlock(); head.NumberU64() != uint64(4) { if head := chain.CurrentBlock(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(4)) t.Errorf("Head block mismatch: have %d, want %d", head.Number, uint64(4))
} }
if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil { if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
t.Error("Failed to regenerate the snapshot of known state") t.Error("Failed to regenerate the snapshot of known state")

View File

@ -2047,11 +2047,11 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader { if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader) t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
} }
if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock { if head := chain.CurrentSnapBlock(); head.Number.Uint64() != tt.expHeadFastBlock {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock) t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, tt.expHeadFastBlock)
} }
if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock { if head := chain.CurrentBlock(); head.Number.Uint64() != tt.expHeadBlock {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock) t.Errorf("Head block mismatch: have %d, want %d", head.Number, tt.expHeadBlock)
} }
if frozen, err := db.(freezer).Ancients(); err != nil { if frozen, err := db.(freezer).Ancients(); err != nil {
t.Errorf("Failed to retrieve ancient count: %v\n", err) t.Errorf("Failed to retrieve ancient count: %v\n", err)

View File

@ -136,11 +136,11 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader { if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader) t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader)
} }
if head := chain.CurrentFastBlock(); head.NumberU64() != basic.expHeadFastBlock { if head := chain.CurrentSnapBlock(); head.Number.Uint64() != basic.expHeadFastBlock {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadFastBlock) t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, basic.expHeadFastBlock)
} }
if head := chain.CurrentBlock(); head.NumberU64() != basic.expHeadBlock { if head := chain.CurrentBlock(); head.Number.Uint64() != basic.expHeadBlock {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadBlock) t.Errorf("Head block mismatch: have %d, want %d", head.Number, basic.expHeadBlock)
} }
// Check the disk layer, ensure they are matched // Check the disk layer, ensure they are matched

View File

@ -109,7 +109,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
headerChainB []*types.Header headerChainB []*types.Header
) )
if full { if full {
blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.CurrentBlock(), n, ethash.NewFaker(), genDb, forkSeed) blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
if _, err := blockchain2.InsertChain(blockChainB); err != nil { if _, err := blockchain2.InsertChain(blockChainB); err != nil {
t.Fatalf("failed to insert forking chain: %v", err) t.Fatalf("failed to insert forking chain: %v", err)
} }
@ -124,7 +124,7 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
if full { if full {
cur := blockchain.CurrentBlock() cur := blockchain.CurrentBlock()
tdPre = blockchain.GetTd(cur.Hash(), cur.NumberU64()) tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
if err := testBlockChainImport(blockChainB, blockchain); err != nil { if err := testBlockChainImport(blockChainB, blockchain); err != nil {
t.Fatalf("failed to import forked block chain: %v", err) t.Fatalf("failed to import forked block chain: %v", err)
} }
@ -206,7 +206,7 @@ func TestLastBlock(t *testing.T) {
} }
defer blockchain.Stop() defer blockchain.Stop()
blocks := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), 1, ethash.NewFullFaker(), genDb, 0) blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 1, ethash.NewFullFaker(), genDb, 0)
if _, err := blockchain.InsertChain(blocks); err != nil { if _, err := blockchain.InsertChain(blocks); err != nil {
t.Fatalf("Failed to insert block: %v", err) t.Fatalf("Failed to insert block: %v", err)
} }
@ -240,11 +240,11 @@ func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full b
// Extend the newly created chain // Extend the newly created chain
if full { if full {
blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.CurrentBlock(), n, ethash.NewFaker(), genDb, forkSeed) blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
if _, err := blockchain2.InsertChain(blockChainB); err != nil { if _, err := blockchain2.InsertChain(blockChainB); err != nil {
t.Fatalf("failed to insert forking chain: %v", err) t.Fatalf("failed to insert forking chain: %v", err)
} }
if blockchain2.CurrentBlock().NumberU64() != blockChainB[len(blockChainB)-1].NumberU64() { if blockchain2.CurrentBlock().Number.Uint64() != blockChainB[len(blockChainB)-1].NumberU64() {
t.Fatalf("failed to reorg to the given chain") t.Fatalf("failed to reorg to the given chain")
} }
if blockchain2.CurrentBlock().Hash() != blockChainB[len(blockChainB)-1].Hash() { if blockchain2.CurrentBlock().Hash() != blockChainB[len(blockChainB)-1].Hash() {
@ -477,7 +477,7 @@ func testBrokenChain(t *testing.T, full bool) {
// Create a forked chain, and try to insert with a missing link // Create a forked chain, and try to insert with a missing link
if full { if full {
chain := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), 5, ethash.NewFaker(), genDb, forkSeed)[1:] chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
if err := testBlockChainImport(chain, blockchain); err == nil { if err := testBlockChainImport(chain, blockchain); err == nil {
t.Errorf("broken block chain not reported") t.Errorf("broken block chain not reported")
} }
@ -527,10 +527,10 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
defer blockchain.Stop() defer blockchain.Stop()
// Insert an easy and a difficult chain afterwards // Insert an easy and a difficult chain afterwards
easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.CurrentBlock(), ethash.NewFaker(), genDb, len(first), func(i int, b *BlockGen) { easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(first), func(i int, b *BlockGen) {
b.OffsetTime(first[i]) b.OffsetTime(first[i])
}) })
diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.CurrentBlock(), ethash.NewFaker(), genDb, len(second), func(i int, b *BlockGen) { diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(second), func(i int, b *BlockGen) {
b.OffsetTime(second[i]) b.OffsetTime(second[i])
}) })
if full { if full {
@ -559,9 +559,9 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
// Check that the chain is valid number and link wise // Check that the chain is valid number and link wise
if full { if full {
prev := blockchain.CurrentBlock() prev := blockchain.CurrentBlock()
for block := blockchain.GetBlockByNumber(blockchain.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, blockchain.GetBlockByNumber(block.NumberU64()-1) { for block := blockchain.GetBlockByNumber(blockchain.CurrentBlock().Number.Uint64() - 1); block.NumberU64() != 0; prev, block = block.Header(), blockchain.GetBlockByNumber(block.NumberU64()-1) {
if prev.ParentHash() != block.Hash() { if prev.ParentHash != block.Hash() {
t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash(), block.Hash()) t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash, block.Hash())
} }
} }
} else { } else {
@ -576,7 +576,7 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td)) want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
if full { if full {
cur := blockchain.CurrentBlock() cur := blockchain.CurrentBlock()
if have := blockchain.GetTd(cur.Hash(), cur.NumberU64()); have.Cmp(want) != 0 { if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
t.Errorf("total difficulty mismatch: have %v, want %v", have, want) t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
} }
} else { } else {
@ -601,7 +601,7 @@ func testBadHashes(t *testing.T, full bool) {
// Create a chain, ban a hash and try to import // Create a chain, ban a hash and try to import
if full { if full {
blocks := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), 3, ethash.NewFaker(), genDb, 10) blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 3, ethash.NewFaker(), genDb, 10)
BadHashes[blocks[2].Header().Hash()] = true BadHashes[blocks[2].Header().Hash()] = true
defer func() { delete(BadHashes, blocks[2].Header().Hash()) }() defer func() { delete(BadHashes, blocks[2].Header().Hash()) }()
@ -633,7 +633,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
} }
// Create a chain, import and ban afterwards // Create a chain, import and ban afterwards
headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 4, ethash.NewFaker(), genDb, 10) headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 4, ethash.NewFaker(), genDb, 10)
blocks := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), 4, ethash.NewFaker(), genDb, 10) blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 4, ethash.NewFaker(), genDb, 10)
if full { if full {
if _, err = blockchain.InsertChain(blocks); err != nil { if _, err = blockchain.InsertChain(blocks); err != nil {
@ -696,7 +696,7 @@ func testInsertNonceError(t *testing.T, full bool) {
failNum uint64 failNum uint64
) )
if full { if full {
blocks := makeBlockChain(blockchain.chainConfig, blockchain.CurrentBlock(), i, ethash.NewFaker(), genDb, 0) blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), i, ethash.NewFaker(), genDb, 0)
failAt = rand.Int() % len(blocks) failAt = rand.Int() % len(blocks)
failNum = blocks[failAt].NumberU64() failNum = blocks[failAt].NumberU64()
@ -894,11 +894,11 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) { assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
t.Helper() t.Helper()
if num := chain.CurrentBlock().NumberU64(); num != block { if num := chain.CurrentBlock().Number.Uint64(); num != block {
t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block) t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
} }
if num := chain.CurrentFastBlock().NumberU64(); num != fast { if num := chain.CurrentSnapBlock().Number.Uint64(); num != fast {
t.Errorf("%s head fast-block mismatch: have #%v, want #%v", kind, num, fast) t.Errorf("%s head snap-block mismatch: have #%v, want #%v", kind, num, fast)
} }
if num := chain.CurrentHeader().Number.Uint64(); num != header { if num := chain.CurrentHeader().Number.Uint64(); num != header {
t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header) t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
@ -1649,13 +1649,13 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
t.Fatalf("block %d: failed to insert into chain: %v", i, err) t.Fatalf("block %d: failed to insert into chain: %v", i, err)
} }
if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() { if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
t.Errorf("block %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number(), chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4]) t.Errorf("block %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
} }
if _, err := chain.InsertChain(forks[i : i+1]); err != nil { if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
t.Fatalf(" fork %d: failed to insert into chain: %v", i, err) t.Fatalf(" fork %d: failed to insert into chain: %v", i, err)
} }
if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() { if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
t.Errorf(" fork %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number(), chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4]) t.Errorf(" fork %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
} }
} }
} }
@ -1797,11 +1797,11 @@ func TestBlockchainRecovery(t *testing.T) {
// Reopen broken blockchain again // Reopen broken blockchain again
ancient, _ = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) ancient, _ = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop() defer ancient.Stop()
if num := ancient.CurrentBlock().NumberU64(); num != 0 { if num := ancient.CurrentBlock().Number.Uint64(); num != 0 {
t.Errorf("head block mismatch: have #%v, want #%v", num, 0) t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
} }
if num := ancient.CurrentFastBlock().NumberU64(); num != midBlock.NumberU64() { if num := ancient.CurrentSnapBlock().Number.Uint64(); num != midBlock.NumberU64() {
t.Errorf("head fast-block mismatch: have #%v, want #%v", num, midBlock.NumberU64()) t.Errorf("head snap-block mismatch: have #%v, want #%v", num, midBlock.NumberU64())
} }
if num := ancient.CurrentHeader().Number.Uint64(); num != midBlock.NumberU64() { if num := ancient.CurrentHeader().Number.Uint64(); num != midBlock.NumberU64() {
t.Errorf("head header mismatch: have #%v, want #%v", num, midBlock.NumberU64()) t.Errorf("head header mismatch: have #%v, want #%v", num, midBlock.NumberU64())
@ -1820,7 +1820,7 @@ func TestInsertReceiptChainRollback(t *testing.T) {
if _, err := tmpChain.InsertChain(sideblocks); err != nil { if _, err := tmpChain.InsertChain(sideblocks); err != nil {
t.Fatal("processing side chain failed:", err) t.Fatal("processing side chain failed:", err)
} }
t.Log("sidechain head:", tmpChain.CurrentBlock().Number(), tmpChain.CurrentBlock().Hash()) t.Log("sidechain head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash())
sidechainReceipts := make([]types.Receipts, len(sideblocks)) sidechainReceipts := make([]types.Receipts, len(sideblocks))
for i, block := range sideblocks { for i, block := range sideblocks {
sidechainReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash()) sidechainReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
@ -1829,7 +1829,7 @@ func TestInsertReceiptChainRollback(t *testing.T) {
if _, err := tmpChain.InsertChain(canonblocks); err != nil { if _, err := tmpChain.InsertChain(canonblocks); err != nil {
t.Fatal("processing canon chain failed:", err) t.Fatal("processing canon chain failed:", err)
} }
t.Log("canon head:", tmpChain.CurrentBlock().Number(), tmpChain.CurrentBlock().Hash()) t.Log("canon head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash())
canonReceipts := make([]types.Receipts, len(canonblocks)) canonReceipts := make([]types.Receipts, len(canonblocks))
for i, block := range canonblocks { for i, block := range canonblocks {
canonReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash()) canonReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
@ -1859,8 +1859,8 @@ func TestInsertReceiptChainRollback(t *testing.T) {
if err == nil { if err == nil {
t.Fatal("expected error from InsertReceiptChain.") t.Fatal("expected error from InsertReceiptChain.")
} }
if ancientChain.CurrentFastBlock().NumberU64() != 0 { if ancientChain.CurrentSnapBlock().Number.Uint64() != 0 {
t.Fatalf("failed to rollback ancient data, want %d, have %d", 0, ancientChain.CurrentFastBlock().NumberU64()) t.Fatalf("failed to rollback ancient data, want %d, have %d", 0, ancientChain.CurrentSnapBlock().Number)
} }
if frozen, err := ancientChain.db.Ancients(); err != nil || frozen != 1 { if frozen, err := ancientChain.db.Ancients(); err != nil || frozen != 1 {
t.Fatalf("failed to truncate ancient data, frozen index is %d", frozen) t.Fatalf("failed to truncate ancient data, frozen index is %d", frozen)
@ -1871,7 +1871,7 @@ func TestInsertReceiptChainRollback(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("can't import canon chain receipts: %v", err) t.Fatalf("can't import canon chain receipts: %v", err)
} }
if ancientChain.CurrentFastBlock().NumberU64() != canonblocks[len(canonblocks)-1].NumberU64() { if ancientChain.CurrentSnapBlock().Number.Uint64() != canonblocks[len(canonblocks)-1].NumberU64() {
t.Fatalf("failed to insert ancient recept chain after rollback") t.Fatalf("failed to insert ancient recept chain after rollback")
} }
if frozen, _ := ancientChain.db.Ancients(); frozen != uint64(len(canonblocks))+1 { if frozen, _ := ancientChain.db.Ancients(); frozen != uint64(len(canonblocks))+1 {
@ -1926,7 +1926,7 @@ func TestLowDiffLongChain(t *testing.T) {
} }
// Sanity check that all the canonical numbers are present // Sanity check that all the canonical numbers are present
header := chain.CurrentHeader() header := chain.CurrentHeader()
for number := head.NumberU64(); number > 0; number-- { for number := head.Number.Uint64(); number > 0; number-- {
if hash := chain.GetHeaderByNumber(number).Hash(); hash != header.Hash() { if hash := chain.GetHeaderByNumber(number).Hash(); hash != header.Hash() {
t.Fatalf("header %d: canonical hash mismatch: have %x, want %x", number, hash, header.Hash()) t.Fatalf("header %d: canonical hash mismatch: have %x, want %x", number, hash, header.Hash())
} }
@ -2150,8 +2150,8 @@ func testInsertKnownChainData(t *testing.T, typ string) {
return err return err
} }
asserter = func(t *testing.T, block *types.Block) { asserter = func(t *testing.T, block *types.Block) {
if chain.CurrentFastBlock().Hash() != block.Hash() { if chain.CurrentSnapBlock().Hash() != block.Hash() {
t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentFastBlock().Hash().Hex(), block.Hash().Hex()) t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex())
} }
} }
} else { } else {
@ -2324,8 +2324,8 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
return err return err
} }
asserter = func(t *testing.T, block *types.Block) { asserter = func(t *testing.T, block *types.Block) {
if chain.CurrentFastBlock().Hash() != block.Hash() { if chain.CurrentSnapBlock().Hash() != block.Hash() {
t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentFastBlock().Hash().Hex(), block.Hash().Hex()) t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex())
} }
} }
} else { } else {
@ -2452,7 +2452,7 @@ func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
if n, err := chain.InsertChain(canonblocks); err != nil { if n, err := chain.InsertChain(canonblocks); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err) t.Fatalf("block %d: failed to insert into chain: %v", n, err)
} }
canonNum := chain.CurrentBlock().NumberU64() canonNum := chain.CurrentBlock().Number.Uint64()
canonHash := chain.CurrentBlock().Hash() canonHash := chain.CurrentBlock().Hash()
_, err = chain.InsertChain(sideblocks) _, err = chain.InsertChain(sideblocks)
if err != nil { if err != nil {
@ -2467,7 +2467,7 @@ func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
t.Errorf("expected block to be gone: %v", blockByNum.NumberU64()) t.Errorf("expected block to be gone: %v", blockByNum.NumberU64())
} }
if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil { if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64()) t.Errorf("expected header to be gone: %v", headerByNum.Number)
} }
if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil { if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
t.Errorf("expected block to be present: %x", blockByHash.Hash()) t.Errorf("expected block to be present: %x", blockByHash.Hash())
@ -2553,7 +2553,7 @@ func TestTransactionIndices(t *testing.T) {
t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored) t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
} }
if tail != nil { if tail != nil {
for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ { for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ {
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
if block.Transactions().Len() == 0 { if block.Transactions().Len() == 0 {
continue continue
@ -2649,7 +2649,7 @@ func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored) t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
} }
if tail != nil { if tail != nil {
for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ { for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ {
block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
if block.Transactions().Len() == 0 { if block.Transactions().Len() == 0 {
continue continue
@ -2752,7 +2752,8 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
b.Fatalf("failed to insert shared chain: %v", err) b.Fatalf("failed to insert shared chain: %v", err)
} }
b.StopTimer() b.StopTimer()
if got := chain.CurrentBlock().Transactions().Len(); got != numTxs*numBlocks { block := chain.GetBlockByHash(chain.CurrentBlock().Hash())
if got := block.Transactions().Len(); got != numTxs*numBlocks {
b.Fatalf("Transactions were not included, expected %d, got %d", numTxs*numBlocks, got) b.Fatalf("Transactions were not included, expected %d, got %d", numTxs*numBlocks, got)
} }
} }
@ -3715,8 +3716,8 @@ func TestSetCanonical(t *testing.T) {
if chain.CurrentBlock().Hash() != head.Hash() { if chain.CurrentBlock().Hash() != head.Hash() {
t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash()) t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
} }
if chain.CurrentFastBlock().Hash() != head.Hash() { if chain.CurrentSnapBlock().Hash() != head.Hash() {
t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentFastBlock().Hash()) t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash())
} }
if chain.CurrentHeader().Hash() != head.Hash() { if chain.CurrentHeader().Hash() != head.Hash() {
t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash()) t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())
@ -3799,8 +3800,8 @@ func TestCanonicalHashMarker(t *testing.T) {
if chain.CurrentBlock().Hash() != head.Hash() { if chain.CurrentBlock().Hash() != head.Hash() {
t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash()) t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
} }
if chain.CurrentFastBlock().Hash() != head.Hash() { if chain.CurrentSnapBlock().Hash() != head.Hash() {
t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentFastBlock().Hash()) t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash())
} }
if chain.CurrentHeader().Hash() != head.Hash() { if chain.CurrentHeader().Hash() != head.Hash() {
t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash()) t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())

View File

@ -207,23 +207,31 @@ func (b *BlockGen) AddUncle(h *types.Header) {
} }
// AddWithdrawal adds a withdrawal to the generated block. // AddWithdrawal adds a withdrawal to the generated block.
func (b *BlockGen) AddWithdrawal(w *types.Withdrawal) { // It returns the withdrawal index.
// The withdrawal will be assigned the next valid index. func (b *BlockGen) AddWithdrawal(w *types.Withdrawal) uint64 {
var idx uint64 cpy := *w
cpy.Index = b.nextWithdrawalIndex()
b.withdrawals = append(b.withdrawals, &cpy)
return cpy.Index
}
// nextWithdrawalIndex computes the index of the next withdrawal.
func (b *BlockGen) nextWithdrawalIndex() uint64 {
if len(b.withdrawals) != 0 {
return b.withdrawals[len(b.withdrawals)-1].Index + 1
}
for i := b.i - 1; i >= 0; i-- { for i := b.i - 1; i >= 0; i-- {
if wd := b.chain[i].Withdrawals(); len(wd) != 0 { if wd := b.chain[i].Withdrawals(); len(wd) != 0 {
idx = wd[len(wd)-1].Index + 1 return wd[len(wd)-1].Index + 1
break
} }
if i == 0 { if i == 0 {
// Correctly set the index if no parent had withdrawals // Correctly set the index if no parent had withdrawals.
if wd := b.parent.Withdrawals(); len(wd) != 0 { if wd := b.parent.Withdrawals(); len(wd) != 0 {
idx = wd[len(wd)-1].Index + 1 return wd[len(wd)-1].Index + 1
} }
} }
} }
w.Index = idx return 0
b.withdrawals = append(b.withdrawals, w)
} }
// PrevBlock returns a previously generated block by number. It panics if // PrevBlock returns a previously generated block by number. It panics if

View File

@ -19,7 +19,10 @@ package core
import ( import (
"fmt" "fmt"
"math/big" "math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -28,6 +31,112 @@ import (
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
func TestGenerateWithdrawalChain(t *testing.T) {
var (
keyHex = "9c647b8b7c4e7c3490668fb6c11473619db80c93704c70893d3813af4090c39c"
key, _ = crypto.HexToECDSA(keyHex)
address = crypto.PubkeyToAddress(key.PublicKey) // 658bdf435d810c91414ec09147daa6db62406379
aa = common.Address{0xaa}
bb = common.Address{0xbb}
funds = big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(params.Ether))
config = *params.AllEthashProtocolChanges
gspec = &Genesis{
Config: &config,
Alloc: GenesisAlloc{address: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
Difficulty: common.Big1,
GasLimit: 5_000_000,
}
gendb = rawdb.NewMemoryDatabase()
signer = types.LatestSigner(gspec.Config)
db = rawdb.NewMemoryDatabase()
)
config.TerminalTotalDifficultyPassed = true
config.TerminalTotalDifficulty = common.Big0
config.ShanghaiTime = u64(0)
// init 0xaa with some storage elements
storage := make(map[common.Hash]common.Hash)
storage[common.Hash{0x00}] = common.Hash{0x00}
storage[common.Hash{0x01}] = common.Hash{0x01}
storage[common.Hash{0x02}] = common.Hash{0x02}
storage[common.Hash{0x03}] = common.HexToHash("0303")
gspec.Alloc[aa] = GenesisAccount{
Balance: common.Big1,
Nonce: 1,
Storage: storage,
Code: common.Hex2Bytes("6042"),
}
gspec.Alloc[bb] = GenesisAccount{
Balance: common.Big2,
Nonce: 1,
Storage: storage,
Code: common.Hex2Bytes("600154600354"),
}
genesis := gspec.MustCommit(gendb)
chain, _ := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) {
tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(address), address, big.NewInt(1000), params.TxGas, new(big.Int).Add(gen.BaseFee(), common.Big1), nil), signer, key)
gen.AddTx(tx)
if i == 1 {
gen.AddWithdrawal(&types.Withdrawal{
Validator: 42,
Address: common.Address{0xee},
Amount: 1337,
})
gen.AddWithdrawal(&types.Withdrawal{
Validator: 13,
Address: common.Address{0xee},
Amount: 1,
})
}
if i == 3 {
gen.AddWithdrawal(&types.Withdrawal{
Validator: 42,
Address: common.Address{0xee},
Amount: 1337,
})
gen.AddWithdrawal(&types.Withdrawal{
Validator: 13,
Address: common.Address{0xee},
Amount: 1,
})
}
})
// Import the chain. This runs all block validation rules.
blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
if i, err := blockchain.InsertChain(chain); err != nil {
fmt.Printf("insert error (block %d): %v\n", chain[i].NumberU64(), err)
return
}
// enforce that withdrawal indexes are monotonically increasing from 0
var (
withdrawalIndex uint64
head = blockchain.CurrentBlock().Number.Uint64()
)
for i := 0; i < int(head); i++ {
block := blockchain.GetBlockByNumber(uint64(i))
if block == nil {
t.Fatalf("block %d not found", i)
}
if len(block.Withdrawals()) == 0 {
continue
}
for j := 0; j < len(block.Withdrawals()); j++ {
if block.Withdrawals()[j].Index != withdrawalIndex {
t.Fatalf("withdrawal index %d does not equal expected index %d", block.Withdrawals()[j].Index, withdrawalIndex)
}
withdrawalIndex += 1
}
}
}
func ExampleGenerateChain() { func ExampleGenerateChain() {
var ( var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@ -88,7 +197,7 @@ func ExampleGenerateChain() {
} }
state, _ := blockchain.State() state, _ := blockchain.State()
fmt.Printf("last block: #%d\n", blockchain.CurrentBlock().Number()) fmt.Printf("last block: #%d\n", blockchain.CurrentBlock().Number)
fmt.Println("balance of addr1:", state.GetBalance(addr1)) fmt.Println("balance of addr1:", state.GetBalance(addr1))
fmt.Println("balance of addr2:", state.GetBalance(addr2)) fmt.Println("balance of addr2:", state.GetBalance(addr2))
fmt.Println("balance of addr3:", state.GetBalance(addr3)) fmt.Println("balance of addr3:", state.GetBalance(addr3))

View File

@ -76,7 +76,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a pro-fork block, and try to feed into the no-fork chain // Create a pro-fork block, and try to feed into the no-fork chain
bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64()))
for j := 0; j < len(blocks)/2; j++ { for j := 0; j < len(blocks)/2; j++ {
blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
} }
@ -87,19 +87,19 @@ func TestDAOForkRangeExtradata(t *testing.T) {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err) t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
} }
bc.Stop() bc.Stop()
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) blocks, _ = GenerateChain(&proConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
if _, err := conBc.InsertChain(blocks); err == nil { if _, err := conBc.InsertChain(blocks); err == nil {
t.Fatalf("contra-fork chain accepted pro-fork block: %v", blocks[0]) t.Fatalf("contra-fork chain accepted pro-fork block: %v", blocks[0])
} }
// Create a proper no-fork block for the contra-forker // Create a proper no-fork block for the contra-forker
blocks, _ = GenerateChain(&conConf, conBc.CurrentBlock(), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) blocks, _ = GenerateChain(&conConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
if _, err := conBc.InsertChain(blocks); err != nil { if _, err := conBc.InsertChain(blocks); err != nil {
t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err) t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err)
} }
// Create a no-fork block, and try to feed into the pro-fork chain // Create a no-fork block, and try to feed into the pro-fork chain
bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64()))
for j := 0; j < len(blocks)/2; j++ { for j := 0; j < len(blocks)/2; j++ {
blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
} }
@ -110,12 +110,12 @@ func TestDAOForkRangeExtradata(t *testing.T) {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err) t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
} }
bc.Stop() bc.Stop()
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) blocks, _ = GenerateChain(&conConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
if _, err := proBc.InsertChain(blocks); err == nil { if _, err := proBc.InsertChain(blocks); err == nil {
t.Fatalf("pro-fork chain accepted contra-fork block: %v", blocks[0]) t.Fatalf("pro-fork chain accepted contra-fork block: %v", blocks[0])
} }
// Create a proper pro-fork block for the pro-forker // Create a proper pro-fork block for the pro-forker
blocks, _ = GenerateChain(&proConf, proBc.CurrentBlock(), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) blocks, _ = GenerateChain(&proConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
if _, err := proBc.InsertChain(blocks); err != nil { if _, err := proBc.InsertChain(blocks); err != nil {
t.Fatalf("pro-fork chain didn't accepted pro-fork block: %v", err) t.Fatalf("pro-fork chain didn't accepted pro-fork block: %v", err)
} }
@ -124,7 +124,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop() defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64()))
for j := 0; j < len(blocks)/2; j++ { for j := 0; j < len(blocks)/2; j++ {
blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
} }
@ -134,7 +134,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err) t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
} }
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) blocks, _ = GenerateChain(&proConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
if _, err := conBc.InsertChain(blocks); err != nil { if _, err := conBc.InsertChain(blocks); err != nil {
t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err) t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err)
} }
@ -142,7 +142,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop() defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64()))
for j := 0; j < len(blocks)/2; j++ { for j := 0; j < len(blocks)/2; j++ {
blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j]
} }
@ -152,7 +152,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err) t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
} }
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) blocks, _ = GenerateChain(&conConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
if _, err := proBc.InsertChain(blocks); err != nil { if _, err := proBc.InsertChain(blocks); err != nil {
t.Fatalf("pro-fork chain didn't accept contra-fork block post-fork: %v", err) t.Fatalf("pro-fork chain didn't accept contra-fork block post-fork: %v", err)
} }

View File

@ -107,14 +107,16 @@ func TestCreation(t *testing.T) {
params.GoerliChainConfig, params.GoerliChainConfig,
params.GoerliGenesisHash, params.GoerliGenesisHash,
[]testcase{ []testcase{
{0, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block {0, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block
{1561650, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block {1561650, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block
{1561651, 0, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // First Istanbul block {1561651, 0, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // First Istanbul block
{4460643, 0, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // Last Istanbul block {4460643, 0, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // Last Istanbul block
{4460644, 0, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // First Berlin block {4460644, 0, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // First Berlin block
{5000000, 0, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // Last Berlin block {5000000, 0, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // Last Berlin block
{5062605, 0, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // First London block {5062605, 0, ID{Hash: checksumToBytes(0xB8C6299D), Next: 1678832736}}, // First London block
{6000000, 0, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // Future London block {6000000, 1678832735, ID{Hash: checksumToBytes(0xB8C6299D), Next: 1678832736}}, // Last London block
{6000001, 1678832736, ID{Hash: checksumToBytes(0xf9843abf), Next: 0}}, // First Shanghai block
{6500000, 2678832736, ID{Hash: checksumToBytes(0xf9843abf), Next: 0}}, // Future Shanghai block
}, },
}, },
// Sepolia test cases // Sepolia test cases

View File

@ -49,6 +49,11 @@ func (gp *GasPool) Gas() uint64 {
return uint64(*gp) return uint64(*gp)
} }
// SetGas sets the amount of gas with the provided number.
func (gp *GasPool) SetGas(gas uint64) {
*(*uint64)(gp) = gas
}
func (gp *GasPool) String() string { func (gp *GasPool) String() string {
return fmt.Sprintf("%d", *gp) return fmt.Sprintf("%d", *gp)
} }

View File

@ -636,7 +636,14 @@ func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *para
log.Error("Missing body but have receipt", "hash", hash, "number", number) log.Error("Missing body but have receipt", "hash", hash, "number", number)
return nil return nil
} }
if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil { header := ReadHeader(db, hash, number)
var baseFee *big.Int
if header == nil {
baseFee = big.NewInt(0)
} else {
baseFee = header.BaseFee
}
if err := receipts.DeriveFields(config, hash, number, baseFee, body.Transactions); err != nil {
log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err) log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
return nil return nil
} }

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/> // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
//go:build arm64 || amd64 //go:build (arm64 || amd64) && !openbsd
package rawdb package rawdb

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !(arm64 || amd64) //go:build !((arm64 || amd64) && !openbsd)
package rawdb package rawdb

View File

@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/prometheus/tsdb/fileutil" "github.com/gofrs/flock"
) )
var ( var (
@ -75,7 +75,7 @@ type Freezer struct {
readonly bool readonly bool
tables map[string]*freezerTable // Data tables for storing everything tables map[string]*freezerTable // Data tables for storing everything
instanceLock fileutil.Releaser // File-system lock to prevent double opens instanceLock *flock.Flock // File-system lock to prevent double opens
closeOnce sync.Once closeOnce sync.Once
} }
@ -104,11 +104,17 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
return nil, errSymlinkDatadir return nil, errSymlinkDatadir
} }
} }
flockFile := filepath.Join(datadir, "FLOCK")
if err := os.MkdirAll(filepath.Dir(flockFile), 0755); err != nil {
return nil, err
}
// Leveldb uses LOCK as the filelock filename. To prevent the // Leveldb uses LOCK as the filelock filename. To prevent the
// name collision, we use FLOCK as the lock name. // name collision, we use FLOCK as the lock name.
lock, _, err := fileutil.Flock(filepath.Join(datadir, "FLOCK")) lock := flock.New(flockFile)
if err != nil { if locked, err := lock.TryLock(); err != nil {
return nil, err return nil, err
} else if !locked {
return nil, errors.New("locking failed")
} }
// Open all the supported data tables // Open all the supported data tables
freezer := &Freezer{ freezer := &Freezer{
@ -124,12 +130,12 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
for _, table := range freezer.tables { for _, table := range freezer.tables {
table.Close() table.Close()
} }
lock.Release() lock.Unlock()
return nil, err return nil, err
} }
freezer.tables[name] = table freezer.tables[name] = table
} }
var err error
if freezer.readonly { if freezer.readonly {
// In readonly mode only validate, don't truncate. // In readonly mode only validate, don't truncate.
// validate also sets `freezer.frozen`. // validate also sets `freezer.frozen`.
@ -142,7 +148,7 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
for _, table := range freezer.tables { for _, table := range freezer.tables {
table.Close() table.Close()
} }
lock.Release() lock.Unlock()
return nil, err return nil, err
} }
@ -165,7 +171,7 @@ func (f *Freezer) Close() error {
errs = append(errs, err) errs = append(errs, err)
} }
} }
if err := f.instanceLock.Release(); err != nil { if err := f.instanceLock.Unlock(); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
}) })

View File

@ -145,7 +145,7 @@ const (
// blockChain provides the state of blockchain and current gas limit to do // blockChain provides the state of blockchain and current gas limit to do
// some pre checks in tx pool and event subscribers. // some pre checks in tx pool and event subscribers.
type blockChain interface { type blockChain interface {
CurrentBlock() *types.Block CurrentBlock() *types.Header
GetBlock(hash common.Hash, number uint64) *types.Block GetBlock(hash common.Hash, number uint64) *types.Block
StateAt(root common.Hash) (*state.StateDB, error) StateAt(root common.Hash) (*state.StateDB, error)
@ -309,7 +309,7 @@ func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain)
pool.locals.add(addr) pool.locals.add(addr)
} }
pool.priced = newPricedList(pool.all) pool.priced = newPricedList(pool.all)
pool.reset(nil, chain.CurrentBlock().Header()) pool.reset(nil, chain.CurrentBlock())
// Start the reorg loop early so it can handle requests generated during journal loading. // Start the reorg loop early so it can handle requests generated during journal loading.
pool.wg.Add(1) pool.wg.Add(1)
@ -361,8 +361,8 @@ func (pool *TxPool) loop() {
// Handle ChainHeadEvent // Handle ChainHeadEvent
case ev := <-pool.chainHeadCh: case ev := <-pool.chainHeadCh:
if ev.Block != nil { if ev.Block != nil {
pool.requestReset(head.Header(), ev.Block.Header()) pool.requestReset(head, ev.Block.Header())
head = ev.Block head = ev.Block.Header()
} }
// System shutdown. // System shutdown.
@ -1291,7 +1291,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
} }
// Initialize the internal state to the current head // Initialize the internal state to the current head
if newHead == nil { if newHead == nil {
newHead = pool.chain.CurrentBlock().Header() // Special case during testing newHead = pool.chain.CurrentBlock() // Special case during testing
} }
statedb, err := pool.chain.StateAt(newHead.Root) statedb, err := pool.chain.StateAt(newHead.Root)
if err != nil { if err != nil {

View File

@ -64,14 +64,15 @@ type testBlockChain struct {
chainHeadFeed *event.Feed chainHeadFeed *event.Feed
} }
func (bc *testBlockChain) CurrentBlock() *types.Block { func (bc *testBlockChain) CurrentBlock() *types.Header {
return types.NewBlock(&types.Header{ return &types.Header{
Number: new(big.Int),
GasLimit: atomic.LoadUint64(&bc.gasLimit), GasLimit: atomic.LoadUint64(&bc.gasLimit),
}, nil, nil, nil, trie.NewStackTrie(nil)) }
} }
func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
return bc.CurrentBlock() return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil))
} }
func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {

View File

@ -279,6 +279,7 @@ func CopyHeader(h *Header) *Header {
copy(cpy.Extra, h.Extra) copy(cpy.Extra, h.Extra)
} }
if h.WithdrawalsHash != nil { if h.WithdrawalsHash != nil {
cpy.WithdrawalsHash = new(common.Hash)
*cpy.WithdrawalsHash = *h.WithdrawalsHash *cpy.WithdrawalsHash = *h.WithdrawalsHash
} }
return &cpy return &cpy

View File

@ -25,6 +25,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
TxHash common.Hash `json:"transactionHash" gencodec:"required"` TxHash common.Hash `json:"transactionHash" gencodec:"required"`
ContractAddress common.Address `json:"contractAddress"` ContractAddress common.Address `json:"contractAddress"`
GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice,omitempty"`
BlockHash common.Hash `json:"blockHash,omitempty"` BlockHash common.Hash `json:"blockHash,omitempty"`
BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"`
TransactionIndex hexutil.Uint `json:"transactionIndex"` TransactionIndex hexutil.Uint `json:"transactionIndex"`
@ -39,6 +40,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
enc.TxHash = r.TxHash enc.TxHash = r.TxHash
enc.ContractAddress = r.ContractAddress enc.ContractAddress = r.ContractAddress
enc.GasUsed = hexutil.Uint64(r.GasUsed) enc.GasUsed = hexutil.Uint64(r.GasUsed)
enc.EffectiveGasPrice = (*hexutil.Big)(r.EffectiveGasPrice)
enc.BlockHash = r.BlockHash enc.BlockHash = r.BlockHash
enc.BlockNumber = (*hexutil.Big)(r.BlockNumber) enc.BlockNumber = (*hexutil.Big)(r.BlockNumber)
enc.TransactionIndex = hexutil.Uint(r.TransactionIndex) enc.TransactionIndex = hexutil.Uint(r.TransactionIndex)
@ -57,6 +59,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
TxHash *common.Hash `json:"transactionHash" gencodec:"required"` TxHash *common.Hash `json:"transactionHash" gencodec:"required"`
ContractAddress *common.Address `json:"contractAddress"` ContractAddress *common.Address `json:"contractAddress"`
GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice,omitempty"`
BlockHash *common.Hash `json:"blockHash,omitempty"` BlockHash *common.Hash `json:"blockHash,omitempty"`
BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"`
TransactionIndex *hexutil.Uint `json:"transactionIndex"` TransactionIndex *hexutil.Uint `json:"transactionIndex"`
@ -97,6 +100,9 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'gasUsed' for Receipt") return errors.New("missing required field 'gasUsed' for Receipt")
} }
r.GasUsed = uint64(*dec.GasUsed) r.GasUsed = uint64(*dec.GasUsed)
if dec.EffectiveGasPrice != nil {
r.EffectiveGasPrice = (*big.Int)(dec.EffectiveGasPrice)
}
if dec.BlockHash != nil { if dec.BlockHash != nil {
r.BlockHash = *dec.BlockHash r.BlockHash = *dec.BlockHash
} }

View File

@ -59,10 +59,10 @@ type Receipt struct {
Logs []*Log `json:"logs" gencodec:"required"` Logs []*Log `json:"logs" gencodec:"required"`
// Implementation fields: These fields are added by geth when processing a transaction. // Implementation fields: These fields are added by geth when processing a transaction.
// They are stored in the chain database. TxHash common.Hash `json:"transactionHash" gencodec:"required"`
TxHash common.Hash `json:"transactionHash" gencodec:"required"` ContractAddress common.Address `json:"contractAddress"`
ContractAddress common.Address `json:"contractAddress"` GasUsed uint64 `json:"gasUsed" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"` EffectiveGasPrice *big.Int `json:"effectiveGasPrice"`
// Inclusion information: These fields provide information about the inclusion of the // Inclusion information: These fields provide information about the inclusion of the
// transaction corresponding to this receipt. // transaction corresponding to this receipt.
@ -313,7 +313,7 @@ func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) {
// DeriveFields fills the receipts with their computed fields based on consensus // DeriveFields fills the receipts with their computed fields based on consensus
// data and contextual infos like containing block and transactions. // data and contextual infos like containing block and transactions.
func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, txs Transactions) error { func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, baseFee *big.Int, txs []*Transaction) error {
signer := MakeSigner(config, new(big.Int).SetUint64(number)) signer := MakeSigner(config, new(big.Int).SetUint64(number))
logIndex := uint(0) logIndex := uint(0)
@ -325,6 +325,8 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu
rs[i].Type = txs[i].Type() rs[i].Type = txs[i].Type()
rs[i].TxHash = txs[i].Hash() rs[i].TxHash = txs[i].Hash()
rs[i].EffectiveGasPrice = txs[i].inner.effectiveGasPrice(new(big.Int), baseFee)
// block location fields // block location fields
rs[i].BlockHash = hash rs[i].BlockHash = hash
rs[i].BlockNumber = new(big.Int).SetUint64(number) rs[i].BlockNumber = new(big.Int).SetUint64(number)
@ -335,13 +337,17 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu
// Deriving the signer is expensive, only do if it's actually needed // Deriving the signer is expensive, only do if it's actually needed
from, _ := Sender(signer, txs[i]) from, _ := Sender(signer, txs[i])
rs[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce()) rs[i].ContractAddress = crypto.CreateAddress(from, txs[i].Nonce())
} else {
rs[i].ContractAddress = common.Address{}
} }
// The used gas can be calculated based on previous r // The used gas can be calculated based on previous r
if i == 0 { if i == 0 {
rs[i].GasUsed = rs[i].CumulativeGasUsed rs[i].GasUsed = rs[i].CumulativeGasUsed
} else { } else {
rs[i].GasUsed = rs[i].CumulativeGasUsed - rs[i-1].CumulativeGasUsed rs[i].GasUsed = rs[i].CumulativeGasUsed - rs[i-1].CumulativeGasUsed
} }
// The derived log fields can simply be set from the block and transaction // The derived log fields can simply be set from the block and transaction
for j := 0; j < len(rs[i].Logs); j++ { for j := 0; j < len(rs[i].Logs); j++ {
rs[i].Logs[j].BlockNumber = number rs[i].Logs[j].BlockNumber = number

View File

@ -18,15 +18,16 @@ package types
import ( import (
"bytes" "bytes"
"encoding/json"
"math" "math"
"math/big" "math/big"
"reflect" "reflect"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/kylelemons/godebug/diff"
) )
var ( var (
@ -96,122 +97,177 @@ func TestDeriveFields(t *testing.T) {
// Create a few transactions to have receipts for // Create a few transactions to have receipts for
to2 := common.HexToAddress("0x2") to2 := common.HexToAddress("0x2")
to3 := common.HexToAddress("0x3") to3 := common.HexToAddress("0x3")
to4 := common.HexToAddress("0x4")
to5 := common.HexToAddress("0x5")
txs := Transactions{ txs := Transactions{
NewTx(&LegacyTx{ NewTx(&LegacyTx{
Nonce: 1, Nonce: 1,
Value: big.NewInt(1), Value: big.NewInt(1),
Gas: 1, Gas: 1,
GasPrice: big.NewInt(1), GasPrice: big.NewInt(11),
}), }),
NewTx(&LegacyTx{ NewTx(&LegacyTx{
To: &to2, To: &to2,
Nonce: 2, Nonce: 2,
Value: big.NewInt(2), Value: big.NewInt(2),
Gas: 2, Gas: 2,
GasPrice: big.NewInt(2), GasPrice: big.NewInt(22),
}), }),
NewTx(&AccessListTx{ NewTx(&AccessListTx{
To: &to3, To: &to3,
Nonce: 3, Nonce: 3,
Value: big.NewInt(3), Value: big.NewInt(3),
Gas: 3, Gas: 3,
GasPrice: big.NewInt(3), GasPrice: big.NewInt(33),
}),
// EIP-1559 transactions.
NewTx(&DynamicFeeTx{
To: &to4,
Nonce: 4,
Value: big.NewInt(4),
Gas: 4,
GasTipCap: big.NewInt(44),
GasFeeCap: big.NewInt(1045),
}),
NewTx(&DynamicFeeTx{
To: &to5,
Nonce: 5,
Value: big.NewInt(5),
Gas: 5,
GasTipCap: big.NewInt(56),
GasFeeCap: big.NewInt(1055),
}), }),
} }
blockNumber := big.NewInt(1)
blockHash := common.BytesToHash([]byte{0x03, 0x14})
// Create the corresponding receipts // Create the corresponding receipts
receipts := Receipts{ receipts := Receipts{
&Receipt{ &Receipt{
Status: ReceiptStatusFailed, Status: ReceiptStatusFailed,
CumulativeGasUsed: 1, CumulativeGasUsed: 1,
Logs: []*Log{ Logs: []*Log{
{Address: common.BytesToAddress([]byte{0x11})}, {
{Address: common.BytesToAddress([]byte{0x01, 0x11})}, Address: common.BytesToAddress([]byte{0x11}),
// derived fields:
BlockNumber: blockNumber.Uint64(),
TxHash: txs[0].Hash(),
TxIndex: 0,
BlockHash: blockHash,
Index: 0,
},
{
Address: common.BytesToAddress([]byte{0x01, 0x11}),
// derived fields:
BlockNumber: blockNumber.Uint64(),
TxHash: txs[0].Hash(),
TxIndex: 0,
BlockHash: blockHash,
Index: 1,
},
}, },
TxHash: txs[0].Hash(), // derived fields:
ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), TxHash: txs[0].Hash(),
GasUsed: 1, ContractAddress: common.HexToAddress("0x5a443704dd4b594b382c22a083e2bd3090a6fef3"),
GasUsed: 1,
EffectiveGasPrice: big.NewInt(11),
BlockHash: blockHash,
BlockNumber: blockNumber,
TransactionIndex: 0,
}, },
&Receipt{ &Receipt{
PostState: common.Hash{2}.Bytes(), PostState: common.Hash{2}.Bytes(),
CumulativeGasUsed: 3, CumulativeGasUsed: 3,
Logs: []*Log{ Logs: []*Log{
{Address: common.BytesToAddress([]byte{0x22})}, {
{Address: common.BytesToAddress([]byte{0x02, 0x22})}, Address: common.BytesToAddress([]byte{0x22}),
// derived fields:
BlockNumber: blockNumber.Uint64(),
TxHash: txs[1].Hash(),
TxIndex: 1,
BlockHash: blockHash,
Index: 2,
},
{
Address: common.BytesToAddress([]byte{0x02, 0x22}),
// derived fields:
BlockNumber: blockNumber.Uint64(),
TxHash: txs[1].Hash(),
TxIndex: 1,
BlockHash: blockHash,
Index: 3,
},
}, },
TxHash: txs[1].Hash(), // derived fields:
ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), TxHash: txs[1].Hash(),
GasUsed: 2, GasUsed: 2,
EffectiveGasPrice: big.NewInt(22),
BlockHash: blockHash,
BlockNumber: blockNumber,
TransactionIndex: 1,
}, },
&Receipt{ &Receipt{
Type: AccessListTxType, Type: AccessListTxType,
PostState: common.Hash{3}.Bytes(), PostState: common.Hash{3}.Bytes(),
CumulativeGasUsed: 6, CumulativeGasUsed: 6,
Logs: []*Log{ Logs: []*Log{},
{Address: common.BytesToAddress([]byte{0x33})}, // derived fields:
{Address: common.BytesToAddress([]byte{0x03, 0x33})}, TxHash: txs[2].Hash(),
}, GasUsed: 3,
TxHash: txs[2].Hash(), EffectiveGasPrice: big.NewInt(33),
ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}), BlockHash: blockHash,
GasUsed: 3, BlockNumber: blockNumber,
TransactionIndex: 2,
},
&Receipt{
Type: DynamicFeeTxType,
PostState: common.Hash{4}.Bytes(),
CumulativeGasUsed: 10,
Logs: []*Log{},
// derived fields:
TxHash: txs[3].Hash(),
GasUsed: 4,
EffectiveGasPrice: big.NewInt(1044),
BlockHash: blockHash,
BlockNumber: blockNumber,
TransactionIndex: 3,
},
&Receipt{
Type: DynamicFeeTxType,
PostState: common.Hash{5}.Bytes(),
CumulativeGasUsed: 15,
Logs: []*Log{},
// derived fields:
TxHash: txs[4].Hash(),
GasUsed: 5,
EffectiveGasPrice: big.NewInt(1055),
BlockHash: blockHash,
BlockNumber: blockNumber,
TransactionIndex: 4,
}, },
} }
// Clear all the computed fields and re-derive them
number := big.NewInt(1)
hash := common.BytesToHash([]byte{0x03, 0x14})
clearComputedFieldsOnReceipts(t, receipts) // Re-derive receipts.
if err := receipts.DeriveFields(params.TestChainConfig, hash, number.Uint64(), txs); err != nil { basefee := big.NewInt(1000)
derivedReceipts := clearComputedFieldsOnReceipts(receipts)
err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), basefee, txs)
if err != nil {
t.Fatalf("DeriveFields(...) = %v, want <nil>", err) t.Fatalf("DeriveFields(...) = %v, want <nil>", err)
} }
// Iterate over all the computed fields and check that they're correct
signer := MakeSigner(params.TestChainConfig, number)
logIndex := uint(0) // Check diff of receipts against derivedReceipts.
for i := range receipts { r1, err := json.MarshalIndent(receipts, "", " ")
if receipts[i].Type != txs[i].Type() { if err != nil {
t.Errorf("receipts[%d].Type = %d, want %d", i, receipts[i].Type, txs[i].Type()) t.Fatal("error marshaling input receipts:", err)
} }
if receipts[i].TxHash != txs[i].Hash() { r2, err := json.MarshalIndent(derivedReceipts, "", " ")
t.Errorf("receipts[%d].TxHash = %s, want %s", i, receipts[i].TxHash.String(), txs[i].Hash().String()) if err != nil {
} t.Fatal("error marshaling derived receipts:", err)
if receipts[i].BlockHash != hash { }
t.Errorf("receipts[%d].BlockHash = %s, want %s", i, receipts[i].BlockHash.String(), hash.String()) d := diff.Diff(string(r1), string(r2))
} if d != "" {
if receipts[i].BlockNumber.Cmp(number) != 0 { t.Fatal("receipts differ:", d)
t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, receipts[i].BlockNumber.String(), number.String())
}
if receipts[i].TransactionIndex != uint(i) {
t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, receipts[i].TransactionIndex, i)
}
if receipts[i].GasUsed != txs[i].Gas() {
t.Errorf("receipts[%d].GasUsed = %d, want %d", i, receipts[i].GasUsed, txs[i].Gas())
}
if txs[i].To() != nil && receipts[i].ContractAddress != (common.Address{}) {
t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), (common.Address{}).String())
}
from, _ := Sender(signer, txs[i])
contractAddress := crypto.CreateAddress(from, txs[i].Nonce())
if txs[i].To() == nil && receipts[i].ContractAddress != contractAddress {
t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), contractAddress.String())
}
for j := range receipts[i].Logs {
if receipts[i].Logs[j].BlockNumber != number.Uint64() {
t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64())
}
if receipts[i].Logs[j].BlockHash != hash {
t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String())
}
if receipts[i].Logs[j].TxHash != txs[i].Hash() {
t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String())
}
if receipts[i].Logs[j].TxIndex != uint(i) {
t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i)
}
if receipts[i].Logs[j].Index != logIndex {
t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex)
}
logIndex++
}
} }
} }
@ -342,41 +398,36 @@ func TestReceiptUnmarshalBinary(t *testing.T) {
} }
} }
func clearComputedFieldsOnReceipts(t *testing.T, receipts Receipts) { func clearComputedFieldsOnReceipts(receipts []*Receipt) []*Receipt {
t.Helper() r := make([]*Receipt, len(receipts))
for i, receipt := range receipts {
for _, receipt := range receipts { r[i] = clearComputedFieldsOnReceipt(receipt)
clearComputedFieldsOnReceipt(t, receipt)
} }
return r
} }
func clearComputedFieldsOnReceipt(t *testing.T, receipt *Receipt) { func clearComputedFieldsOnReceipt(receipt *Receipt) *Receipt {
t.Helper() cpy := *receipt
cpy.TxHash = common.Hash{0xff, 0xff, 0x11}
receipt.TxHash = common.Hash{} cpy.BlockHash = common.Hash{0xff, 0xff, 0x22}
receipt.BlockHash = common.Hash{} cpy.BlockNumber = big.NewInt(math.MaxUint32)
receipt.BlockNumber = big.NewInt(math.MaxUint32) cpy.TransactionIndex = math.MaxUint32
receipt.TransactionIndex = math.MaxUint32 cpy.ContractAddress = common.Address{0xff, 0xff, 0x33}
receipt.ContractAddress = common.Address{} cpy.GasUsed = 0xffffffff
receipt.GasUsed = 0 cpy.Logs = clearComputedFieldsOnLogs(receipt.Logs)
return &cpy
clearComputedFieldsOnLogs(t, receipt.Logs)
} }
func clearComputedFieldsOnLogs(t *testing.T, logs []*Log) { func clearComputedFieldsOnLogs(logs []*Log) []*Log {
t.Helper() l := make([]*Log, len(logs))
for i, log := range logs {
for _, log := range logs { cpy := *log
clearComputedFieldsOnLog(t, log) cpy.BlockNumber = math.MaxUint32
cpy.BlockHash = common.Hash{}
cpy.TxHash = common.Hash{}
cpy.TxIndex = math.MaxUint32
cpy.Index = math.MaxUint32
l[i] = &cpy
} }
} return l
func clearComputedFieldsOnLog(t *testing.T, log *Log) {
t.Helper()
log.BlockNumber = math.MaxUint32
log.BlockHash = common.Hash{}
log.TxHash = common.Hash{}
log.TxIndex = math.MaxUint32
log.Index = math.MaxUint32
} }

View File

@ -85,6 +85,14 @@ type TxData interface {
rawSignatureValues() (v, r, s *big.Int) rawSignatureValues() (v, r, s *big.Int)
setSignatureValues(chainID, v, r, s *big.Int) setSignatureValues(chainID, v, r, s *big.Int)
// effectiveGasPrice computes the gas price paid by the transaction, given
// the inclusion block baseFee.
//
// Unlike other TxData methods, the returned *big.Int should be an independent
// copy of the computed value, i.e. callers are allowed to mutate the result.
// Method implementations can use 'dst' to store the result.
effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int
} }
// EncodeRLP implements rlp.Encoder // EncodeRLP implements rlp.Encoder

View File

@ -106,6 +106,10 @@ func (tx *AccessListTx) value() *big.Int { return tx.Value }
func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } func (tx *AccessListTx) nonce() uint64 { return tx.Nonce }
func (tx *AccessListTx) to() *common.Address { return tx.To } func (tx *AccessListTx) to() *common.Address { return tx.To }
// effectiveGasPrice implements TxData. Access list (EIP-2930) transactions
// are legacy-priced: the price paid is simply GasPrice, independent of the
// block baseFee. The result is written into dst, which is returned.
func (tx *AccessListTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
	dst.Set(tx.GasPrice)
	return dst
}
func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) { func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) {
return tx.V, tx.R, tx.S return tx.V, tx.R, tx.S
} }

View File

@ -94,6 +94,17 @@ func (tx *DynamicFeeTx) value() *big.Int { return tx.Value }
func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce } func (tx *DynamicFeeTx) nonce() uint64 { return tx.Nonce }
func (tx *DynamicFeeTx) to() *common.Address { return tx.To } func (tx *DynamicFeeTx) to() *common.Address { return tx.To }
// effectiveGasPrice implements TxData. For EIP-1559 transactions the price
// actually paid is baseFee + min(GasTipCap, GasFeeCap-baseFee); when baseFee
// is nil (no 1559 context available) the fee cap is returned unchanged.
// The result is stored in dst, which is returned for chaining.
func (tx *DynamicFeeTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
	if baseFee == nil {
		return dst.Set(tx.GasFeeCap)
	}
	// Start from the maximum tip the fee cap allows, then clamp it to the
	// advertised tip cap before adding the base fee back on.
	dst.Sub(tx.GasFeeCap, baseFee)
	if dst.Cmp(tx.GasTipCap) > 0 {
		dst.Set(tx.GasTipCap)
	}
	return dst.Add(dst, baseFee)
}
func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) { func (tx *DynamicFeeTx) rawSignatureValues() (v, r, s *big.Int) {
return tx.V, tx.R, tx.S return tx.V, tx.R, tx.S
} }

View File

@ -103,6 +103,10 @@ func (tx *LegacyTx) value() *big.Int { return tx.Value }
func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } func (tx *LegacyTx) nonce() uint64 { return tx.Nonce }
func (tx *LegacyTx) to() *common.Address { return tx.To } func (tx *LegacyTx) to() *common.Address { return tx.To }
// effectiveGasPrice implements TxData. Legacy transactions always pay their
// declared GasPrice regardless of the block baseFee. The value is copied
// into dst, which is returned.
func (tx *LegacyTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
	dst.Set(tx.GasPrice)
	return dst
}
func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) { func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) {
return tx.V, tx.R, tx.S return tx.V, tx.R, tx.S
} }

View File

@ -267,20 +267,24 @@ func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {
_, stateDb := api.eth.miner.Pending() _, stateDb := api.eth.miner.Pending()
return stateDb.RawDump(opts), nil return stateDb.RawDump(opts), nil
} }
var block *types.Block var header *types.Header
if blockNr == rpc.LatestBlockNumber { if blockNr == rpc.LatestBlockNumber {
block = api.eth.blockchain.CurrentBlock() header = api.eth.blockchain.CurrentBlock()
} else if blockNr == rpc.FinalizedBlockNumber { } else if blockNr == rpc.FinalizedBlockNumber {
block = api.eth.blockchain.CurrentFinalizedBlock() header = api.eth.blockchain.CurrentFinalBlock()
} else if blockNr == rpc.SafeBlockNumber { } else if blockNr == rpc.SafeBlockNumber {
block = api.eth.blockchain.CurrentSafeBlock() header = api.eth.blockchain.CurrentSafeBlock()
} else { } else {
block = api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr))
if block == nil {
return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
}
header = block.Header()
} }
if block == nil { if header == nil {
return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) return state.Dump{}, fmt.Errorf("block #%d not found", blockNr)
} }
stateDb, err := api.eth.BlockChain().StateAt(block.Root()) stateDb, err := api.eth.BlockChain().StateAt(header.Root)
if err != nil { if err != nil {
return state.Dump{}, err return state.Dump{}, err
} }
@ -347,20 +351,24 @@ func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hex
// the miner and operate on those // the miner and operate on those
_, stateDb = api.eth.miner.Pending() _, stateDb = api.eth.miner.Pending()
} else { } else {
var block *types.Block var header *types.Header
if number == rpc.LatestBlockNumber { if number == rpc.LatestBlockNumber {
block = api.eth.blockchain.CurrentBlock() header = api.eth.blockchain.CurrentBlock()
} else if number == rpc.FinalizedBlockNumber { } else if number == rpc.FinalizedBlockNumber {
block = api.eth.blockchain.CurrentFinalizedBlock() header = api.eth.blockchain.CurrentFinalBlock()
} else if number == rpc.SafeBlockNumber { } else if number == rpc.SafeBlockNumber {
block = api.eth.blockchain.CurrentSafeBlock() header = api.eth.blockchain.CurrentSafeBlock()
} else { } else {
block = api.eth.blockchain.GetBlockByNumber(uint64(number)) block := api.eth.blockchain.GetBlockByNumber(uint64(number))
if block == nil {
return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
}
header = block.Header()
} }
if block == nil { if header == nil {
return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) return state.IteratorDump{}, fmt.Errorf("block #%d not found", number)
} }
stateDb, err = api.eth.BlockChain().StateAt(block.Root()) stateDb, err = api.eth.BlockChain().StateAt(header.Root)
if err != nil { if err != nil {
return state.IteratorDump{}, err return state.IteratorDump{}, err
} }
@ -552,7 +560,7 @@ func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error
if block == nil { if block == nil {
return 0, fmt.Errorf("current block missing") return 0, fmt.Errorf("current block missing")
} }
return block.NumberU64(), nil return block.Number.Uint64(), nil
} }
return uint64(num.Int64()), nil return uint64(num.Int64()), nil
} }

View File

@ -55,7 +55,7 @@ func (b *EthAPIBackend) ChainConfig() *params.ChainConfig {
return b.eth.blockchain.Config() return b.eth.blockchain.Config()
} }
// CurrentBlock returns the header of the blockchain's current head block.
func (b *EthAPIBackend) CurrentBlock() *types.Header {
	return b.eth.blockchain.CurrentBlock()
}
@ -72,19 +72,19 @@ func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumb
} }
// Otherwise resolve and return the block // Otherwise resolve and return the block
if number == rpc.LatestBlockNumber { if number == rpc.LatestBlockNumber {
return b.eth.blockchain.CurrentBlock().Header(), nil return b.eth.blockchain.CurrentBlock(), nil
} }
if number == rpc.FinalizedBlockNumber { if number == rpc.FinalizedBlockNumber {
block := b.eth.blockchain.CurrentFinalizedBlock() block := b.eth.blockchain.CurrentFinalBlock()
if block != nil { if block != nil {
return block.Header(), nil return block, nil
} }
return nil, errors.New("finalized block not found") return nil, errors.New("finalized block not found")
} }
if number == rpc.SafeBlockNumber { if number == rpc.SafeBlockNumber {
block := b.eth.blockchain.CurrentSafeBlock() block := b.eth.blockchain.CurrentSafeBlock()
if block != nil { if block != nil {
return block.Header(), nil return block, nil
} }
return nil, errors.New("safe block not found") return nil, errors.New("safe block not found")
} }
@ -120,13 +120,16 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe
} }
// Otherwise resolve and return the block // Otherwise resolve and return the block
if number == rpc.LatestBlockNumber { if number == rpc.LatestBlockNumber {
return b.eth.blockchain.CurrentBlock(), nil header := b.eth.blockchain.CurrentBlock()
return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil
} }
if number == rpc.FinalizedBlockNumber { if number == rpc.FinalizedBlockNumber {
return b.eth.blockchain.CurrentFinalizedBlock(), nil header := b.eth.blockchain.CurrentFinalBlock()
return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil
} }
if number == rpc.SafeBlockNumber { if number == rpc.SafeBlockNumber {
return b.eth.blockchain.CurrentSafeBlock(), nil header := b.eth.blockchain.CurrentSafeBlock()
return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil
} }
return b.eth.blockchain.GetBlockByNumber(uint64(number)), nil return b.eth.blockchain.GetBlockByNumber(uint64(number)), nil
} }

View File

@ -237,6 +237,10 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
log.Warn("Forkchoice requested unknown head", "hash", update.HeadBlockHash) log.Warn("Forkchoice requested unknown head", "hash", update.HeadBlockHash)
return engine.STATUS_SYNCING, nil return engine.STATUS_SYNCING, nil
} }
// If the finalized hash is known, we can direct the downloader to move
// potentially more data to the freezer from the get go.
finalized := api.remoteBlocks.get(update.FinalizedBlockHash)
// Header advertised via a past newPayload request. Start syncing to it. // Header advertised via a past newPayload request. Start syncing to it.
// Before we do however, make sure any legacy sync in switched off so we // Before we do however, make sure any legacy sync in switched off so we
// don't accidentally have 2 cycles running. // don't accidentally have 2 cycles running.
@ -244,8 +248,16 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
merger.ReachTTD() merger.ReachTTD()
api.eth.Downloader().Cancel() api.eth.Downloader().Cancel()
} }
log.Info("Forkchoice requested sync to new head", "number", header.Number, "hash", header.Hash()) context := []interface{}{"number", header.Number, "hash", header.Hash()}
if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), header); err != nil { if update.FinalizedBlockHash != (common.Hash{}) {
if finalized == nil {
context = append(context, []interface{}{"finalized", "unknown"}...)
} else {
context = append(context, []interface{}{"finalized", finalized.Number}...)
}
}
log.Info("Forkchoice requested sync to new head", context...)
if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), header, finalized); err != nil {
return engine.STATUS_SYNCING, err return engine.STATUS_SYNCING, err
} }
return engine.STATUS_SYNCING, nil return engine.STATUS_SYNCING, nil
@ -289,7 +301,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
} else { } else {
// If the head block is already in our canonical chain, the beacon client is // If the head block is already in our canonical chain, the beacon client is
// probably resyncing. Ignore the update. // probably resyncing. Ignore the update.
log.Info("Ignoring beacon update to old head", "number", block.NumberU64(), "hash", update.HeadBlockHash, "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)), "have", api.eth.BlockChain().CurrentBlock().NumberU64()) log.Info("Ignoring beacon update to old head", "number", block.NumberU64(), "hash", update.HeadBlockHash, "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)), "have", api.eth.BlockChain().CurrentBlock().Number)
return valid(nil), nil return valid(nil), nil
} }
api.eth.SetSynced() api.eth.SetSynced()
@ -310,7 +322,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
return engine.STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not in canonical chain")) return engine.STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("final block not in canonical chain"))
} }
// Set the finalized block // Set the finalized block
api.eth.BlockChain().SetFinalized(finalBlock) api.eth.BlockChain().SetFinalized(finalBlock.Header())
} }
// Check if the safe block hash is in our canonical tree, if not somethings wrong // Check if the safe block hash is in our canonical tree, if not somethings wrong
if update.SafeBlockHash != (common.Hash{}) { if update.SafeBlockHash != (common.Hash{}) {
@ -324,7 +336,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
return engine.STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not in canonical chain")) return engine.STATUS_INVALID, engine.InvalidForkChoiceState.With(errors.New("safe block not in canonical chain"))
} }
// Set the safe block // Set the safe block
api.eth.BlockChain().SetSafe(safeBlock) api.eth.BlockChain().SetSafe(safeBlock.Header())
} }
// If payload generation was requested, create a new block to be potentially // If payload generation was requested, create a new block to be potentially
// sealed by the beacon client. The payload will be requested later, and we // sealed by the beacon client. The payload will be requested later, and we
@ -792,7 +804,7 @@ func (api *ConsensusAPI) GetPayloadBodiesByRangeV1(start, count hexutil.Uint64)
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested count too large: %v", count)) return nil, engine.TooLargeRequest.With(fmt.Errorf("requested count too large: %v", count))
} }
// limit count up until current // limit count up until current
current := api.eth.BlockChain().CurrentBlock().NumberU64() current := api.eth.BlockChain().CurrentBlock().Number.Uint64()
last := uint64(start) + uint64(count) - 1 last := uint64(start) + uint64(count) - 1
if last > current { if last > current {
last = current last = current

View File

@ -19,8 +19,11 @@ package catalyst
import ( import (
"bytes" "bytes"
"context" "context"
crand "crypto/rand"
"fmt" "fmt"
"math/big" "math/big"
"math/rand"
"reflect"
"sync" "sync"
"testing" "testing"
"time" "time"
@ -41,7 +44,6 @@ import (
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
@ -248,8 +250,8 @@ func TestInvalidPayloadTimestamp(t *testing.T) {
shouldErr bool shouldErr bool
}{ }{
{0, true}, {0, true},
{parent.Time(), true}, {parent.Time, true},
{parent.Time() - 1, true}, {parent.Time - 1, true},
// TODO (MariusVanDerWijden) following tests are currently broken, // TODO (MariusVanDerWijden) following tests are currently broken,
// fixed in upcoming merge-kiln-v2 pr // fixed in upcoming merge-kiln-v2 pr
@ -262,7 +264,7 @@ func TestInvalidPayloadTimestamp(t *testing.T) {
params := engine.PayloadAttributes{ params := engine.PayloadAttributes{
Timestamp: test.time, Timestamp: test.time,
Random: crypto.Keccak256Hash([]byte{byte(123)}), Random: crypto.Keccak256Hash([]byte{byte(123)}),
SuggestedFeeRecipient: parent.Coinbase(), SuggestedFeeRecipient: parent.Coinbase,
} }
fcState := engine.ForkchoiceStateV1{ fcState := engine.ForkchoiceStateV1{
HeadBlockHash: parent.Hash(), HeadBlockHash: parent.Hash(),
@ -319,7 +321,7 @@ func TestEth2NewBlock(t *testing.T) {
t.Fatalf("Failed to insert block: %v", err) t.Fatalf("Failed to insert block: %v", err)
case newResp.Status != "VALID": case newResp.Status != "VALID":
t.Fatalf("Failed to insert block: %v", newResp.Status) t.Fatalf("Failed to insert block: %v", newResp.Status)
case ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64()-1: case ethservice.BlockChain().CurrentBlock().Number.Uint64() != block.NumberU64()-1:
t.Fatalf("Chain head shouldn't be updated") t.Fatalf("Chain head shouldn't be updated")
} }
checkLogEvents(t, newLogCh, rmLogsCh, 0, 0) checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
@ -331,7 +333,7 @@ func TestEth2NewBlock(t *testing.T) {
if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
t.Fatalf("Failed to insert block: %v", err) t.Fatalf("Failed to insert block: %v", err)
} }
if have, want := ethservice.BlockChain().CurrentBlock().NumberU64(), block.NumberU64(); have != want { if have, want := ethservice.BlockChain().CurrentBlock().Number.Uint64(), block.NumberU64(); have != want {
t.Fatalf("Chain head should be updated, have %d want %d", have, want) t.Fatalf("Chain head should be updated, have %d want %d", have, want)
} }
checkLogEvents(t, newLogCh, rmLogsCh, 1, 0) checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
@ -341,7 +343,7 @@ func TestEth2NewBlock(t *testing.T) {
// Introduce fork chain // Introduce fork chain
var ( var (
head = ethservice.BlockChain().CurrentBlock().NumberU64() head = ethservice.BlockChain().CurrentBlock().Number.Uint64()
) )
parent = preMergeBlocks[len(preMergeBlocks)-1] parent = preMergeBlocks[len(preMergeBlocks)-1]
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
@ -359,7 +361,7 @@ func TestEth2NewBlock(t *testing.T) {
if err != nil || newResp.Status != "VALID" { if err != nil || newResp.Status != "VALID" {
t.Fatalf("Failed to insert block: %v", err) t.Fatalf("Failed to insert block: %v", err)
} }
if ethservice.BlockChain().CurrentBlock().NumberU64() != head { if ethservice.BlockChain().CurrentBlock().Number.Uint64() != head {
t.Fatalf("Chain head shouldn't be updated") t.Fatalf("Chain head shouldn't be updated")
} }
@ -371,7 +373,7 @@ func TestEth2NewBlock(t *testing.T) {
if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
t.Fatalf("Failed to insert block: %v", err) t.Fatalf("Failed to insert block: %v", err)
} }
if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() { if ethservice.BlockChain().CurrentBlock().Number.Uint64() != block.NumberU64() {
t.Fatalf("Chain head should be updated") t.Fatalf("Chain head should be updated")
} }
parent, head = block, block.NumberU64() parent, head = block, block.NumberU64()
@ -389,7 +391,7 @@ func TestEth2DeepReorg(t *testing.T) {
var ( var (
api = NewConsensusAPI(ethservice, nil) api = NewConsensusAPI(ethservice, nil)
parent = preMergeBlocks[len(preMergeBlocks)-core.TriesInMemory-1] parent = preMergeBlocks[len(preMergeBlocks)-core.TriesInMemory-1]
head = ethservice.BlockChain().CurrentBlock().NumberU64() head = ethservice.BlockChain().CurrentBlock().Number.Uint64()()
) )
if ethservice.BlockChain().HasBlockAndState(parent.Hash(), parent.NumberU64()) { if ethservice.BlockChain().HasBlockAndState(parent.Hash(), parent.NumberU64()) {
t.Errorf("Block %d not pruned", parent.NumberU64()) t.Errorf("Block %d not pruned", parent.NumberU64())
@ -410,13 +412,13 @@ func TestEth2DeepReorg(t *testing.T) {
if err != nil || newResp.Status != "VALID" { if err != nil || newResp.Status != "VALID" {
t.Fatalf("Failed to insert block: %v", err) t.Fatalf("Failed to insert block: %v", err)
} }
if ethservice.BlockChain().CurrentBlock().NumberU64() != head { if ethservice.BlockChain().CurrentBlock().Number.Uint64()() != head {
t.Fatalf("Chain head shouldn't be updated") t.Fatalf("Chain head shouldn't be updated")
} }
if err := api.setHead(block.Hash()); err != nil { if err := api.setHead(block.Hash()); err != nil {
t.Fatalf("Failed to set head: %v", err) t.Fatalf("Failed to set head: %v", err)
} }
if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() { if ethservice.BlockChain().CurrentBlock().Number.Uint64()() != block.NumberU64() {
t.Fatalf("Chain head should be updated") t.Fatalf("Chain head should be updated")
} }
parent, head = block, block.NumberU64() parent, head = block, block.NumberU64()
@ -466,25 +468,28 @@ func TestFullAPI(t *testing.T) {
logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
) )
callback := func(parent *types.Block) { callback := func(parent *types.Header) {
statedb, _ := ethservice.BlockChain().StateAt(parent.Root()) statedb, _ := ethservice.BlockChain().StateAt(parent.Root)
nonce := statedb.GetNonce(testAddr) nonce := statedb.GetNonce(testAddr)
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
ethservice.TxPool().AddLocal(tx) ethservice.TxPool().AddLocal(tx)
} }
setupBlocks(t, ethservice, 10, parent, callback) setupBlocks(t, ethservice, 10, parent, callback, nil)
} }
func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.Block, callback func(parent *types.Block)) []*types.Block { func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.Header, callback func(parent *types.Header), withdrawals [][]*types.Withdrawal) []*types.Header {
api := NewConsensusAPI(ethservice) api := NewConsensusAPI(ethservice)
var blocks []*types.Block var blocks []*types.Header
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
callback(parent) callback(parent)
var w []*types.Withdrawal
if withdrawals != nil {
w = withdrawals[i]
}
payload := getNewPayload(t, api, parent) payload := getNewPayload(t, api, parent, w)
execResp, err := api.NewPayloadV2(*payload)
execResp, err := api.NewPayloadV1(*payload)
if err != nil { if err != nil {
t.Fatalf("can't execute payload: %v", err) t.Fatalf("can't execute payload: %v", err)
} }
@ -499,10 +504,10 @@ func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.Bl
if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
t.Fatalf("Failed to insert block: %v", err) t.Fatalf("Failed to insert block: %v", err)
} }
if ethservice.BlockChain().CurrentBlock().NumberU64() != payload.Number { if ethservice.BlockChain().CurrentBlock().Number.Uint64() != payload.Number {
t.Fatal("Chain head should be updated") t.Fatal("Chain head should be updated")
} }
if ethservice.BlockChain().CurrentFinalizedBlock().NumberU64() != payload.Number-1 { if ethservice.BlockChain().CurrentFinalBlock().Number.Uint64() != payload.Number-1 {
t.Fatal("Finalized block should be updated") t.Fatal("Finalized block should be updated")
} }
parent = ethservice.BlockChain().CurrentBlock() parent = ethservice.BlockChain().CurrentBlock()
@ -585,7 +590,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
) )
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
statedb, _ := ethservice.BlockChain().StateAt(parent.Root()) statedb, _ := ethservice.BlockChain().StateAt(parent.Root)
tx := types.MustSignNewTx(testKey, signer, &types.LegacyTx{ tx := types.MustSignNewTx(testKey, signer, &types.LegacyTx{
Nonce: statedb.GetNonce(testAddr), Nonce: statedb.GetNonce(testAddr),
Value: new(big.Int), Value: new(big.Int),
@ -596,9 +601,9 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
ethservice.TxPool().AddRemotesSync([]*types.Transaction{tx}) ethservice.TxPool().AddRemotesSync([]*types.Transaction{tx})
var ( var (
params = engine.PayloadAttributes{ params = engine.PayloadAttributes{
Timestamp: parent.Time() + 1, Timestamp: parent.Time + 1,
Random: crypto.Keccak256Hash([]byte{byte(i)}), Random: crypto.Keccak256Hash([]byte{byte(i)}),
SuggestedFeeRecipient: parent.Coinbase(), SuggestedFeeRecipient: parent.Coinbase,
} }
fcState = engine.ForkchoiceStateV1{ fcState = engine.ForkchoiceStateV1{
HeadBlockHash: parent.Hash(), HeadBlockHash: parent.Hash(),
@ -645,7 +650,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
t.Fatalf("Failed to insert block: %v", err) t.Fatalf("Failed to insert block: %v", err)
} }
if ethservice.BlockChain().CurrentBlock().NumberU64() != payload.Number { if ethservice.BlockChain().CurrentBlock().Number.Uint64() != payload.Number {
t.Fatalf("Chain head should be updated") t.Fatalf("Chain head should be updated")
} }
parent = ethservice.BlockChain().CurrentBlock() parent = ethservice.BlockChain().CurrentBlock()
@ -676,10 +681,10 @@ func TestEmptyBlocks(t *testing.T) {
api := NewConsensusAPI(ethservice) api := NewConsensusAPI(ethservice)
// Setup 10 blocks on the canonical chain // Setup 10 blocks on the canonical chain
setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Block) {}) setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Header) {}, nil)
// (1) check LatestValidHash by sending a normal payload (P1'') // (1) check LatestValidHash by sending a normal payload (P1'')
payload := getNewPayload(t, api, commonAncestor) payload := getNewPayload(t, api, commonAncestor, nil)
status, err := api.NewPayloadV1(*payload) status, err := api.NewPayloadV1(*payload)
if err != nil { if err != nil {
@ -693,7 +698,7 @@ func TestEmptyBlocks(t *testing.T) {
} }
// (2) Now send P1' which is invalid // (2) Now send P1' which is invalid
payload = getNewPayload(t, api, commonAncestor) payload = getNewPayload(t, api, commonAncestor, nil)
payload.GasUsed += 1 payload.GasUsed += 1
payload = setBlockhash(payload) payload = setBlockhash(payload)
// Now latestValidHash should be the common ancestor // Now latestValidHash should be the common ancestor
@ -711,7 +716,7 @@ func TestEmptyBlocks(t *testing.T) {
} }
// (3) Now send a payload with unknown parent // (3) Now send a payload with unknown parent
payload = getNewPayload(t, api, commonAncestor) payload = getNewPayload(t, api, commonAncestor, nil)
payload.ParentHash = common.Hash{1} payload.ParentHash = common.Hash{1}
payload = setBlockhash(payload) payload = setBlockhash(payload)
// Now latestValidHash should be the common ancestor // Now latestValidHash should be the common ancestor
@ -727,11 +732,12 @@ func TestEmptyBlocks(t *testing.T) {
} }
} }
func getNewPayload(t *testing.T, api *ConsensusAPI, parent *types.Block) *engine.ExecutableData { func getNewPayload(t *testing.T, api *ConsensusAPI, parent *types.Header, withdrawals []*types.Withdrawal) *engine.ExecutableData {
params := engine.PayloadAttributes{ params := engine.PayloadAttributes{
Timestamp: parent.Time() + 1, Timestamp: parent.Time + 1,
Random: crypto.Keccak256Hash([]byte{byte(1)}), Random: crypto.Keccak256Hash([]byte{byte(1)}),
SuggestedFeeRecipient: parent.Coinbase(), SuggestedFeeRecipient: parent.Coinbase,
Withdrawals: withdrawals,
} }
payload, err := assembleBlock(api, parent.Hash(), &params) payload, err := assembleBlock(api, parent.Hash(), &params)
@ -799,7 +805,7 @@ func TestTrickRemoteBlockCache(t *testing.T) {
commonAncestor := ethserviceA.BlockChain().CurrentBlock() commonAncestor := ethserviceA.BlockChain().CurrentBlock()
// Setup 10 blocks on the canonical chain // Setup 10 blocks on the canonical chain
setupBlocks(t, ethserviceA, 10, commonAncestor, func(parent *types.Block) {}) setupBlocks(t, ethserviceA, 10, commonAncestor, func(parent *types.Header) {}, nil)
commonAncestor = ethserviceA.BlockChain().CurrentBlock() commonAncestor = ethserviceA.BlockChain().CurrentBlock()
var invalidChain []*engine.ExecutableData var invalidChain []*engine.ExecutableData
@ -808,7 +814,7 @@ func TestTrickRemoteBlockCache(t *testing.T) {
//invalidChain = append(invalidChain, payload1) //invalidChain = append(invalidChain, payload1)
// create an invalid payload2 (P2) // create an invalid payload2 (P2)
payload2 := getNewPayload(t, apiA, commonAncestor) payload2 := getNewPayload(t, apiA, commonAncestor, nil)
//payload2.ParentHash = payload1.BlockHash //payload2.ParentHash = payload1.BlockHash
payload2.GasUsed += 1 payload2.GasUsed += 1
payload2 = setBlockhash(payload2) payload2 = setBlockhash(payload2)
@ -817,7 +823,7 @@ func TestTrickRemoteBlockCache(t *testing.T) {
head := payload2 head := payload2
// create some valid payloads on top // create some valid payloads on top
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
payload := getNewPayload(t, apiA, commonAncestor) payload := getNewPayload(t, apiA, commonAncestor, nil)
payload.ParentHash = head.BlockHash payload.ParentHash = head.BlockHash
payload = setBlockhash(payload) payload = setBlockhash(payload)
invalidChain = append(invalidChain, payload) invalidChain = append(invalidChain, payload)
@ -855,10 +861,10 @@ func TestInvalidBloom(t *testing.T) {
api := NewConsensusAPI(ethservice) api := NewConsensusAPI(ethservice)
// Setup 10 blocks on the canonical chain // Setup 10 blocks on the canonical chain
setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Block) {}) setupBlocks(t, ethservice, 10, commonAncestor, func(parent *types.Header) {}, nil)
// (1) check LatestValidHash by sending a normal payload (P1'') // (1) check LatestValidHash by sending a normal payload (P1'')
payload := getNewPayload(t, api, commonAncestor) payload := getNewPayload(t, api, commonAncestor, nil)
payload.LogsBloom = append(payload.LogsBloom, byte(1)) payload.LogsBloom = append(payload.LogsBloom, byte(1))
status, err := api.NewPayloadV1(*payload) status, err := api.NewPayloadV1(*payload)
if err != nil { if err != nil {
@ -874,7 +880,7 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) {
n, ethservice := startEthService(t, genesis, preMergeBlocks) n, ethservice := startEthService(t, genesis, preMergeBlocks)
defer n.Close() defer n.Close()
genesis.Config.TerminalTotalDifficulty = preMergeBlocks[0].Difficulty() //.Sub(genesis.Config.TerminalTotalDifficulty, preMergeBlocks[len(preMergeBlocks)-1].Difficulty()) ethservice.BlockChain().Config().TerminalTotalDifficulty = preMergeBlocks[0].Difficulty() //.Sub(genesis.Config.TerminalTotalDifficulty, preMergeBlocks[len(preMergeBlocks)-1].Difficulty())
var ( var (
api = NewConsensusAPI(ethservice) api = NewConsensusAPI(ethservice)
@ -966,7 +972,7 @@ func TestSimultaneousNewBlock(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Failed to convert executable data to block %v", err) t.Fatalf("Failed to convert executable data to block %v", err)
} }
if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64()-1 { if ethservice.BlockChain().CurrentBlock().Number.Uint64() != block.NumberU64()-1 {
t.Fatalf("Chain head shouldn't be updated") t.Fatalf("Chain head shouldn't be updated")
} }
fcState := engine.ForkchoiceStateV1{ fcState := engine.ForkchoiceStateV1{
@ -997,7 +1003,7 @@ func TestSimultaneousNewBlock(t *testing.T) {
t.Fatal(testErr) t.Fatal(testErr)
} }
} }
if have, want := ethservice.BlockChain().CurrentBlock().NumberU64(), block.NumberU64(); have != want { if have, want := ethservice.BlockChain().CurrentBlock().Number.Uint64(), block.NumberU64(); have != want {
t.Fatalf("Chain head should be updated, have %d want %d", have, want) t.Fatalf("Chain head should be updated, have %d want %d", have, want)
} }
parent = block parent = block
@ -1233,8 +1239,10 @@ func TestNilWithdrawals(t *testing.T) {
} }
func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) { func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) {
genesis, preMergeBlocks := generateMergeChain(10, false) genesis, blocks := generateMergeChain(10, true)
n, ethservice := startEthService(t, genesis, preMergeBlocks) n, ethservice := startEthService(t, genesis, blocks)
// enable shanghai on the last block
ethservice.BlockChain().Config().ShanghaiTime = &blocks[len(blocks)-1].Header().Time
var ( var (
parent = ethservice.BlockChain().CurrentBlock() parent = ethservice.BlockChain().CurrentBlock()
@ -1242,15 +1250,45 @@ func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) {
logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
) )
callback := func(parent *types.Block) { callback := func(parent *types.Header) {
statedb, _ := ethservice.BlockChain().StateAt(parent.Root()) statedb, _ := ethservice.BlockChain().StateAt(parent.Root)
nonce := statedb.GetNonce(testAddr) nonce := statedb.GetNonce(testAddr)
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
ethservice.TxPool().AddLocal(tx) ethservice.TxPool().AddLocal(tx)
} }
postMergeBlocks := setupBlocks(t, ethservice, 10, parent, callback) withdrawals := make([][]*types.Withdrawal, 10)
return n, ethservice, append(preMergeBlocks, postMergeBlocks...) withdrawals[0] = nil // should be filtered out by miner
withdrawals[1] = make([]*types.Withdrawal, 0)
for i := 2; i < len(withdrawals); i++ {
addr := make([]byte, 20)
crand.Read(addr)
withdrawals[i] = []*types.Withdrawal{
{Index: rand.Uint64(), Validator: rand.Uint64(), Amount: rand.Uint64(), Address: common.BytesToAddress(addr)},
}
}
postShanghaiHeaders := setupBlocks(t, ethservice, 10, parent, callback, withdrawals)
postShanghaiBlocks := make([]*types.Block, len(postShanghaiHeaders))
for i, header := range postShanghaiHeaders {
postShanghaiBlocks[i] = ethservice.BlockChain().GetBlock(header.Hash(), header.Number.Uint64())
}
return n, ethservice, append(blocks, postShanghaiBlocks...)
}
func allHashes(blocks []*types.Block) []common.Hash {
var hashes []common.Hash
for _, b := range blocks {
hashes = append(hashes, b.Hash())
}
return hashes
}
func allBodies(blocks []*types.Block) []*types.Body {
var bodies []*types.Body
for _, b := range blocks {
bodies = append(bodies, b.Body())
}
return bodies
} }
func TestGetBlockBodiesByHash(t *testing.T) { func TestGetBlockBodiesByHash(t *testing.T) {
@ -1292,6 +1330,11 @@ func TestGetBlockBodiesByHash(t *testing.T) {
results: []*types.Body{blocks[0].Body(), nil, blocks[0].Body(), blocks[0].Body()}, results: []*types.Body{blocks[0].Body(), nil, blocks[0].Body(), blocks[0].Body()},
hashes: []common.Hash{blocks[0].Hash(), {1, 2}, blocks[0].Hash(), blocks[0].Hash()}, hashes: []common.Hash{blocks[0].Hash(), {1, 2}, blocks[0].Hash(), blocks[0].Hash()},
}, },
// all blocks
{
results: allBodies(blocks),
hashes: allHashes(blocks),
},
} }
for k, test := range tests { for k, test := range tests {
@ -1360,6 +1403,12 @@ func TestGetBlockBodiesByRange(t *testing.T) {
start: 22, start: 22,
count: 2, count: 2,
}, },
// allBlocks
{
results: allBodies(blocks),
start: 1,
count: hexutil.Uint64(len(blocks)),
},
} }
for k, test := range tests { for k, test := range tests {
@ -1430,15 +1479,14 @@ func equalBody(a *types.Body, b *engine.ExecutionPayloadBodyV1) bool {
} else if a == nil || b == nil { } else if a == nil || b == nil {
return false return false
} }
var want []hexutil.Bytes if len(a.Transactions) != len(b.TransactionData) {
for _, tx := range a.Transactions {
data, _ := tx.MarshalBinary()
want = append(want, hexutil.Bytes(data))
}
aBytes, errA := rlp.EncodeToBytes(want)
bBytes, errB := rlp.EncodeToBytes(b.TransactionData)
if errA != errB {
return false return false
} }
return bytes.Equal(aBytes, bBytes) for i, tx := range a.Transactions {
data, _ := tx.MarshalBinary()
if !bytes.Equal(data, b.TransactionData[i]) {
return false
}
}
return reflect.DeepEqual(a.Withdrawals, b.Withdrawals)
} }

View File

@ -31,9 +31,12 @@ import (
const maxTrackedPayloads = 10 const maxTrackedPayloads = 10
// maxTrackedHeaders is the maximum number of executed payloads the execution // maxTrackedHeaders is the maximum number of executed payloads the execution
// engine tracks before evicting old ones. Ideally we should only ever track the // engine tracks before evicting old ones. These are tracked outside the chain
// latest one; but have a slight wiggle room for non-ideal conditions. // during initial sync to allow ForkchoiceUpdate to reference past blocks via
const maxTrackedHeaders = 10 // hashes only. For the sync target it would be enough to track only the latest
// header, but snap sync also needs the latest finalized height for the ancient
// limit.
const maxTrackedHeaders = 96
// payloadQueueItem represents an id->payload tuple to store until it's retrieved // payloadQueueItem represents an id->payload tuple to store until it's retrieved
// or evicted. // or evicted.

View File

@ -75,7 +75,7 @@ func (tester *FullSyncTester) Start() error {
} }
// Trigger beacon sync with the provided block header as // Trigger beacon sync with the provided block header as
// trusted chain head. // trusted chain head.
err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header()) err := tester.api.eth.Downloader().BeaconSync(downloader.FullSync, tester.block.Header(), nil)
if err != nil { if err != nil {
log.Info("Failed to beacon sync", "err", err) log.Info("Failed to beacon sync", "err", err)
} }

View File

@ -76,7 +76,7 @@ func (b *beaconBackfiller) suspend() *types.Header {
// Sync cycle was just terminated, retrieve and return the last filled header. // Sync cycle was just terminated, retrieve and return the last filled header.
// Can't use `filled` as that contains a stale value from before cancellation. // Can't use `filled` as that contains a stale value from before cancellation.
return b.downloader.blockchain.CurrentFastBlock().Header() return b.downloader.blockchain.CurrentSnapBlock()
} }
// resume starts the downloader threads for backfilling state and chain data. // resume starts the downloader threads for backfilling state and chain data.
@ -101,7 +101,7 @@ func (b *beaconBackfiller) resume() {
defer func() { defer func() {
b.lock.Lock() b.lock.Lock()
b.filling = false b.filling = false
b.filled = b.downloader.blockchain.CurrentFastBlock().Header() b.filled = b.downloader.blockchain.CurrentSnapBlock()
b.lock.Unlock() b.lock.Unlock()
}() }()
// If the downloader fails, report an error as in beacon chain mode there // If the downloader fails, report an error as in beacon chain mode there
@ -151,8 +151,8 @@ func (d *Downloader) SetBadBlockCallback(onBadBlock badBlockFn) {
// //
// Internally backfilling and state sync is done the same way, but the header // Internally backfilling and state sync is done the same way, but the header
// retrieval and scheduling is replaced. // retrieval and scheduling is replaced.
func (d *Downloader) BeaconSync(mode SyncMode, head *types.Header) error { func (d *Downloader) BeaconSync(mode SyncMode, head *types.Header, final *types.Header) error {
return d.beaconSync(mode, head, true) return d.beaconSync(mode, head, final, true)
} }
// BeaconExtend is an optimistic version of BeaconSync, where an attempt is made // BeaconExtend is an optimistic version of BeaconSync, where an attempt is made
@ -162,7 +162,7 @@ func (d *Downloader) BeaconSync(mode SyncMode, head *types.Header) error {
// This is useful if a beacon client is feeding us large chunks of payloads to run, // This is useful if a beacon client is feeding us large chunks of payloads to run,
// but is not setting the head after each. // but is not setting the head after each.
func (d *Downloader) BeaconExtend(mode SyncMode, head *types.Header) error { func (d *Downloader) BeaconExtend(mode SyncMode, head *types.Header) error {
return d.beaconSync(mode, head, false) return d.beaconSync(mode, head, nil, false)
} }
// beaconSync is the post-merge version of the chain synchronization, where the // beaconSync is the post-merge version of the chain synchronization, where the
@ -171,7 +171,7 @@ func (d *Downloader) BeaconExtend(mode SyncMode, head *types.Header) error {
// //
// Internally backfilling and state sync is done the same way, but the header // Internally backfilling and state sync is done the same way, but the header
// retrieval and scheduling is replaced. // retrieval and scheduling is replaced.
func (d *Downloader) beaconSync(mode SyncMode, head *types.Header, force bool) error { func (d *Downloader) beaconSync(mode SyncMode, head *types.Header, final *types.Header, force bool) error {
// When the downloader starts a sync cycle, it needs to be aware of the sync // When the downloader starts a sync cycle, it needs to be aware of the sync
// mode to use (full, snap). To keep the skeleton chain oblivious, inject the // mode to use (full, snap). To keep the skeleton chain oblivious, inject the
// mode into the backfiller directly. // mode into the backfiller directly.
@ -181,7 +181,7 @@ func (d *Downloader) beaconSync(mode SyncMode, head *types.Header, force bool) e
d.skeleton.filler.(*beaconBackfiller).setMode(mode) d.skeleton.filler.(*beaconBackfiller).setMode(mode)
// Signal the skeleton sync to switch to a new head, however it wants // Signal the skeleton sync to switch to a new head, however it wants
if err := d.skeleton.Sync(head, force); err != nil { if err := d.skeleton.Sync(head, final, force); err != nil {
return err return err
} }
return nil return nil
@ -198,16 +198,16 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) {
switch d.getMode() { switch d.getMode() {
case FullSync: case FullSync:
chainHead = d.blockchain.CurrentBlock().Header() chainHead = d.blockchain.CurrentBlock()
case SnapSync: case SnapSync:
chainHead = d.blockchain.CurrentFastBlock().Header() chainHead = d.blockchain.CurrentSnapBlock()
default: default:
chainHead = d.lightchain.CurrentHeader() chainHead = d.lightchain.CurrentHeader()
} }
number := chainHead.Number.Uint64() number := chainHead.Number.Uint64()
// Retrieve the skeleton bounds and ensure they are linked to the local chain // Retrieve the skeleton bounds and ensure they are linked to the local chain
beaconHead, beaconTail, err := d.skeleton.Bounds() beaconHead, beaconTail, _, err := d.skeleton.Bounds()
if err != nil { if err != nil {
// This is a programming error. The chain backfiller was called with an // This is a programming error. The chain backfiller was called with an
// invalid beacon sync state. Ideally we would panic here, but erroring // invalid beacon sync state. Ideally we would panic here, but erroring
@ -272,7 +272,7 @@ func (d *Downloader) findBeaconAncestor() (uint64, error) {
// until sync errors or is finished. // until sync errors or is finished.
func (d *Downloader) fetchBeaconHeaders(from uint64) error { func (d *Downloader) fetchBeaconHeaders(from uint64) error {
var head *types.Header var head *types.Header
_, tail, err := d.skeleton.Bounds() _, tail, _, err := d.skeleton.Bounds()
if err != nil { if err != nil {
return err return err
} }
@ -292,7 +292,7 @@ func (d *Downloader) fetchBeaconHeaders(from uint64) error {
for { for {
// Some beacon headers might have appeared since the last cycle, make // Some beacon headers might have appeared since the last cycle, make
// sure we're always syncing to all available ones // sure we're always syncing to all available ones
head, _, err = d.skeleton.Bounds() head, _, _, err = d.skeleton.Bounds()
if err != nil { if err != nil {
return err return err
} }

View File

@ -196,10 +196,10 @@ type BlockChain interface {
GetBlockByHash(common.Hash) *types.Block GetBlockByHash(common.Hash) *types.Block
// CurrentBlock retrieves the head block from the local chain. // CurrentBlock retrieves the head block from the local chain.
CurrentBlock() *types.Block CurrentBlock() *types.Header
// CurrentFastBlock retrieves the head snap block from the local chain. // CurrentSnapBlock retrieves the head snap block from the local chain.
CurrentFastBlock() *types.Block CurrentSnapBlock() *types.Header
// SnapSyncCommitHead directly commits the head block to a certain entity. // SnapSyncCommitHead directly commits the head block to a certain entity.
SnapSyncCommitHead(common.Hash) error SnapSyncCommitHead(common.Hash) error
@ -236,7 +236,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain Bl
quitCh: make(chan struct{}), quitCh: make(chan struct{}),
SnapSyncer: snap.NewSyncer(stateDb, chain.TrieDB().Scheme()), SnapSyncer: snap.NewSyncer(stateDb, chain.TrieDB().Scheme()),
stateSyncStart: make(chan *stateSync), stateSyncStart: make(chan *stateSync),
syncStartBlock: chain.CurrentFastBlock().NumberU64(), syncStartBlock: chain.CurrentSnapBlock().Number.Uint64(),
} }
// Create the post-merge skeleton syncer and start the process // Create the post-merge skeleton syncer and start the process
dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success)) dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))
@ -261,9 +261,9 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
mode := d.getMode() mode := d.getMode()
switch { switch {
case d.blockchain != nil && mode == FullSync: case d.blockchain != nil && mode == FullSync:
current = d.blockchain.CurrentBlock().NumberU64() current = d.blockchain.CurrentBlock().Number.Uint64()
case d.blockchain != nil && mode == SnapSync: case d.blockchain != nil && mode == SnapSync:
current = d.blockchain.CurrentFastBlock().NumberU64() current = d.blockchain.CurrentSnapBlock().Number.Uint64()
case d.lightchain != nil: case d.lightchain != nil:
current = d.lightchain.CurrentHeader().Number.Uint64() current = d.lightchain.CurrentHeader().Number.Uint64()
default: default:
@ -480,7 +480,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
}(time.Now()) }(time.Now())
// Look up the sync boundaries: the common ancestor and the target block // Look up the sync boundaries: the common ancestor and the target block
var latest, pivot *types.Header var latest, pivot, final *types.Header
if !beaconMode { if !beaconMode {
// In legacy mode, use the master peer to retrieve the headers from // In legacy mode, use the master peer to retrieve the headers from
latest, pivot, err = d.fetchHead(p) latest, pivot, err = d.fetchHead(p)
@ -489,7 +489,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
} }
} else { } else {
// In beacon mode, use the skeleton chain to retrieve the headers from // In beacon mode, use the skeleton chain to retrieve the headers from
latest, _, err = d.skeleton.Bounds() latest, _, final, err = d.skeleton.Bounds()
if err != nil { if err != nil {
return err return err
} }
@ -499,7 +499,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
// Retrieve the pivot header from the skeleton chain segment but // Retrieve the pivot header from the skeleton chain segment but
// fallback to local chain if it's not found in skeleton space. // fallback to local chain if it's not found in skeleton space.
if pivot = d.skeleton.Header(number); pivot == nil { if pivot = d.skeleton.Header(number); pivot == nil {
_, oldest, _ := d.skeleton.Bounds() // error is already checked _, oldest, _, _ := d.skeleton.Bounds() // error is already checked
if number < oldest.Number.Uint64() { if number < oldest.Number.Uint64() {
count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks
headers := d.readHeaderRange(oldest, count) headers := d.readHeaderRange(oldest, count)
@ -523,7 +523,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
// anyway, but still need a valid pivot block to avoid some code hitting // anyway, but still need a valid pivot block to avoid some code hitting
// nil panics on access. // nil panics on access.
if mode == SnapSync && pivot == nil { if mode == SnapSync && pivot == nil {
pivot = d.blockchain.CurrentBlock().Header() pivot = d.blockchain.CurrentBlock()
} }
height := latest.Number.Uint64() height := latest.Number.Uint64()
@ -567,26 +567,41 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
d.committed = 0 d.committed = 0
} }
if mode == SnapSync { if mode == SnapSync {
// Set the ancient data limitation. // Set the ancient data limitation. If we are running snap sync, all block
// If we are running snap sync, all block data older than ancientLimit will be // data older than ancientLimit will be written to the ancient store. More
// written to the ancient store. More recent data will be written to the active // recent data will be written to the active database and will wait for the
// database and will wait for the freezer to migrate. // freezer to migrate.
// //
// If there is a checkpoint available, then calculate the ancientLimit through // If the network is post-merge, use either the last announced finalized
// that. Otherwise calculate the ancient limit through the advertised height // block as the ancient limit, or if we haven't yet received one, the head-
// of the remote peer. // a max fork ancestry limit. One quirky case if we've already passed the
// finalized block, in which case the skeleton.Bounds will return nil and
// we'll revert to head - 90K. That's fine, we're finishing sync anyway.
// //
// The reason for picking checkpoint first is that a malicious peer can give us // For non-merged networks, if there is a checkpoint available, then calculate
// a fake (very high) height, forcing the ancient limit to also be very high. // the ancientLimit through that. Otherwise calculate the ancient limit through
// The peer would start to feed us valid blocks until head, resulting in all of // the advertised height of the remote peer. This most is mostly a fallback for
// the blocks might be written into the ancient store. A following mini-reorg // legacy networks, but should eventually be droppped. TODO(karalabe).
// could cause issues. if beaconMode {
if d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 { // Beacon sync, use the latest finalized block as the ancient limit
d.ancientLimit = d.checkpoint // or a reasonable height if no finalized block is yet announced.
} else if height > fullMaxForkAncestry+1 { if final != nil {
d.ancientLimit = height - fullMaxForkAncestry - 1 d.ancientLimit = final.Number.Uint64()
} else if height > fullMaxForkAncestry+1 {
d.ancientLimit = height - fullMaxForkAncestry - 1
} else {
d.ancientLimit = 0
}
} else { } else {
d.ancientLimit = 0 // Legacy sync, use any hardcoded checkpoints or the best announcement
// we have from the remote peer. TODO(karalabe): Drop this pathway.
if d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 {
d.ancientLimit = d.checkpoint
} else if height > fullMaxForkAncestry+1 {
d.ancientLimit = height - fullMaxForkAncestry - 1
} else {
d.ancientLimit = 0
}
} }
frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here. frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
@ -819,9 +834,9 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header)
mode := d.getMode() mode := d.getMode()
switch mode { switch mode {
case FullSync: case FullSync:
localHeight = d.blockchain.CurrentBlock().NumberU64() localHeight = d.blockchain.CurrentBlock().Number.Uint64()
case SnapSync: case SnapSync:
localHeight = d.blockchain.CurrentFastBlock().NumberU64() localHeight = d.blockchain.CurrentSnapBlock().Number.Uint64()
default: default:
localHeight = d.lightchain.CurrentHeader().Number.Uint64() localHeight = d.lightchain.CurrentHeader().Number.Uint64()
} }
@ -1159,8 +1174,8 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) e
if mode == LightSync { if mode == LightSync {
head = d.lightchain.CurrentHeader().Number.Uint64() head = d.lightchain.CurrentHeader().Number.Uint64()
} else { } else {
head = d.blockchain.CurrentFastBlock().NumberU64() head = d.blockchain.CurrentSnapBlock().Number.Uint64()
if full := d.blockchain.CurrentBlock().NumberU64(); head < full { if full := d.blockchain.CurrentBlock().Number.Uint64(); head < full {
head = full head = full
} }
} }
@ -1274,8 +1289,8 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
if rollback > 0 { if rollback > 0 {
lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
if mode != LightSync { if mode != LightSync {
lastFastBlock = d.blockchain.CurrentFastBlock().Number() lastFastBlock = d.blockchain.CurrentSnapBlock().Number
lastBlock = d.blockchain.CurrentBlock().Number() lastBlock = d.blockchain.CurrentBlock().Number
} }
if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
// We're already unwinding the stack, only print the error to make it more visible // We're already unwinding the stack, only print the error to make it more visible
@ -1283,8 +1298,8 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
} }
curFastBlock, curBlock := common.Big0, common.Big0 curFastBlock, curBlock := common.Big0, common.Big0
if mode != LightSync { if mode != LightSync {
curFastBlock = d.blockchain.CurrentFastBlock().Number() curFastBlock = d.blockchain.CurrentSnapBlock().Number
curBlock = d.blockchain.CurrentBlock().Number() curBlock = d.blockchain.CurrentBlock().Number
} }
log.Warn("Rolled back chain segment", log.Warn("Rolled back chain segment",
"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
@ -1329,7 +1344,7 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
// R: Nothing to give // R: Nothing to give
if mode != LightSync { if mode != LightSync {
head := d.blockchain.CurrentBlock() head := d.blockchain.CurrentBlock()
if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 { if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
return errStallingPeer return errStallingPeer
} }
} }
@ -1566,7 +1581,7 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
// In post-merge, notify the engine API of encountered bad chains // In post-merge, notify the engine API of encountered bad chains
if d.badBlock != nil { if d.badBlock != nil {
head, _, err := d.skeleton.Bounds() head, _, _, err := d.skeleton.Bounds()
if err != nil { if err != nil {
log.Error("Failed to retrieve beacon bounds for bad block reporting", "err", err) log.Error("Failed to retrieve beacon bounds for bad block reporting", "err", err)
} else { } else {
@ -1853,14 +1868,14 @@ func (d *Downloader) reportSnapSyncProgress(force bool) {
} }
var ( var (
header = d.blockchain.CurrentHeader() header = d.blockchain.CurrentHeader()
block = d.blockchain.CurrentFastBlock() block = d.blockchain.CurrentSnapBlock()
) )
syncedBlocks := block.NumberU64() - d.syncStartBlock syncedBlocks := block.Number.Uint64() - d.syncStartBlock
if syncedBlocks == 0 { if syncedBlocks == 0 {
return return
} }
// Retrieve the current chain head and calculate the ETA // Retrieve the current chain head and calculate the ETA
latest, _, err := d.skeleton.Bounds() latest, _, _, err := d.skeleton.Bounds()
if err != nil { if err != nil {
// We're going to cheat for non-merged networks, but that's fine // We're going to cheat for non-merged networks, but that's fine
latest = d.pivotHeader latest = d.pivotHeader
@ -1872,13 +1887,13 @@ func (d *Downloader) reportSnapSyncProgress(force bool) {
return return
} }
var ( var (
left = latest.Number.Uint64() - block.NumberU64() left = latest.Number.Uint64() - block.Number.Uint64()
eta = time.Since(d.syncStartTime) / time.Duration(syncedBlocks) * time.Duration(left) eta = time.Since(d.syncStartTime) / time.Duration(syncedBlocks) * time.Duration(left)
progress = fmt.Sprintf("%.2f%%", float64(block.NumberU64())*100/float64(latest.Number.Uint64())) progress = fmt.Sprintf("%.2f%%", float64(block.Number.Uint64())*100/float64(latest.Number.Uint64()))
headers = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(header.Number.Uint64()), common.StorageSize(headerBytes).TerminalString()) headers = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(header.Number.Uint64()), common.StorageSize(headerBytes).TerminalString())
bodies = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.NumberU64()), common.StorageSize(bodyBytes).TerminalString()) bodies = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(bodyBytes).TerminalString())
receipts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.NumberU64()), common.StorageSize(receiptBytes).TerminalString()) receipts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(receiptBytes).TerminalString())
) )
log.Info("Syncing: chain download in progress", "synced", progress, "chain", syncedBytes, "headers", headers, "bodies", bodies, "receipts", receipts, "eta", common.PrettyDuration(eta)) log.Info("Syncing: chain download in progress", "synced", progress, "chain", syncedBytes, "headers", headers, "bodies", bodies, "receipts", receipts, "eta", common.PrettyDuration(eta))
d.syncLogTime = time.Now() d.syncLogTime = time.Now()

View File

@ -100,7 +100,7 @@ func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
head := dl.peers[id].chain.CurrentBlock() head := dl.peers[id].chain.CurrentBlock()
if td == nil { if td == nil {
// If no particular TD was requested, load from the peer's blockchain // If no particular TD was requested, load from the peer's blockchain
td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64()) td = dl.peers[id].chain.GetTd(head.Hash(), head.Number.Uint64())
} }
// Synchronise with the chosen peer and ensure proper cleanup afterwards // Synchronise with the chosen peer and ensure proper cleanup afterwards
err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil) err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
@ -158,7 +158,7 @@ type downloadTesterPeer struct {
// and total difficulty. // and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) { func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
head := dlp.chain.CurrentBlock() head := dlp.chain.CurrentBlock()
return head.Hash(), dlp.chain.GetTd(head.Hash(), head.NumberU64()) return head.Hash(), dlp.chain.GetTd(head.Hash(), head.Number.Uint64())
} }
func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
@ -430,10 +430,10 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers { if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers {
t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers)
} }
if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != blocks { if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != blocks {
t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks) t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks)
} }
if rs := int(tester.chain.CurrentFastBlock().NumberU64()) + 1; rs != receipts { if rs := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1; rs != receipts {
t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts) t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts)
} }
} }
@ -490,7 +490,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
for { for {
// Check the retrieval count synchronously (! reason for this ugly block) // Check the retrieval count synchronously (! reason for this ugly block)
tester.lock.RLock() tester.lock.RLock()
retrieved := int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1 retrieved := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
tester.lock.RUnlock() tester.lock.RUnlock()
if retrieved >= targetBlocks+1 { if retrieved >= targetBlocks+1 {
break break
@ -506,7 +506,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
{ {
cached = tester.downloader.queue.resultCache.countCompleted() cached = tester.downloader.queue.resultCache.countCompleted()
frozen = int(atomic.LoadUint32(&blocked)) frozen = int(atomic.LoadUint32(&blocked))
retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1 retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
} }
tester.downloader.queue.resultCache.lock.Unlock() tester.downloader.queue.resultCache.lock.Unlock()
tester.downloader.queue.lock.Unlock() tester.downloader.queue.lock.Unlock()
@ -522,7 +522,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
// Make sure we filled up the cache, then exhaust it // Make sure we filled up the cache, then exhaust it
time.Sleep(25 * time.Millisecond) // give it a chance to screw up time.Sleep(25 * time.Millisecond) // give it a chance to screw up
tester.lock.RLock() tester.lock.RLock()
retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1 retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1
tester.lock.RUnlock() tester.lock.RUnlock()
if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay { if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1) t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)
@ -921,7 +921,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
} }
if mode == SnapSync { if mode == SnapSync {
if head := tester.chain.CurrentBlock().NumberU64(); head != 0 { if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 {
t.Errorf("fast sync pivot block #%d not rolled back", head) t.Errorf("fast sync pivot block #%d not rolled back", head)
} }
} }
@ -943,7 +943,7 @@ func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {
t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)
} }
if mode == SnapSync { if mode == SnapSync {
if head := tester.chain.CurrentBlock().NumberU64(); head != 0 { if head := tester.chain.CurrentBlock().Number.Uint64(); head != 0 {
t.Errorf("fast sync pivot block #%d not rolled back", head) t.Errorf("fast sync pivot block #%d not rolled back", head)
} }
} }
@ -1478,13 +1478,13 @@ func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
if c.local > 0 { if c.local > 0 {
tester.chain.InsertChain(chain.blocks[1 : c.local+1]) tester.chain.InsertChain(chain.blocks[1 : c.local+1])
} }
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header()); err != nil { if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
t.Fatalf("Failed to beacon sync chain %v %v", c.name, err) t.Fatalf("Failed to beacon sync chain %v %v", c.name, err)
} }
select { select {
case <-success: case <-success:
// Ok, downloader fully cancelled after sync cycle // Ok, downloader fully cancelled after sync cycle
if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != len(chain.blocks) { if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != len(chain.blocks) {
t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks)) t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks))
} }
case <-time.NewTimer(time.Second * 3).C: case <-time.NewTimer(time.Second * 3).C:

View File

@ -102,6 +102,7 @@ type subchain struct {
// suspended skeleton sync without prior knowledge of all prior suspension points. // suspended skeleton sync without prior knowledge of all prior suspension points.
type skeletonProgress struct { type skeletonProgress struct {
Subchains []*subchain // Disjoint subchains downloaded until now Subchains []*subchain // Disjoint subchains downloaded until now
Finalized *uint64 // Last known finalized block number
} }
// headUpdate is a notification that the beacon sync should switch to a new target. // headUpdate is a notification that the beacon sync should switch to a new target.
@ -109,6 +110,7 @@ type skeletonProgress struct {
// extend it and fail if it's not possible. // extend it and fail if it's not possible.
type headUpdate struct { type headUpdate struct {
header *types.Header // Header to update the sync target to header *types.Header // Header to update the sync target to
final *types.Header // Finalized header to use as thresholds
force bool // Whether to force the update or only extend if possible force bool // Whether to force the update or only extend if possible
errc chan error // Channel to signal acceptance of the new head errc chan error // Channel to signal acceptance of the new head
} }
@ -321,12 +323,12 @@ func (s *skeleton) Terminate() error {
// //
// This method does not block, rather it just waits until the syncer receives the // This method does not block, rather it just waits until the syncer receives the
// fed header. What the syncer does with it is the syncer's problem. // fed header. What the syncer does with it is the syncer's problem.
func (s *skeleton) Sync(head *types.Header, force bool) error { func (s *skeleton) Sync(head *types.Header, final *types.Header, force bool) error {
log.Trace("New skeleton head announced", "number", head.Number, "hash", head.Hash(), "force", force) log.Trace("New skeleton head announced", "number", head.Number, "hash", head.Hash(), "force", force)
errc := make(chan error) errc := make(chan error)
select { select {
case s.headEvents <- &headUpdate{header: head, force: force, errc: errc}: case s.headEvents <- &headUpdate{header: head, final: final, force: force, errc: errc}:
return <-errc return <-errc
case <-s.terminated: case <-s.terminated:
return errTerminated return errTerminated
@ -437,7 +439,7 @@ func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
// we don't seamlessly integrate reorgs to keep things simple. If the // we don't seamlessly integrate reorgs to keep things simple. If the
// network starts doing many mini reorgs, it might be worthwhile handling // network starts doing many mini reorgs, it might be worthwhile handling
// a limited depth without an error. // a limited depth without an error.
if reorged := s.processNewHead(event.header, event.force); reorged { if reorged := s.processNewHead(event.header, event.final, event.force); reorged {
// If a reorg is needed, and we're forcing the new head, signal // If a reorg is needed, and we're forcing the new head, signal
// the syncer to tear down and start over. Otherwise, drop the // the syncer to tear down and start over. Otherwise, drop the
// non-force reorg. // non-force reorg.
@ -590,7 +592,17 @@ func (s *skeleton) saveSyncStatus(db ethdb.KeyValueWriter) {
// accepts and integrates it into the skeleton or requests a reorg. Upon reorg, // accepts and integrates it into the skeleton or requests a reorg. Upon reorg,
// the syncer will tear itself down and restart with a fresh head. It is simpler // the syncer will tear itself down and restart with a fresh head. It is simpler
// to reconstruct the sync state than to mutate it and hope for the best. // to reconstruct the sync state than to mutate it and hope for the best.
func (s *skeleton) processNewHead(head *types.Header, force bool) bool { func (s *skeleton) processNewHead(head *types.Header, final *types.Header, force bool) bool {
// If a new finalized block was announced, update the sync process independent
// of what happens with the sync head below
if final != nil {
if number := final.Number.Uint64(); s.progress.Finalized == nil || *s.progress.Finalized != number {
s.progress.Finalized = new(uint64)
*s.progress.Finalized = final.Number.Uint64()
s.saveSyncStatus(s.db)
}
}
// If the header cannot be inserted without interruption, return an error for // If the header cannot be inserted without interruption, return an error for
// the outer loop to tear down the skeleton sync and restart it // the outer loop to tear down the skeleton sync and restart it
number := head.Number.Uint64() number := head.Number.Uint64()
@ -1150,9 +1162,10 @@ func (s *skeleton) cleanStales(filled *types.Header) error {
return nil return nil
} }
// Bounds retrieves the current head and tail tracked by the skeleton syncer. // Bounds retrieves the current head and tail tracked by the skeleton syncer
// This method is used by the backfiller, whose life cycle is controlled by the // and optionally the last known finalized header if any was announced and if
// skeleton syncer. // it is still in the sync range. This method is used by the backfiller, whose
// life cycle is controlled by the skeleton syncer.
// //
// Note, the method will not use the internal state of the skeleton, but will // Note, the method will not use the internal state of the skeleton, but will
// rather blindly pull stuff from the database. This is fine, because the back- // rather blindly pull stuff from the database. This is fine, because the back-
@ -1160,28 +1173,34 @@ func (s *skeleton) cleanStales(filled *types.Header) error {
// There might be new heads appended, but those are atomic from the perspective // There might be new heads appended, but those are atomic from the perspective
// of this method. Any head reorg will first tear down the backfiller and only // of this method. Any head reorg will first tear down the backfiller and only
// then make the modification. // then make the modification.
func (s *skeleton) Bounds() (head *types.Header, tail *types.Header, err error) { func (s *skeleton) Bounds() (head *types.Header, tail *types.Header, final *types.Header, err error) {
// Read the current sync progress from disk and figure out the current head. // Read the current sync progress from disk and figure out the current head.
// Although there's a lot of error handling here, these are mostly as sanity // Although there's a lot of error handling here, these are mostly as sanity
// checks to avoid crashing if a programming error happens. These should not // checks to avoid crashing if a programming error happens. These should not
// happen in live code. // happen in live code.
status := rawdb.ReadSkeletonSyncStatus(s.db) status := rawdb.ReadSkeletonSyncStatus(s.db)
if len(status) == 0 { if len(status) == 0 {
return nil, nil, errors.New("beacon sync not yet started") return nil, nil, nil, errors.New("beacon sync not yet started")
} }
progress := new(skeletonProgress) progress := new(skeletonProgress)
if err := json.Unmarshal(status, progress); err != nil { if err := json.Unmarshal(status, progress); err != nil {
return nil, nil, err return nil, nil, nil, err
} }
head = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Head) head = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Head)
if head == nil { if head == nil {
return nil, nil, fmt.Errorf("head skeleton header %d is missing", progress.Subchains[0].Head) return nil, nil, nil, fmt.Errorf("head skeleton header %d is missing", progress.Subchains[0].Head)
} }
tail = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Tail) tail = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Tail)
if tail == nil { if tail == nil {
return nil, nil, fmt.Errorf("tail skeleton header %d is missing", progress.Subchains[0].Tail) return nil, nil, nil, fmt.Errorf("tail skeleton header %d is missing", progress.Subchains[0].Tail)
} }
return head, tail, nil if progress.Finalized != nil && tail.Number.Uint64() <= *progress.Finalized && *progress.Finalized <= head.Number.Uint64() {
final = rawdb.ReadSkeletonHeader(s.db, *progress.Finalized)
if final == nil {
return nil, nil, nil, fmt.Errorf("finalized skeleton header %d is missing", *progress.Finalized)
}
}
return head, tail, final, nil
} }
// Header retrieves a specific header tracked by the skeleton syncer. This method // Header retrieves a specific header tracked by the skeleton syncer. This method

View File

@ -370,7 +370,7 @@ func TestSkeletonSyncInit(t *testing.T) {
skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller()) skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
skeleton.syncStarting = func() { close(wait) } skeleton.syncStarting = func() { close(wait) }
skeleton.Sync(tt.head, true) skeleton.Sync(tt.head, nil, true)
<-wait <-wait
skeleton.Terminate() skeleton.Terminate()
@ -484,10 +484,10 @@ func TestSkeletonSyncExtend(t *testing.T) {
skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller()) skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
skeleton.syncStarting = func() { close(wait) } skeleton.syncStarting = func() { close(wait) }
skeleton.Sync(tt.head, true) skeleton.Sync(tt.head, nil, true)
<-wait <-wait
if err := skeleton.Sync(tt.extend, false); err != tt.err { if err := skeleton.Sync(tt.extend, nil, false); err != tt.err {
t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err) t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err)
} }
skeleton.Terminate() skeleton.Terminate()
@ -859,7 +859,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
} }
// Create a skeleton sync and run a cycle // Create a skeleton sync and run a cycle
skeleton := newSkeleton(db, peerset, drop, filler) skeleton := newSkeleton(db, peerset, drop, filler)
skeleton.Sync(tt.head, true) skeleton.Sync(tt.head, nil, true)
var progress skeletonProgress var progress skeletonProgress
// Wait a bit (bleah) for the initial sync loop to go to idle. This might // Wait a bit (bleah) for the initial sync loop to go to idle. This might
@ -910,7 +910,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
} }
// Apply the post-init events if there's any // Apply the post-init events if there's any
if tt.newHead != nil { if tt.newHead != nil {
skeleton.Sync(tt.newHead, true) skeleton.Sync(tt.newHead, nil, true)
} }
if tt.newPeer != nil { if tt.newPeer != nil {
if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil { if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {

View File

@ -49,10 +49,10 @@ func (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber
number = 0 number = 0
} }
if number == rpc.FinalizedBlockNumber { if number == rpc.FinalizedBlockNumber {
return b.chain.CurrentFinalizedBlock().Header(), nil return b.chain.CurrentFinalBlock(), nil
} }
if number == rpc.SafeBlockNumber { if number == rpc.SafeBlockNumber {
return b.chain.CurrentSafeBlock().Header(), nil return b.chain.CurrentSafeBlock(), nil
} }
if number == rpc.LatestBlockNumber { if number == rpc.LatestBlockNumber {
number = testHead number = testHead
@ -75,10 +75,10 @@ func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber)
number = 0 number = 0
} }
if number == rpc.FinalizedBlockNumber { if number == rpc.FinalizedBlockNumber {
return b.chain.CurrentFinalizedBlock(), nil number = rpc.BlockNumber(b.chain.CurrentFinalBlock().Number.Uint64())
} }
if number == rpc.SafeBlockNumber { if number == rpc.SafeBlockNumber {
return b.chain.CurrentSafeBlock(), nil number = rpc.BlockNumber(b.chain.CurrentSafeBlock().Number.Uint64())
} }
if number == rpc.LatestBlockNumber { if number == rpc.LatestBlockNumber {
number = testHead number = testHead
@ -169,8 +169,8 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
t.Fatalf("Failed to create local chain, %v", err) t.Fatalf("Failed to create local chain, %v", err)
} }
chain.InsertChain(blocks) chain.InsertChain(blocks)
chain.SetFinalized(chain.GetBlockByNumber(25)) chain.SetFinalized(chain.GetBlockByNumber(25).Header())
chain.SetSafe(chain.GetBlockByNumber(25)) chain.SetSafe(chain.GetBlockByNumber(25).Header())
return &testBackend{chain: chain, pending: pending} return &testBackend{chain: chain, pending: pending}
} }

View File

@ -152,13 +152,13 @@ func newHandler(config *handlerConfig) (*handler, error) {
// * the last snap sync is not finished while user specifies a full sync this // * the last snap sync is not finished while user specifies a full sync this
// time. But we don't have any recent state for full sync. // time. But we don't have any recent state for full sync.
// In these cases however it's safe to reenable snap sync. // In these cases however it's safe to reenable snap sync.
fullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock() fullBlock, snapBlock := h.chain.CurrentBlock(), h.chain.CurrentSnapBlock()
if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 { if fullBlock.Number.Uint64() == 0 && snapBlock.Number.Uint64() > 0 {
h.snapSync = uint32(1) h.snapSync = uint32(1)
log.Warn("Switch sync mode from full sync to snap sync") log.Warn("Switch sync mode from full sync to snap sync")
} }
} else { } else {
if h.chain.CurrentBlock().NumberU64() > 0 { if h.chain.CurrentBlock().Number.Uint64() > 0 {
// Print warning log if database is not empty to run snap sync. // Print warning log if database is not empty to run snap sync.
log.Warn("Switch sync mode from snap sync to full sync") log.Warn("Switch sync mode from snap sync to full sync")
} else { } else {
@ -183,10 +183,10 @@ func newHandler(config *handlerConfig) (*handler, error) {
// If we've successfully finished a sync cycle and passed any required // If we've successfully finished a sync cycle and passed any required
// checkpoint, enable accepting transactions from the network // checkpoint, enable accepting transactions from the network
head := h.chain.CurrentBlock() head := h.chain.CurrentBlock()
if head.NumberU64() >= h.checkpointNumber { if head.Number.Uint64() >= h.checkpointNumber {
// Checkpoint passed, sanity check the timestamp to have a fallback mechanism // Checkpoint passed, sanity check the timestamp to have a fallback mechanism
// for non-checkpointed (number = 0) private networks. // for non-checkpointed (number = 0) private networks.
if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) { if head.Time >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
atomic.StoreUint32(&h.acceptTxs, 1) atomic.StoreUint32(&h.acceptTxs, 1)
} }
} }
@ -198,7 +198,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
log.Info("Chain post-merge, sync via beacon client") log.Info("Chain post-merge, sync via beacon client")
} else { } else {
head := h.chain.CurrentBlock() head := h.chain.CurrentBlock()
if td := h.chain.GetTd(head.Hash(), head.NumberU64()); td.Cmp(ttd) >= 0 { if td := h.chain.GetTd(head.Hash(), head.Number.Uint64()); td.Cmp(ttd) >= 0 {
log.Info("Chain post-TTD, sync via beacon client") log.Info("Chain post-TTD, sync via beacon client")
} else { } else {
log.Warn("Chain pre-merge, sync via PoW (ensure beacon client is ready)") log.Warn("Chain pre-merge, sync via PoW (ensure beacon client is ready)")
@ -227,7 +227,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
return h.chain.Engine().VerifyHeader(h.chain, header, true) return h.chain.Engine().VerifyHeader(h.chain, header, true)
} }
heighter := func() uint64 { heighter := func() uint64 {
return h.chain.CurrentBlock().NumberU64() return h.chain.CurrentBlock().Number.Uint64()
} }
inserter := func(blocks types.Blocks) (int, error) { inserter := func(blocks types.Blocks) (int, error) {
// All the block fetcher activities should be disabled // All the block fetcher activities should be disabled
@ -250,7 +250,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
// the propagated block if the head is too old. Unfortunately there is a corner // the propagated block if the head is too old. Unfortunately there is a corner
// case when starting new networks, where the genesis might be ancient (0 unix) // case when starting new networks, where the genesis might be ancient (0 unix)
// which would prevent full nodes from accepting it. // which would prevent full nodes from accepting it.
if h.chain.CurrentBlock().NumberU64() < h.checkpointNumber { if h.chain.CurrentBlock().Number.Uint64() < h.checkpointNumber {
log.Warn("Unsynced yet, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) log.Warn("Unsynced yet, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
return 0, nil return 0, nil
} }

View File

@ -274,7 +274,7 @@ func testRecvTransactions(t *testing.T, protocol uint) {
var ( var (
genesis = handler.chain.Genesis() genesis = handler.chain.Genesis()
head = handler.chain.CurrentBlock() head = handler.chain.CurrentBlock()
td = handler.chain.GetTd(head.Hash(), head.NumberU64()) td = handler.chain.GetTd(head.Hash(), head.Number.Uint64())
) )
if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
t.Fatalf("failed to run protocol handshake") t.Fatalf("failed to run protocol handshake")
@ -337,7 +337,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
var ( var (
genesis = handler.chain.Genesis() genesis = handler.chain.Genesis()
head = handler.chain.CurrentBlock() head = handler.chain.CurrentBlock()
td = handler.chain.GetTd(head.Hash(), head.NumberU64()) td = handler.chain.GetTd(head.Hash(), head.Number.Uint64())
) )
if err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { if err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
t.Fatalf("failed to run protocol handshake") t.Fatalf("failed to run protocol handshake")
@ -545,7 +545,7 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
var ( var (
genesis = handler.chain.Genesis() genesis = handler.chain.Genesis()
head = handler.chain.CurrentBlock() head = handler.chain.CurrentBlock()
td = handler.chain.GetTd(head.Hash(), head.NumberU64()) td = handler.chain.GetTd(head.Hash(), head.Number.Uint64())
) )
if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
t.Fatalf("failed to run protocol handshake") t.Fatalf("failed to run protocol handshake")
@ -661,7 +661,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
} }
// Initiate a block propagation across the peers // Initiate a block propagation across the peers
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
source.handler.BroadcastBlock(source.chain.CurrentBlock(), true) header := source.chain.CurrentBlock()
source.handler.BroadcastBlock(source.chain.GetBlock(header.Hash(), header.Number.Uint64()), true)
// Iterate through all the sinks and ensure the correct number got the block // Iterate through all the sinks and ensure the correct number got the block
done := make(chan struct{}, peers) done := make(chan struct{}, peers)
@ -734,18 +735,19 @@ func testBroadcastMalformedBlock(t *testing.T, protocol uint) {
// Create various combinations of malformed blocks // Create various combinations of malformed blocks
head := source.chain.CurrentBlock() head := source.chain.CurrentBlock()
block := source.chain.GetBlock(head.Hash(), head.Number.Uint64())
malformedUncles := head.Header() malformedUncles := head
malformedUncles.UncleHash[0]++ malformedUncles.UncleHash[0]++
malformedTransactions := head.Header() malformedTransactions := head
malformedTransactions.TxHash[0]++ malformedTransactions.TxHash[0]++
malformedEverything := head.Header() malformedEverything := head
malformedEverything.UncleHash[0]++ malformedEverything.UncleHash[0]++
malformedEverything.TxHash[0]++ malformedEverything.TxHash[0]++
// Try to broadcast all malformations and ensure they all get discarded // Try to broadcast all malformations and ensure they all get discarded
for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} { for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} {
block := types.NewBlockWithHeader(header).WithBody(head.Transactions(), head.Uncles()) block := types.NewBlockWithHeader(header).WithBody(block.Transactions(), block.Uncles())
if err := src.SendNewBlock(block, big.NewInt(131136)); err != nil { if err := src.SendNewBlock(block, big.NewInt(131136)); err != nil {
t.Fatalf("failed to broadcast block: %v", err) t.Fatalf("failed to broadcast block: %v", err)
} }

View File

@ -17,8 +17,6 @@
package eth package eth
import ( import (
"math/big"
"github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/eth/protocols/snap"
) )
@ -26,9 +24,7 @@ import (
// ethPeerInfo represents a short summary of the `eth` sub-protocol metadata known // ethPeerInfo represents a short summary of the `eth` sub-protocol metadata known
// about a connected peer. // about a connected peer.
type ethPeerInfo struct { type ethPeerInfo struct {
Version uint `json:"version"` // Ethereum protocol version negotiated Version uint `json:"version"` // Ethereum protocol version negotiated
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the peer's blockchain
Head string `json:"head"` // Hex hash of the peer's best owned block
} }
// ethPeer is a wrapper around eth.Peer to maintain a few extra metadata. // ethPeer is a wrapper around eth.Peer to maintain a few extra metadata.
@ -39,12 +35,8 @@ type ethPeer struct {
// info gathers and returns some `eth` protocol metadata known about a peer. // info gathers and returns some `eth` protocol metadata known about a peer.
func (p *ethPeer) info() *ethPeerInfo { func (p *ethPeer) info() *ethPeerInfo {
hash, td := p.Head()
return &ethPeerInfo{ return &ethPeerInfo{
Version: p.Version(), Version: p.Version(),
Difficulty: td,
Head: hash.Hex(),
} }
} }

View File

@ -137,12 +137,14 @@ type NodeInfo struct {
// nodeInfo retrieves some `eth` protocol metadata about the running host node. // nodeInfo retrieves some `eth` protocol metadata about the running host node.
func nodeInfo(chain *core.BlockChain, network uint64) *NodeInfo { func nodeInfo(chain *core.BlockChain, network uint64) *NodeInfo {
head := chain.CurrentBlock() head := chain.CurrentBlock()
hash := head.Hash()
return &NodeInfo{ return &NodeInfo{
Network: network, Network: network,
Difficulty: chain.GetTd(head.Hash(), head.NumberU64()), Difficulty: chain.GetTd(hash, head.Number.Uint64()),
Genesis: chain.Genesis().Hash(), Genesis: chain.Genesis().Hash(),
Config: chain.Config(), Config: chain.Config(),
Head: head.Hash(), Head: hash,
} }
} }

View File

@ -225,24 +225,24 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
[]common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, []common.Hash{backend.chain.GetBlockByNumber(0).Hash()},
}, },
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64()}, Amount: 1}, &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1},
[]common.Hash{backend.chain.CurrentBlock().Hash()}, []common.Hash{backend.chain.CurrentBlock().Hash()},
}, },
{ // If the peer requests a bit into the future, we deliver what we have { // If the peer requests a bit into the future, we deliver what we have
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64()}, Amount: 10}, &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10},
[]common.Hash{backend.chain.CurrentBlock().Hash()}, []common.Hash{backend.chain.CurrentBlock().Hash()},
}, },
// Ensure protocol limits are honored // Ensure protocol limits are honored
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
getHashes(backend.chain.CurrentBlock().NumberU64(), limit), getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit),
}, },
// Check that requesting more than available is handled gracefully // Check that requesting more than available is handled gracefully
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64()).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
@ -253,10 +253,10 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}, },
// Check that requesting more than available is handled gracefully, even if mid skip // Check that requesting more than available is handled gracefully, even if mid skip
{ {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
[]common.Hash{ []common.Hash{
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 4).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(),
backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 1).Hash(), backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(),
}, },
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
@ -293,7 +293,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1},
[]common.Hash{}, []common.Hash{},
}, { }, {
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() + 1}, Amount: 1}, &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
[]common.Hash{}, []common.Hash{},
}, },
} }
@ -394,7 +394,7 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
) )
for j := 0; j < tt.random; j++ { for j := 0; j < tt.random; j++ {
for { for {
num := rand.Int63n(int64(backend.chain.CurrentBlock().NumberU64())) num := rand.Int63n(int64(backend.chain.CurrentBlock().Number.Uint64()))
if !seen[num] { if !seen[num] {
seen[num] = true seen[num] = true
@ -529,7 +529,7 @@ func testGetNodeData(t *testing.T, protocol uint, drop bool) {
// Sanity check whether all state matches. // Sanity check whether all state matches.
accounts := []common.Address{testAddr, acc1Addr, acc2Addr} accounts := []common.Address{testAddr, acc1Addr, acc2Addr}
for i := uint64(0); i <= backend.chain.CurrentBlock().NumberU64(); i++ { for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ {
root := backend.chain.GetBlockByNumber(i).Root() root := backend.chain.GetBlockByNumber(i).Root()
reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil) reconstructed, _ := state.New(root, state.NewDatabase(reconstructDB), nil)
for j, acc := range accounts { for j, acc := range accounts {
@ -602,7 +602,7 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
hashes []common.Hash hashes []common.Hash
receipts [][]*types.Receipt receipts [][]*types.Receipt
) )
for i := uint64(0); i <= backend.chain.CurrentBlock().NumberU64(); i++ { for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ {
block := backend.chain.GetBlockByNumber(i) block := backend.chain.GetBlockByNumber(i)
hashes = append(hashes, block.Hash()) hashes = append(hashes, block.Hash())

View File

@ -39,7 +39,7 @@ func testHandshake(t *testing.T, protocol uint) {
var ( var (
genesis = backend.chain.Genesis() genesis = backend.chain.Genesis()
head = backend.chain.CurrentBlock() head = backend.chain.CurrentBlock()
td = backend.chain.GetTd(head.Hash(), head.NumberU64()) td = backend.chain.GetTd(head.Hash(), head.Number.Uint64())
forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time) forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time)
) )
tests := []struct { tests := []struct {

View File

@ -206,22 +206,22 @@ func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {
func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
// If we're in snap sync mode, return that directly // If we're in snap sync mode, return that directly
if atomic.LoadUint32(&cs.handler.snapSync) == 1 { if atomic.LoadUint32(&cs.handler.snapSync) == 1 {
block := cs.handler.chain.CurrentFastBlock() block := cs.handler.chain.CurrentSnapBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64()) td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
return downloader.SnapSync, td return downloader.SnapSync, td
} }
// We are probably in full sync, but we might have rewound to before the // We are probably in full sync, but we might have rewound to before the
// snap sync pivot, check if we should reenable // snap sync pivot, check if we should reenable
if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil { if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot { if head := cs.handler.chain.CurrentBlock(); head.Number.Uint64() < *pivot {
block := cs.handler.chain.CurrentFastBlock() block := cs.handler.chain.CurrentSnapBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64()) td := cs.handler.chain.GetTd(block.Hash(), block.Number.Uint64())
return downloader.SnapSync, td return downloader.SnapSync, td
} }
} }
// Nope, we're really full syncing // Nope, we're really full syncing
head := cs.handler.chain.CurrentBlock() head := cs.handler.chain.CurrentBlock()
td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64()) td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64())
return downloader.FullSync, td return downloader.FullSync, td
} }
@ -263,21 +263,23 @@ func (h *handler) doSync(op *chainSyncOp) error {
// If we've successfully finished a sync cycle and passed any required checkpoint, // If we've successfully finished a sync cycle and passed any required checkpoint,
// enable accepting transactions from the network. // enable accepting transactions from the network.
head := h.chain.CurrentBlock() head := h.chain.CurrentBlock()
if head.NumberU64() >= h.checkpointNumber { if head.Number.Uint64() >= h.checkpointNumber {
// Checkpoint passed, sanity check the timestamp to have a fallback mechanism // Checkpoint passed, sanity check the timestamp to have a fallback mechanism
// for non-checkpointed (number = 0) private networks. // for non-checkpointed (number = 0) private networks.
if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) { if head.Time >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
atomic.StoreUint32(&h.acceptTxs, 1) atomic.StoreUint32(&h.acceptTxs, 1)
} }
} }
if head.NumberU64() > 0 { if head.Number.Uint64() > 0 {
// We've completed a sync cycle, notify all peers of new state. This path is // We've completed a sync cycle, notify all peers of new state. This path is
// essential in star-topology networks where a gateway node needs to notify // essential in star-topology networks where a gateway node needs to notify
// all its out-of-date peers of the availability of a new block. This failure // all its out-of-date peers of the availability of a new block. This failure
// scenario will most often crop up in private and hackathon networks with // scenario will most often crop up in private and hackathon networks with
// degenerate connectivity, but it should be healthy for the mainnet too to // degenerate connectivity, but it should be healthy for the mainnet too to
// more reliably update peers or the local TD state. // more reliably update peers or the local TD state.
h.BroadcastBlock(head, false) if block := h.chain.GetBlock(head.Hash(), head.Number.Uint64()); block != nil {
h.BroadcastBlock(block, false)
}
} }
return nil return nil
} }

View File

@ -295,9 +295,10 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed
for i, tx := range task.block.Transactions() { for i, tx := range task.block.Transactions() {
msg, _ := tx.AsMessage(signer, task.block.BaseFee()) msg, _ := tx.AsMessage(signer, task.block.BaseFee())
txctx := &Context{ txctx := &Context{
BlockHash: task.block.Hash(), BlockHash: task.block.Hash(),
TxIndex: i, BlockNumber: task.block.Number(),
TxHash: tx.Hash(), TxIndex: i,
TxHash: tx.Hash(),
} }
res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config) res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)
if err != nil { if err != nil {
@ -629,9 +630,10 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
// Generate the next state snapshot fast without tracing // Generate the next state snapshot fast without tracing
msg, _ := tx.AsMessage(signer, block.BaseFee()) msg, _ := tx.AsMessage(signer, block.BaseFee())
txctx := &Context{ txctx := &Context{
BlockHash: blockHash, BlockHash: blockHash,
TxIndex: i, BlockNumber: block.Number(),
TxHash: tx.Hash(), TxIndex: i,
TxHash: tx.Hash(),
} }
res, err := api.traceTx(ctx, msg, txctx, blockCtx, statedb, config) res, err := api.traceTx(ctx, msg, txctx, blockCtx, statedb, config)
if err != nil { if err != nil {
@ -671,9 +673,10 @@ func (api *API) traceBlockParallel(ctx context.Context, block *types.Block, stat
for task := range jobs { for task := range jobs {
msg, _ := txs[task.index].AsMessage(signer, block.BaseFee()) msg, _ := txs[task.index].AsMessage(signer, block.BaseFee())
txctx := &Context{ txctx := &Context{
BlockHash: blockHash, BlockHash: blockHash,
TxIndex: task.index, BlockNumber: block.Number(),
TxHash: txs[task.index].Hash(), TxIndex: task.index,
TxHash: txs[task.index].Hash(),
} }
res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config) res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)
if err != nil { if err != nil {
@ -874,9 +877,10 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *
defer release() defer release()
txctx := &Context{ txctx := &Context{
BlockHash: blockHash, BlockHash: blockHash,
TxIndex: int(index), BlockNumber: block.Number(),
TxHash: hash, TxIndex: int(index),
TxHash: hash,
} }
return api.traceTx(ctx, msg, txctx, vmctx, statedb, config) return api.traceTx(ctx, msg, txctx, vmctx, statedb, config)
} }

View File

@ -109,7 +109,7 @@ func (b *testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types
func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber { if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber {
return b.chain.CurrentBlock(), nil return b.chain.GetBlockByNumber(b.chain.CurrentBlock().Number.Uint64()), nil
} }
return b.chain.GetBlockByNumber(uint64(number)), nil return b.chain.GetBlockByNumber(uint64(number)), nil
} }

View File

@ -58,7 +58,7 @@ type callTrace struct {
From common.Address `json:"from"` From common.Address `json:"from"`
Gas *hexutil.Uint64 `json:"gas"` Gas *hexutil.Uint64 `json:"gas"`
GasUsed *hexutil.Uint64 `json:"gasUsed"` GasUsed *hexutil.Uint64 `json:"gasUsed"`
To common.Address `json:"to,omitempty"` To *common.Address `json:"to,omitempty"`
Input hexutil.Bytes `json:"input"` Input hexutil.Bytes `json:"input"`
Output hexutil.Bytes `json:"output,omitempty"` Output hexutil.Bytes `json:"output,omitempty"`
Error string `json:"error,omitempty"` Error string `json:"error,omitempty"`

View File

@ -0,0 +1,213 @@
package tracetest
import (
"encoding/json"
"fmt"
"math/big"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests"
// Force-load the native, to trigger registration
"github.com/ethereum/go-ethereum/eth/tracers"
)
// flatCallTrace is the result of a callTracerParity run.
type flatCallTrace struct {
Action flatCallTraceAction `json:"action"`
BlockHash common.Hash `json:"-"`
BlockNumber uint64 `json:"-"`
Error string `json:"error,omitempty"`
Result flatCallTraceResult `json:"result,omitempty"`
Subtraces int `json:"subtraces"`
TraceAddress []int `json:"traceAddress"`
TransactionHash common.Hash `json:"-"`
TransactionPosition uint64 `json:"-"`
Type string `json:"type"`
Time string `json:"-"`
}
type flatCallTraceAction struct {
Author common.Address `json:"author,omitempty"`
RewardType string `json:"rewardType,omitempty"`
SelfDestructed common.Address `json:"address,omitempty"`
Balance hexutil.Big `json:"balance,omitempty"`
CallType string `json:"callType,omitempty"`
CreationMethod string `json:"creationMethod,omitempty"`
From common.Address `json:"from,omitempty"`
Gas hexutil.Uint64 `json:"gas,omitempty"`
Init hexutil.Bytes `json:"init,omitempty"`
Input hexutil.Bytes `json:"input,omitempty"`
RefundAddress common.Address `json:"refundAddress,omitempty"`
To common.Address `json:"to,omitempty"`
Value hexutil.Big `json:"value,omitempty"`
}
type flatCallTraceResult struct {
Address common.Address `json:"address,omitempty"`
Code hexutil.Bytes `json:"code,omitempty"`
GasUsed hexutil.Uint64 `json:"gasUsed,omitempty"`
Output hexutil.Bytes `json:"output,omitempty"`
}
// flatCallTracerTest defines a single test to check the call tracer against.
type flatCallTracerTest struct {
Genesis core.Genesis `json:"genesis"`
Context callContext `json:"context"`
Input string `json:"input"`
TracerConfig json.RawMessage `json:"tracerConfig"`
Result []flatCallTrace `json:"result"`
}
func flatCallTracerTestRunner(tracerName string, filename string, dirPath string, t testing.TB) error {
// Call tracer test found, read if from disk
blob, err := os.ReadFile(filepath.Join("testdata", dirPath, filename))
if err != nil {
return fmt.Errorf("failed to read testcase: %v", err)
}
test := new(flatCallTracerTest)
if err := json.Unmarshal(blob, test); err != nil {
return fmt.Errorf("failed to parse testcase: %v", err)
}
// Configure a blockchain with the given prestate
tx := new(types.Transaction)
if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
return fmt.Errorf("failed to parse testcase input: %v", err)
}
signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))
origin, _ := signer.Sender(tx)
txContext := vm.TxContext{
Origin: origin,
GasPrice: tx.GasPrice(),
}
context := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
Time: uint64(test.Context.Time),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
// Create the tracer, the EVM environment and run it
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
if err != nil {
return fmt.Errorf("failed to create call tracer: %v", err)
}
evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer, nil)
if err != nil {
return fmt.Errorf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
return fmt.Errorf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
res, err := tracer.GetResult()
if err != nil {
return fmt.Errorf("failed to retrieve trace result: %v", err)
}
ret := new([]flatCallTrace)
if err := json.Unmarshal(res, ret); err != nil {
return fmt.Errorf("failed to unmarshal trace result: %v", err)
}
if !jsonEqualFlat(ret, test.Result) {
t.Logf("tracer name: %s", tracerName)
// uncomment this for easier debugging
// have, _ := json.MarshalIndent(ret, "", " ")
// want, _ := json.MarshalIndent(test.Result, "", " ")
// t.Logf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want))
// uncomment this for harder debugging <3 meowsbits
// lines := deep.Equal(ret, test.Result)
// for _, l := range lines {
// t.Logf("%s", l)
// t.FailNow()
// }
t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result)
}
return nil
}
// Iterates over all the input-output datasets in the tracer parity test harness and
// runs the Native tracer against them.
func TestFlatCallTracerNative(t *testing.T) {
testFlatCallTracer("flatCallTracer", "call_tracer_flat", t)
}
func testFlatCallTracer(tracerName string, dirPath string, t *testing.T) {
files, err := os.ReadDir(filepath.Join("testdata", dirPath))
if err != nil {
t.Fatalf("failed to retrieve tracer test suite: %v", err)
}
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
file := file // capture range variable
t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
err := flatCallTracerTestRunner(tracerName, file.Name(), dirPath, t)
if err != nil {
t.Fatal(err)
}
})
}
}
// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
// comparison
func jsonEqualFlat(x, y interface{}) bool {
xTrace := new([]flatCallTrace)
yTrace := new([]flatCallTrace)
if xj, err := json.Marshal(x); err == nil {
json.Unmarshal(xj, xTrace)
} else {
return false
}
if yj, err := json.Marshal(y); err == nil {
json.Unmarshal(yj, yTrace)
} else {
return false
}
return reflect.DeepEqual(xTrace, yTrace)
}
func BenchmarkFlatCallTracer(b *testing.B) {
files, err := filepath.Glob("testdata/call_tracer_flat/*.json")
if err != nil {
b.Fatalf("failed to read testdata: %v", err)
}
for _, file := range files {
filename := strings.TrimPrefix(file, "testdata/call_tracer_flat/")
b.Run(camel(strings.TrimSuffix(filename, ".json")), func(b *testing.B) {
for n := 0; n < b.N; n++ {
err := flatCallTracerTestRunner("flatCallTracer", filename, "call_tracer_flat", b)
if err != nil {
b.Fatal(err)
}
}
})
}
}

File diff suppressed because one or more lines are too long

View File

@ -50,7 +50,6 @@
"input": "0x02f9029d82053980849502f90085010c388d00832dc6c08080b90241608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033c001a07566181071cabaf58b70fc41557eb813bfc7a24f5c58554e7fed0bf7c031f169a0420af50b5fe791a4d839e181a676db5250b415dfb35cb85d544db7a1475ae2cc", "input": 
"0x02f9029d82053980849502f90085010c388d00832dc6c08080b90241608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033c001a07566181071cabaf58b70fc41557eb813bfc7a24f5c58554e7fed0bf7c031f169a0420af50b5fe791a4d839e181a676db5250b415dfb35cb85d544db7a1475ae2cc",
"result": { "result": {
"from": "0x3623191d4ccfbbdf09e8ebf6382a1f8257417bc1", "from": "0x3623191d4ccfbbdf09e8ebf6382a1f8257417bc1",
"to": "0x0000000000000000000000000000000000000000",
"gas": "0x2cd774", "gas": "0x2cd774",
"gasUsed": "0x25590", "gasUsed": "0x25590",
"input": "0x608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033", "input": 
"0x608060405234801561001057600080fd5b50600060405161001f906100a2565b604051809103906000f08015801561003b573d6000803e3d6000fd5b5090508073ffffffffffffffffffffffffffffffffffffffff1663c04062266040518163ffffffff1660e01b815260040160006040518083038186803b15801561008457600080fd5b505afa158015610098573d6000803e3d6000fd5b50505050506100af565b610145806100fc83390190565b603f806100bd6000396000f3fe6080604052600080fdfea264697066735822122077f7dbd3450d6e817079cf3fe27107de5768bb3163a402b94e2206b468eb025664736f6c63430008070033608060405234801561001057600080fd5b50610125806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063c040622614602d575b600080fd5b60336035565b005b60036002116076576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401606d906097565b60405180910390fd5b565b6000608360128360b5565b9150608c8260c6565b602082019050919050565b6000602082019050818103600083015260ae816078565b9050919050565b600082825260208201905092915050565b7f546869732063616c6c6564206661696c6564000000000000000000000000000060008201525056fea264697066735822122033f8d92e29d467e5ea08d0024eab0b36b86b8cdb3542c6e89dbaabeb8ffaa42064736f6c63430008070033",

View File

@ -0,0 +1,64 @@
{
"genesis": {
"difficulty": "50486697699375",
"extraData": "0xd783010406844765746887676f312e362e32856c696e7578",
"gasLimit": "4788482",
"hash": "0xf6bbc5bbe34d5c93fd5b4712cd498d1026b8b0f586efefe7fe30231ed6b8a1a5",
"miner": "0xbcdfc35b86bedf72f0cda046a3c16829a2ef41d1",
"mixHash": "0xabca93555584c0463ee5c212251dd002bb3a93a157e06614276f93de53d4fdb8",
"nonce": "0xa64136fcb9c2d4ca",
"number": "1719576",
"stateRoot": "0xab5eec2177a92d633e282936af66c46e24cfa8f2fdc2b8155f33885f483d06f3",
"timestamp": "1466150166",
"totalDifficulty": "28295412423546970038",
"alloc": {
"0xf8bda96b67036ee48107f2a0695ea673479dda56": {
"balance": "0x1529e844f9ecdeec",
"nonce": "33",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 1,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 3000000,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 1150000,
"byzantiumBlock": 8772000,
"constantinopleBlock": 9573000,
"petersburgBlock": 10500839,
"istanbulBlock": 10500839
}
},
"context": {
"number": "1719577",
"difficulty": "50486697732143",
"timestamp": "1466150178",
"gasLimit": "4788484",
"miner": "0x2a65aca4d5fc5b5c859090a6c34d164135398226"
},
"input": "0xf874218504a817c800832318608080a35b620186a05a131560135760016020526000565b600080601f600039601f565b6000f31ba0575fa000a1f06659a7b6d3c7877601519a4997f04293f0dfa0eee6d8cd840c77a04c52ce50719ee2ff7a0c5753f4ee69c0340666f582dbb5148845a354ca726e4a",
"result": [
{
"action": {
"from": "0xf8bda96b67036ee48107f2a0695ea673479dda56",
"gas": "0x22410c",
"init": "0x5b620186a05a131560135760016020526000565b600080601f600039601f565b6000f3",
"value": "0x0"
},
"blockNumber": 1719577,
"result": {
"address": "0xb2e6a2546c45889427757171ab05b8b438525b42",
"code": "0x",
"gasUsed": "0x219202"
},
"subtraces": 0,
"traceAddress": [],
"type": "create"
}
]
}

View File

@ -0,0 +1,74 @@
{
"genesis": {
"difficulty": "4671584",
"extraData": "0xd683010b05846765746886676f312e3133856c696e7578",
"gasLimit": "9435026",
"hash": "0x755bd54de4b2f5a7a589a10d69888b4ead48a6311d5d69f2f69ca85ec35fbe0b",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"mixHash": "0x3a44525624571c31344ba57780f7664098fe7cbeafe532bcdee76a23fc474ba0",
"nonce": "0x6dca647c00c72bbf",
"number": "1555278",
"stateRoot": "0x5f56d8323ee384b0c8d1de49d63e150e17283eea813483698362bc0ec9e0242a",
"timestamp": "1590795319",
"totalDifficulty": "2242614315030",
"alloc": {
"0x0000000000000000000000000000000000000004": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x62436e941792f02a5fb1",
"nonce": "265356",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "1555279",
"difficulty": "4669303",
"timestamp": "1590795340",
"gasLimit": "9444238",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf86f83040c8c843b9aca0083019f7880809b60206000600060006013600462030d40f26002556000516000550081a2a086ad228c89ad9664287b12a5602a635a803506904f4ce39795990ac4f945cd57a025b30ea8042d773f6c5b13d7cc1b3979f9f10ee674410b6a2112ce840d0302dc",
"result": [
{
"type": "create",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
"gas": "0xcf08",
"init": "0x60206000600060006013600462030d40f260025560005160005500"
},
"result": {
"gasUsed": "0xf3bc",
"code": "0x",
"address": "0x5f8a7e007172ba80afbff1b15f800eb0b260f224"
},
"traceAddress": [],
"subtraces": 0,
"transactionPosition": 74,
"transactionHash": "0x5ef60b27ac971c22a7d484e546e50093ca62300c8986d165154e47773764b6a4",
"blockNumber": 1555279,
"blockHash": "0xd6c98d1b87dfa92a210d99bad2873adaf0c9e51fe43addc63fd9cca03a5c6f46",
"time": "209.346µs"
}
]
}

View File

@ -0,0 +1,94 @@
{
"genesis": {
"difficulty": "4671584",
"extraData": "0xd883010b05846765746888676f312e31342e33856c696e7578",
"gasLimit": "9425823",
"hash": "0x27dd7d052dbc8a29cc5b9487e1e41d842e7a643fcaea4964caa22b834964acaf",
"miner": "0x73f26d124436b0791169d63a3af29c2ae47765a3",
"mixHash": "0xb4a050624f5d147fdf02857cbfd55da3ddc1451743acc5c163861584589c3034",
"nonce": "0x3c255875b17e0573",
"number": "1555277",
"stateRoot": "0x6290d79215a2eebc25d5e456b35876c6d78ffc1ea47bdd70e375ebb3cf325620",
"timestamp": "1590795308",
"totalDifficulty": "2242609643446",
"alloc": {
"0x0000000000000000000000000000000000000001": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x624329308610ab365fb1",
"nonce": "265194",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "1555278",
"difficulty": "4671584",
"timestamp": "1590795319",
"gasLimit": "9435026",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf8ee83040bea843b9aca008301a7588080b8997f18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c600052601c6020527f73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f6040527feeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549606052602060806080600060006001610bb7f260025560a060020a6080510660005560005432146001550081a1a05b9a162d84bfe84faa7c176e21c26c0083645d4dd0d566547b7be2c2da0b4259a05b37ff12a4c27634cb0da6008d9b69726d415ff4694f9bc38c7806eb1fb60ae9",
"result": [
{
"type": "create",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
"gas": "0xcf08",
"init": "0x7f18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c600052601c6020527f73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f6040527feeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549606052602060806080600060006001610bb7f260025560a060020a60805106600055600054321460015500"
},
"result": {
"gasUsed": "0xf3e9",
"code": "0x",
"address": "0x568c19ecb14b87e4aec29b4d2d700a3ad3fd0613"
},
"traceAddress": [],
"subtraces": 1,
"transactionPosition": 141,
"transactionHash": "0x1592cbda0d928b8d18eed98857942b91ade32d088e55b8bf63418917cb0231f1",
"blockNumber": 1555278,
"blockHash": "0x755bd54de4b2f5a7a589a10d69888b4ead48a6311d5d69f2f69ca85ec35fbe0b",
"time": "300.9µs"
},
{
"type": "call",
"action": {
"from": "0x568c19ecb14b87e4aec29b4d2d700a3ad3fd0613",
"to": "0x0000000000000000000000000000000000000001",
"value": "0x0",
"gas": "0xbb7",
"input": "0x18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c000000000000000000000000000000000000000000000000000000000000001c73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75feeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549",
"callType": "callcode"
},
"error": "out of gas",
"traceAddress": [
0
],
"subtraces": 0,
"transactionPosition": 141,
"transactionHash": "0x1592cbda0d928b8d18eed98857942b91ade32d088e55b8bf63418917cb0231f1",
"blockNumber": 1555278,
"blockHash": "0x755bd54de4b2f5a7a589a10d69888b4ead48a6311d5d69f2f69ca85ec35fbe0b"
}
]
}

View File

@ -0,0 +1,90 @@
{
"genesis": {
"difficulty": "4683014",
"extraData": "0x537465762d63676574682d76312e31312e34",
"gasLimit": "9435044",
"hash": "0x3452ca5005cb73cd60dfa488a7b124251168e564491f80eb66765e79d78cfd95",
"miner": "0x415aa6292d1db797a467b22139704956c030e62f",
"mixHash": "0x6037612618507ae70c74a72bc2580253662971db959cfbc06d3f8527d4d01575",
"nonce": "0x314fc90dee5e39a2",
"number": "1555274",
"stateRoot": "0x795751f3f96a5de1fd3944ddd78cbfe4ef10491e1086be47609869a30929d0e5",
"timestamp": "1590795228",
"totalDifficulty": "2242595605834",
"alloc": {
"0x0000000000000000000000000000000000000009": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x6242e3ccf48e66425fb1",
"nonce": "264981",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "1555275",
"difficulty": "4683014",
"timestamp": "1590795244",
"gasLimit": "9444256",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf87a83040b15843b9aca008301a0348080a636600060003760406103e8366000600060095af26001556103e851600255610408516003550081a1a0dd883fbbb489b640dadc8c1bf151767155228d0a1321f687f070f35f14374b05a02dd0ccb16a8de39bc8ee61381bbbbb54f0ab18422afd7b03c6163da1f5023934",
"result": [
{
"type": "create",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
"gas": "0xcf08",
"init": "0x36600060003760406103e8366000600060095af26001556103e8516002556104085160035500"
},
"error": "out of gas",
"traceAddress": [],
"subtraces": 1,
"transactionPosition": 117,
"transactionHash": "0x7fe4dec901e1a62c1a1d96b8267bb9ff9dc1f75def43aa45b998743455eff8f9",
"blockNumber": 1555275,
"blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd",
"time": "332.877µs"
},
{
"type": "call",
"action": {
"from": "0x8832ef498070145c3a5b30f47fbca71fd7b1de9f",
"to": "0x0000000000000000000000000000000000000009",
"value": "0x0",
"gas": "0xc897",
"input": "0x",
"callType": "callcode"
},
"error": "invalid input length",
"traceAddress": [
0
],
"subtraces": 0,
"transactionPosition": 117,
"transactionHash": "0x7fe4dec901e1a62c1a1d96b8267bb9ff9dc1f75def43aa45b998743455eff8f9",
"blockNumber": 1555275,
"blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd"
}
]
}

View File

@ -0,0 +1,67 @@
{
"context": {
"difficulty": "3755480783",
"gasLimit": "5401723",
"miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511",
"number": "2294702",
"timestamp": "1513676146"
},
"genesis": {
"alloc": {
"0x13e4acefe6a6700604929946e70e6443e4e73447": {
"balance": "0xcf3e0938579f000",
"code": "0x",
"nonce": "9",
"storage": {}
},
"0x7dc9c9730689ff0b0fd506c67db815f12d90a448": {
"balance": "0x0",
"code": "0x",
"nonce": "0",
"storage": {}
}
},
"config": {
"byzantiumBlock": 1700000,
"chainId": 3,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 10,
"eip158Block": 10,
"ethash": {},
"homesteadBlock": 0
},
"difficulty": "3757315409",
"extraData": "0x566961425443",
"gasLimit": "5406414",
"hash": "0xae107f592eebdd9ff8d6ba00363676096e6afb0e1007a7d3d0af88173077378d",
"miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511",
"mixHash": "0xc927aa05a38bc3de864e95c33b3ae559d3f39c4ccd51cef6f113f9c50ba0caf1",
"nonce": "0x93363bbd2c95f410",
"number": "2294701",
"stateRoot": "0x6b6737d5bde8058990483e915866bd1578014baeff57bd5e4ed228a2bfad635c",
"timestamp": "1513676127",
"totalDifficulty": "7160808139332585"
},
"input": "0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102
ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e1
8e65fc29d3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f",
"result": [
{
"action": {
"from": "0x13e4acefe6a6700604929946e70e6443e4e73447",
"gas": "0x5e106",
"init": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548
160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11",
"value": "0x0"
},
"blockNumber": 2294702,
"result": {
"address": "0x7dc9c9730689ff0b0fd506c67db815f12d90a448",
"code": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373fffffffffffffffffff
fffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029",
"gasUsed": "0x897be"
},
"subtraces": 0,
"traceAddress": [],
"type": "create"
}
]
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,103 @@
{
"genesis": {
"number": "566098",
"hash": "0xba134562590a59291892395a29c5088899c2c64d720135dad88f7f076cf55f5f",
"nonce": "0x4b281be9594e3eb3",
"mixHash": "0xdb4ec386166d9c0dc9ba147755ecbb87af9f0a22563cbda02c799efa4e29db6e",
"stateRoot": "0xfc01993ad96a8fb8790a093cea4f505f8db1b0e1143c5f57bb1d173db0baa9e3",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"difficulty": "1926740",
"totalDifficulty": "482216286599",
"extraData": "0xd883010906846765746888676f312e31332e35856c696e7578",
"gasLimit": "19388354",
"timestamp": "1577558314",
"alloc": {
"0x6ab9dd83108698b9ca8d03af3c7eb91c0e54c3fc": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0xcbd5b9b25d1c38c2aad",
"nonce": "134969",
"code": "0x",
"storage": {}
},
"0x91765918420bcb5ad22ee0997abed04056705798": {
"balance": "0x0",
"nonce": "1",
"code": "0x366000803760206000366000736ab9dd83108698b9ca8d03af3c7eb91c0e54c3fc60325a03f41560015760206000f3",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "566099",
"difficulty": "1927680",
"timestamp": "1577558317",
"gasLimit": "19369422",
"miner": "0x774c398d763161f55b66a646f17edda4addad2ca"
},
"input": "0xf87983020f3985746a52880083015f909491765918420bcb5ad22ee0997abed04056705798888ac7230489e80000884e45375a4741394181a1a04b7260723fd02830754916b3bdf1537b6a851a7ae27c7e9296cfe1fc8275ec08a049d32158988eb717d61b4503b27c7583037c067daba1eb56f4bdfafc1b0045f6",
"result": [
{
"action": {
"callType": "call",
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"gas": "0x10b68",
"input": "0x4e45375a47413941",
"to": "0x91765918420bcb5ad22ee0997abed04056705798",
"value": "0x8ac7230489e80000"
},
"blockHash": "0xb05cc5c8f11df2b5d53ced342ee79e2805785f04c2f40add4539f27bd349f74e",
"blockNumber": 566099,
"result": {
"gasUsed": "0x5721",
"output": "0x4e45375a47413941000000000000000000000000000000000000000000000000"
},
"subtraces": 1,
"traceAddress": [],
"transactionHash": "0x6e26dffe2f66186f03a2c36a16a4cd9724d07622c83746f1e35f988515713d4b",
"transactionPosition": 10,
"type": "call"
},
{
"action": {
"callType": "delegatecall",
"from": "0x91765918420bcb5ad22ee0997abed04056705798",
"gas": "0x10463",
"input": "0x4e45375a47413941",
"to": "0x6ab9dd83108698b9ca8d03af3c7eb91c0e54c3fc",
"value": "0x8ac7230489e80000"
},
"blockHash": "0xb05cc5c8f11df2b5d53ced342ee79e2805785f04c2f40add4539f27bd349f74e",
"blockNumber": 566099,
"result": {
"gasUsed": "0x0",
"output": "0x"
},
"subtraces": 0,
"traceAddress": [
0
],
"transactionHash": "0x6e26dffe2f66186f03a2c36a16a4cd9724d07622c83746f1e35f988515713d4b",
"transactionPosition": 10,
"type": "call"
}
]
}

View File

@ -0,0 +1,95 @@
{
"genesis": {
"difficulty": "4683014",
"extraData": "0x537465762d63676574682d76312e31312e34",
"gasLimit": "9435044",
"hash": "0x3452ca5005cb73cd60dfa488a7b124251168e564491f80eb66765e79d78cfd95",
"miner": "0x415aa6292d1db797a467b22139704956c030e62f",
"mixHash": "0x6037612618507ae70c74a72bc2580253662971db959cfbc06d3f8527d4d01575",
"nonce": "0x314fc90dee5e39a2",
"number": "1555274",
"stateRoot": "0x795751f3f96a5de1fd3944ddd78cbfe4ef10491e1086be47609869a30929d0e5",
"timestamp": "1590795228",
"totalDifficulty": "2242595605834",
"alloc": {
"0x0000000000000000000000000000000000000001": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x6242e3ccf48e66425fb1",
"nonce": "264882",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "1555275",
"difficulty": "4683014",
"timestamp": "1590795244",
"gasLimit": "9444256",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf9011583040ab2843b9aca008301a9c88080b8c0601b565b6000555b005b630badf00d6003565b63c001f00d6003565b7319e7e376e7c213b7e7e7e46cc70a5dd086daff2a7f22ae6da6b482f9b1b19b0b897c3fd43884180a1c5ee361e1107a1bc635649dda600052601b603f537f16433dce375ce6dc8151d3f0a22728bc4a1d9fd6ed39dfd18b4609331937367f6040527f306964c0cf5d74f04129fdc60b54d35b596dde1bf89ad92cb4123318f4c0e40060605260206080607f60006000600161fffff2156007576080511460125760095681a1a07682fc43dbe1fb13c6474f5e70e121c826dd996168d8bb1d8ca7a63470127b46a00a25b308ba417b7770899e8f98a3f0c14aa9bf7db0edacfe4e78d00dbbd3c31e",
"result": [
{
"type": "create",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
"gas": "0xcf08",
"init": "0x601b565b6000555b005b630badf00d6003565b63c001f00d6003565b7319e7e376e7c213b7e7e7e46cc70a5dd086daff2a7f22ae6da6b482f9b1b19b0b897c3fd43884180a1c5ee361e1107a1bc635649dda600052601b603f537f16433dce375ce6dc8151d3f0a22728bc4a1d9fd6ed39dfd18b4609331937367f6040527f306964c0cf5d74f04129fdc60b54d35b596dde1bf89ad92cb4123318f4c0e40060605260206080607f60006000600161fffff21560075760805114601257600956"
},
"result": {
"gasUsed": "0x137e5",
"code": "0x",
"address": "0x1a05d76017ca02010533a470e05e8925a0380d8f"
},
"traceAddress": [],
"subtraces": 1,
"transactionPosition": 18,
"transactionHash": "0xc1c42a325856d513523aec464811923b2e2926f54015c7ba37877064cf889803",
"blockNumber": 1555275,
"blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd",
"time": "453.925µs"
},
{
"type": "call",
"action": {
"from": "0x1a05d76017ca02010533a470e05e8925a0380d8f",
"to": "0x0000000000000000000000000000000000000001",
"value": "0x0",
"gas": "0xc8c6",
"input": "0x22ae6da6b482f9b1b19b0b897c3fd43884180a1c5ee361e1107a1bc635649dda000000000000000000000000000000000000000000000000000000000000001b16433dce375ce6dc8151d3f0a22728bc4a1d9fd6ed39dfd18b4609331937367f306964c0cf5d74f04129fdc60b54d35b596dde1bf89ad92cb4123318f4c0e4",
"callType": "callcode"
},
"result": {
"gasUsed": "0xbb8",
"output": "0x00000000000000000000000019e7e376e7c213b7e7e7e46cc70a5dd086daff2a"
},
"traceAddress": [0],
"subtraces": 0,
"transactionPosition": 18,
"transactionHash": "0xc1c42a325856d513523aec464811923b2e2926f54015c7ba37877064cf889803",
"blockNumber": 1555275,
"blockHash": "0x80945caaff2fc67253cbb0217d2e5a307afde943929e97d8b36e58b88cbb02fd"
}
]
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,72 @@
{
"genesis": {
"difficulty": "117067574",
"extraData": "0xd783010502846765746887676f312e372e33856c696e7578",
"gasLimit": "4712380",
"hash": "0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486",
"miner": "0x0c062b329265c965deef1eede55183b3acb8f611",
"mixHash": "0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d",
"nonce": "0x2b469722b8e28c45",
"number": "24973",
"stateRoot": "0x532a5c3f75453a696428db078e32ae283c85cb97e4d8560dbdf022adac6df369",
"timestamp": "1479891145",
"totalDifficulty": "1892250259406",
"alloc": {
"0x6c06b16512b332e6cd8293a2974872674716ce18": {
"balance": "0x0",
"nonce": "1",
"code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e1a7d4d146036575b6000565b34600057604e60048080359060200190919050506050565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f19350505050505b5056",
"storage": {}
},
"0x66fdfd05e46126a07465ad24e40cc0597bc1ef31": {
"balance": "0x229ebbb36c3e0f20",
"nonce": "3",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 3,
"homesteadBlock": 0,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 10,
"eip158Block": 10,
"byzantiumBlock": 1700000,
"constantinopleBlock": 4230000,
"petersburgBlock": 4939394,
"istanbulBlock": 6485846,
"muirGlacierBlock": 7117117,
"ethash": {}
}
},
"context": {
"number": "24974",
"difficulty": "117067574",
"timestamp": "1479891162",
"gasLimit": "4712388",
"miner": "0xc822ef32e6d26e170b70cf761e204c1806265914"
},
"input": "0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745",
"result": [
{
"action": {
"callType": "call",
"from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31",
"gas": "0x1a466",
"input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000",
"to": "0x6c06b16512b332e6cd8293a2974872674716ce18",
"value": "0x0"
},
"blockNumber": 24974,
"result": {
"gasUsed": "0x72de",
"output": "0x"
},
"subtraces": 0,
"traceAddress": [],
"type": "call"
}
]
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,94 @@
{
"genesis": {
"difficulty": "1808543",
"extraData": "0xd883010906846765746888676f312e31332e35856c696e7578",
"gasLimit": "4875092",
"hash": "0x3851fdc18bd5f2314cf0c90439356f9a1fe157d7fb06c20e20b77954da903671",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"mixHash": "0x3d4e702d6058acf94c9547560f05536d45d515bd4f9014564ec41b5b4ff9578b",
"nonce": "0x1695153e7b16c1e7",
"number": "555461",
"stateRoot": "0xba8272acd0dfeb5f04376328e8bfc5b276b177697000c204a060f6f7b629ae32",
"timestamp": "1577423350",
"totalDifficulty": "462222992438",
"alloc": {
"0xcf5b3467dfa45cdc8e5358a7a1ba4deb02e5faed": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x16c102a3b09c02abdace",
"nonce": "19049",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "555462",
"difficulty": "1808543",
"timestamp": "1577423360",
"gasLimit": "4873701",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf90451824a6985746a52880083053e908080b903fb60606040525b60405161015b806102a0833901809050604051809103906000f0600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b610247806100596000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900480632ef9db1314610044578063e37678761461007157610042565b005b61005b6004803590602001803590602001506100ad565b6040518082815260200191505060405180910390f35b61008860048035906020018035906020015061008a565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000600060008484604051808381526020018281526020019250505060405180910390209150610120600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff167f6164640000000000000000000000000000000000000000000000000000000000846101e3565b9050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681868660405180807f616464000000000000000000000000000000000000000000000000000000000081526020015060200184815260200183815260200182815260200193505050506000604051808303816000866161da5a03f191505050600060005060008281526020019081526020016000206000505492506101db565b505092915050565b60004340848484604051808581526020018473ffffffffffffffffffffffffffffffffffffffff166c0100000000000000000000000002815260140183815260200182815260200194505050505060405180910390209050610240565b9392505050566060604052610148806100136000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063471407e614610044578063e37678761461007757610042565b005b6100616004803590602001803590602001803590602001506100b3565b6040518082815260200191505060405180910390f35b61008e600480359060200180359060200150610090565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000818301905080506100c684826100d5565b8090506100ce565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff16828260405180807f7265676973746572496e740000000000000000000000000000000000000000008152602001
50602001838152602001828152602001925050506000604051808303816000866161da5a03f1915050505b50505681a1a0b9a85df655d3b6aa081e52d8c3db52c50c2bf97d9d993a980113b2262649c125a00d51e63880ca8ef4705914a71e7ff906834a9cdcff0cbd063ff4e43a5905890d",
"result": [
{
"type": "create",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
"gas": "0x3951c",
"init": "0x60606040525b60405161015b806102a0833901809050604051809103906000f0600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908302179055505b610247806100596000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900480632ef9db1314610044578063e37678761461007157610042565b005b61005b6004803590602001803590602001506100ad565b6040518082815260200191505060405180910390f35b61008860048035906020018035906020015061008a565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000600060008484604051808381526020018281526020019250505060405180910390209150610120600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff167f6164640000000000000000000000000000000000000000000000000000000000846101e3565b9050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681868660405180807f616464000000000000000000000000000000000000000000000000000000000081526020015060200184815260200183815260200182815260200193505050506000604051808303816000866161da5a03f191505050600060005060008281526020019081526020016000206000505492506101db565b505092915050565b60004340848484604051808581526020018473ffffffffffffffffffffffffffffffffffffffff166c0100000000000000000000000002815260140183815260200182815260200194505050505060405180910390209050610240565b9392505050566060604052610148806100136000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063471407e614610044578063e37678761461007757610042565b005b6100616004803590602001803590602001803590602001506100b3565b6040518082815260200191505060405180910390f35b61008e600480359060200180359060200150610090565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000818301905080506100c684826100d5565b8090506100ce565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff16828260405180807f7265676973746572496e7400000000000000000000000000000000000000000081526020015060200183815260200182815260200192505050600
0604051808303816000866161da5a03f1915050505b505056"
},
"result": {
"gasUsed": "0x53e90",
"code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632ef9db1314610044578063e37678761461007157610042565b005b61005b6004803590602001803590602001506100ad565b6040518082815260200191505060405180910390f35b61008860048035906020018035906020015061008a565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000600060008484604051808381526020018281526020019250505060405180910390209150610120600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff167f6164640000000000000000000000000000000000000000000000000000000000846101e3565b9050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681868660405180807f616464000000000000000000000000000000000000000000000000000000000081526020015060200184815260200183815260200182815260200193505050506000604051808303816000866161da5a03f191505050600060005060008281526020019081526020016000206000505492506101db565b505092915050565b60004340848484604051808581526020018473ffffffffffffffffffffffffffffffffffffffff166c0100000000000000000000000002815260140183815260200182815260200194505050505060405180910390209050610240565b939250505056",
"address": "0x9db7a1baf185a865ffee3824946ccd8958191e5e"
},
"traceAddress": [],
"subtraces": 1,
"transactionPosition": 23,
"transactionHash": "0xe267552ce8437a5bc7081385c99f912de5723ad34b958db215dbc41abd5f6c03",
"blockNumber": 555462,
"blockHash": "0x38bba9e3965b57205097ea5ec53fc403cf3941bec2e4c933faae244de5ca4ba1",
"time": "1.147715ms"
},
{
"type": "create",
"action": {
"from": "0x9db7a1baf185a865ffee3824946ccd8958191e5e",
"value": "0x0",
"gas": "0x30b34",
"init": "0x6060604052610148806100136000396000f30060606040526000357c010000000000000000000000000000000000000000000000000000000090048063471407e614610044578063e37678761461007757610042565b005b6100616004803590602001803590602001803590602001506100b3565b6040518082815260200191505060405180910390f35b61008e600480359060200180359060200150610090565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000818301905080506100c684826100d5565b8090506100ce565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff16828260405180807f7265676973746572496e74000000000000000000000000000000000000000000815260200150602001838152602001828152602001925050506000604051808303816000866161da5a03f1915050505b505056"
},
"result": {
"gasUsed": "0x1009d",
"code": "0x60606040526000357c010000000000000000000000000000000000000000000000000000000090048063471407e614610044578063e37678761461007757610042565b005b6100616004803590602001803590602001803590602001506100b3565b6040518082815260200191505060405180910390f35b61008e600480359060200180359060200150610090565b005b8060006000506000848152602001908152602001600020600050819055505b5050565b6000818301905080506100c684826100d5565b8090506100ce565b9392505050565b3373ffffffffffffffffffffffffffffffffffffffff16828260405180807f7265676973746572496e74000000000000000000000000000000000000000000815260200150602001838152602001828152602001925050506000604051808303816000866161da5a03f1915050505b505056",
"address": "0xcf5b3467dfa45cdc8e5358a7a1ba4deb02e5faed"
},
"traceAddress": [0],
"subtraces": 0,
"transactionPosition": 23,
"transactionHash": "0xe267552ce8437a5bc7081385c99f912de5723ad34b958db215dbc41abd5f6c03",
"blockNumber": 555462,
"blockHash": "0x38bba9e3965b57205097ea5ec53fc403cf3941bec2e4c933faae244de5ca4ba1"
}
]
}

View File

@ -0,0 +1,94 @@
{
"genesis": {
"difficulty": "4635413",
"extraData": "0xd683010b05846765746886676f312e3133856c696e7578",
"gasLimit": "9289294",
"hash": "0x359775cf1a2ae2400e26ec68bf33bcfe38b7979c76b7e616f42c4ca7e7605e39",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"mixHash": "0x4b2a0ef121a9c7d732fa0fbd4166a0e1041d2da2b8cb677c61edabf8b7183b64",
"nonce": "0x2a8a64ad9757be55",
"number": "1555160",
"stateRoot": "0x95067c12148e2362fcd4a89df286ff0b1739ef097a40ca42ae7f698af9a9d913",
"timestamp": "1590793999",
"totalDifficulty": "2242063623471",
"alloc": {
"0x8785e369f0ef0a4e5c5a5f929680427dc75273a5": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x623145b285b3f551fa3f",
"nonce": "260617",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "1555161",
"difficulty": "4633150",
"timestamp": "1590794020",
"gasLimit": "9298364",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf85e8303fa09843b9aca0083019ed880808a6000600060006000f50081a2a0485ea410e210740eef8e6f6de11c530f46f8da80eecb02afbb6c5f61749ac015a068d72f1b0f1d3cb4e214d5def79b49a73e6ee91db2df83499a54c656c144600f",
"result": [
{
"type": "create",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
"gas": "0xcf6c",
"init": "0x6000600060006000f500"
},
"result": {
"gasUsed": "0x14c78",
"code": "0x",
"address": "0x2e8eded627eead210cb6143eb39ef7a3e44e4f00"
},
"traceAddress": [],
"subtraces": 1,
"transactionPosition": 31,
"transactionHash": "0x1257b698c5833c54ce786734087002b097275abc3877af082b5c2a538e894a41",
"blockNumber": 1555161,
"blockHash": "0xb0793dd508dd106a19794b8ce1dfc0ff8d98c76aab61bf32a11799854149a171",
"time": "889.048µs"
},
{
"type": "create",
"action": {
"from": "0x2e8eded627eead210cb6143eb39ef7a3e44e4f00",
"value": "0x0",
"gas": "0x5117",
"init": "0x"
},
"result": {
"gasUsed": "0x0",
"code": "0x",
"address": "0x8785e369f0ef0a4e5c5a5f929680427dc75273a5"
},
"traceAddress": [0],
"subtraces": 0,
"transactionPosition": 31,
"transactionHash": "0x1257b698c5833c54ce786734087002b097275abc3877af082b5c2a538e894a41",
"blockNumber": 1555161,
"blockHash": "0xb0793dd508dd106a19794b8ce1dfc0ff8d98c76aab61bf32a11799854149a171"
}
]
}

View File

@ -0,0 +1,90 @@
{
"genesis": {
"difficulty": "4639933",
"extraData": "0xd883010b05846765746888676f312e31342e33856c696e7578",
"gasLimit": "9280188",
"hash": "0x9a5f3a98eb1c60f6e3f450658a9cea190157e7021d04f927b752ad6482cf9194",
"miner": "0x73f26d124436b0791169d63a3af29c2ae47765a3",
"mixHash": "0x6b6f8fcaa54b8565c4c1ae7cf0a020e938a53007f4561e758b17bc05c9044d78",
"nonce": "0x773aba50dc51b462",
"number": "1555169",
"stateRoot": "0xc4b9703de3e59ff795baae2c3afa010cf039c37244a7a6af7f3f491a10601348",
"timestamp": "1590794111",
"totalDifficulty": "2242105342155",
"alloc": {
"0x5ac5599fc9df172c89ee7ec55ad9104ccbfed40d": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x62325b40cbbd0915c4b9",
"nonce": "260875",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "1555170",
"difficulty": "4642198",
"timestamp": "1590794112",
"gasLimit": "9289249",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf8658303fb0b843b9aca0083019ee48080915a600055600060006000f0505a6001550081a2a01a7deb3a16d967b766459ef486b00656c6581e5ad58968184a33701e27e0eb8aa07162ccdfe2018d64360a605310a62c399dd586c7282dd42a88c54f02f51d451f",
"result": [
{
"type": "create",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
"gas": "0xcf08",
"init": "0x5a600055600060006000f0505a60015500"
},
"error": "out of gas",
"traceAddress": [],
"subtraces": 1,
"transactionPosition": 63,
"transactionHash": "0x60e881fae3884657b5430925c5d0053535b45cce0b8188f2a6be1feee8bcc650",
"blockNumber": 1555170,
"blockHash": "0xea46fbf941d51bf1e4180fbf26d22fda3896f49c7f371d109c226de95dd7b02e",
"time": "952.736µs"
},
{
"type": "create",
"action": {
"from": "0x9c5cfe45b15eaff4ad617af4250189e26024a4f8",
"value": "0x0",
"gas": "0x3cb",
"init": "0x"
},
"result": {
"gasUsed": "0x0",
"code": "0x",
"address": "0x5ac5599fc9df172c89ee7ec55ad9104ccbfed40d"
},
"traceAddress": [0],
"subtraces": 0,
"transactionPosition": 63,
"transactionHash": "0x60e881fae3884657b5430925c5d0053535b45cce0b8188f2a6be1feee8bcc650",
"blockNumber": 1555170,
"blockHash": "0xea46fbf941d51bf1e4180fbf26d22fda3896f49c7f371d109c226de95dd7b02e"
}
]
}

View File

@ -0,0 +1,81 @@
{
"genesis": {
"difficulty": "3244991",
"extraData": "0x",
"gasLimit": "7968787",
"hash": "0x62bbf18c203068a8793af8d8360d054f95a63bc62b87ade550861ed490af3f15",
"miner": "0x9f2659ffe7b3b467e46dcec3623392cf51635079",
"mixHash": "0xc8dec711fd1e03972b6a279a09dc0cd29c5171b60f42c4ce37c7c51ff445f776",
"nonce": "0x40b1bbcc25ddb804",
"number": "839246",
"stateRoot": "0x4bb3b02ec70b837651233957fb61a6ea3fc6a4244c1f55df7a713c154829ec0a",
"timestamp": "1581179375",
"totalDifficulty": "1023985623933",
"alloc": {
"0x76554b33410b6d90b7dc889bfed0451ad195f27e": {
"balance": "0x0",
"nonce": "1",
"code": "0x6080604052348015600f57600080fd5b506004361060505760003560e01c8063391521f414605557806355313dea14605d5780636d3d14161460655780638da5cb5b14606d578063b9d1e5aa1460b5575b600080fd5b605b60bd565b005b606360c8565b005b606b60ca565b005b607360cf565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60bb60f4565b005b6020610123600af050565b005b600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565bfefea165627a7a723058202094d5aa5dbbd493e9a2c64c50b62eba4b109b2a12d2bb73a5d0d54982651fc80029",
"storage": {}
},
"0xed69ab7145a9bae7152406d062c077c6ecc6ae18": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0xa3b31cbd5168d3c99756660d4b7625d679e12573": {
"balance": "0x569bc6535d3083fce",
"nonce": "26",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "839247",
"difficulty": "3213311",
"timestamp": "1581179571",
"gasLimit": "7961006",
"miner": "0x9f2659ffe7b3b467e46dcec3623392cf51635079"
},
"input": "0xf86a1a8509502f9000830334509476554b33410b6d90b7dc889bfed0451ad195f27e8084391521f481a2a02e4ff0d171a860c8c7de2283978e2f225f9ba3ed4dec446b773c6b2d73ef22dea02a6a517528b491cb71b204f534db11a1c8059035f54d5bae347d1cab536bde2c",
"result": [
{
"type": "call",
"action": {
"from": "0xa3b31cbd5168d3c99756660d4b7625d679e12573",
"to": "0x76554b33410b6d90b7dc889bfed0451ad195f27e",
"value": "0x0",
"gas": "0x2e138",
"input": "0x391521f4",
"callType": "call"
},
"result": {
"gasUsed": "0xd0b5",
"output": "0x"
},
"traceAddress": [],
"subtraces": 0,
"transactionPosition": 26,
"transactionHash": "0xcb1090fa85d2a3da8326b75333e92b3dca89963c895d9c981bfdaa64643135e4",
"blockNumber": 839247,
"blockHash": "0xce7ff7d84ca97f0f89d6065e2c12409a795c9f607cdb14aef0713cad5d7e311c",
"time": "182.267µs"
}
]
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,111 @@
{
"genesis": {
"difficulty": "1911202",
"extraData": "0xd883010906846765746888676f312e31332e35856c696e7578",
"gasLimit": "7842876",
"hash": "0x4d7bc82e0d56307094378e1a8fbfa6260986f621de95b5fe68a95248b3ba8efe",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"mixHash": "0xc102ad52677c391edab82cc895ca7a7e9fff3eed4fa966ecf7fb61ec1e84bb6b",
"nonce": "0x39f5b074e3437f3f",
"number": "553415",
"stateRoot": "0x8f89e79109c19fa00e72b400502448540dc4773ad92dddd341dbba20c710a3b5",
"timestamp": "1577396195",
"totalDifficulty": "458361299240",
"alloc": {
"0x531f76bad925f6a925474996c7d738c1008045f6": {
"balance": "0x0",
"nonce": "1",
"code": "0x6060604052361561008a576000357c01000000000000000000000000000000000000000000000000000000009004806301cb3b20146102bf57806329dcb0cf146102cc57806338af3eed146102ed5780636e66f6e9146103245780637a3a0e841461035b5780637b3e5e7b1461037c578063a035b1fe1461039d578063dc0d3dff146103be5761008a565b6102bd5b60003490506040604051908101604052803381526020018281526020015060066000506006600050805480919060010190908154818355818115116101365760020281600202836000526020600020918201910161013591906100ec565b808211156101315760006000820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690556001820160005060009055506001016100ec565b5090565b5b505050815481101561000257906000526020600020906002020160005060008201518160000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff0219169083021790555060208201518160010160005055905050806002600082828250540192505081905550600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166390b98a11336004600050548404604051837c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff168152602001828152602001925050506020604051808303816000876161da5a03f1156100025750505060405151507fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf633826001604051808473ffffffffffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390a15b50565b005b6102ca6004506104c8565b005b6102d760045061043a565b6040518082815260200191505060405180910390f35b6102f8600450610402565b604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b61032f60045061044c565b604051808273ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610366600450610428565b6040518082815260200191505060405180910390f35b610387600450610431565b6040518082815260200191505060405180910390f35b6103a8600450610443565b6040518082815260200191505060405180910390f35b6103cf600480359060200150610472565b604051808373fffffffffffffffffffffffff
fffffffffffffff1681526020018281526020019250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60016000505481565b60026000505481565b60036000505481565b60046000505481565b600560009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60066000508181548110156100025790600052602060002090600202016000915090508060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060010160005054905082565b6000600360005054421015156107d8576001600050546002600050541015156105cf57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166000600260005054604051809050600060405180830381858888f19350505050507fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf6600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff166002600050546000604051808473ffffffffffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390a161079d565b7fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf66000600b600060405180848152602001838152602001828152602001935050505060405180910390a1600090505b60066000505481101561079c57600660005081815481101561000257906000526020600020906002020160005060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166000600660005083815481101561000257906000526020600020906002020160005060010160005054604051809050600060405180830381858888f19350505050507fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf6600660005082815481101561000257906000526020600020906002020160005060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff166006600050838154811015610002579060005260206000209060020201600050600101600050546000604051808473ffffffffffffffffffffffffffffffffffffffff168152602001838152602001828152602001935050505060405180910390a15b806001019050805061061e565b5b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffff
fffffffffffffffffffffff16ff5b5b5056",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000006": "0x0000000000000000000000000000000000000000000000000000000000000000",
"0xf652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f": "0x0000000000000000000000000000000000000000000000000000000000000000",
"0xf652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d40": "0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000005": "0x000000000000000000000000b49180d443dc4ca6028de0031ac09337891fd8ce",
"0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000000000000000000000000000000de0b6b3a7640000"
}
},
"0xb49180d443dc4ca6028de0031ac09337891fd8ce": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x193e9986e2e3f0c58988",
"nonce": "2585",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "553416",
"difficulty": "1909336",
"timestamp": "1577396224",
"gasLimit": "7835218",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf870820a1985e8d4a5100083040b2894531f76bad925f6a925474996c7d738c1008045f6880de0b6b3a76400008081a2a08693170f040d9501b831b404d9e40fba040c5aef4b8974aedc20b3844aea7c32a0476861058ff9b8030c58bcba8be320acc855e4694a633c493fb50fbdb9455489",
"result": [
{
"type": "call",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"to": "0x531f76bad925f6a925474996c7d738c1008045f6",
"value": "0xde0b6b3a7640000",
"gas": "0x3b920",
"input": "0x",
"callType": "call"
},
"result": {
"gasUsed": "0x19c3e",
"output": "0x"
},
"traceAddress": [],
"subtraces": 1,
"transactionPosition": 5,
"transactionHash": "0x04d2029a5cbbed30969cdc0a2ca9e9fc6b719e323af0802b52466f07ee0ecada",
"blockNumber": 553416,
"blockHash": "0x8df024322173d225a09681d35edeaa528aca60743a11a70f854c158862bf5282",
"time": "617.42µs"
},
{
"type": "call",
"action": {
"from": "0x531f76bad925f6a925474996c7d738c1008045f6",
"to": "0xb49180d443dc4ca6028de0031ac09337891fd8ce",
"value": "0x0",
"gas": "0x2164e",
"input": "0x90b98a11000000000000000000000000877bd459c9b7d8576b44e59e09d076c25946f4430000000000000000000000000000000000000000000000000000000000000001",
"callType": "call"
},
"result": {
"gasUsed": "0x0",
"output": "0x"
},
"traceAddress": [
0
],
"subtraces": 0,
"transactionPosition": 5,
"transactionHash": "0x04d2029a5cbbed30969cdc0a2ca9e9fc6b719e323af0802b52466f07ee0ecada",
"blockNumber": 553416,
"blockHash": "0x8df024322173d225a09681d35edeaa528aca60743a11a70f854c158862bf5282"
}
]
}

View File

@ -0,0 +1,68 @@
{
"context": {
"difficulty": "3665057456",
"gasLimit": "5232723",
"miner": "0xf4d8e706cfb25c0decbbdd4d2e2cc10c66376a3f",
"number": "2294501",
"timestamp": "1513673601"
},
"genesis": {
"alloc": {
"0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9": {
"balance": "0x2a3fc32bcc019283",
"code": "0x",
"nonce": "10",
"storage": {}
},
"0xabbcd5b340c80b5f1c0545c04c987b87310296ae": {
"balance": "0x0",
"code": "0x606060405236156100755763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632d0335ab811461007a578063548db174146100ab5780637f649783146100fc578063b092145e1461014d578063c3f44c0a14610186578063c47cf5de14610203575b600080fd5b341561008557600080fd5b610099600160a060020a0360043516610270565b60405190815260200160405180910390f35b34156100b657600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061028f95505050505050565b005b341561010757600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061029e95505050505050565b005b341561015857600080fd5b610172600160a060020a03600435811690602435166102ad565b604051901515815260200160405180910390f35b341561019157600080fd5b6100fa6004803560ff1690602480359160443591606435600160a060020a0316919060a49060843590810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965050509235600160a060020a031692506102cd915050565b005b341561020e57600080fd5b61025460046024813581810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965061056a95505050505050565b604051600160a060020a03909116815260200160405180910390f35b600160a060020a0381166000908152602081905260409020545b919050565b61029a816000610594565b5b50565b61029a816001610594565b5b50565b600160209081526000928352604080842090915290825290205460ff1681565b60008080600160a060020a038416158061030d5750600160a060020a038085166000908152600160209081526040808320339094168352929052205460ff165b151561031857600080fd5b6103218561056a565b600160a060020a038116600090815260208190526040808220549295507f19000000000000000000000000000000000000000000000000000000000000009230918891908b908b90517fff000000000000000000000000000000000000000000000000000000000000008089168252871660018201526c01000000000000000000000000600160a060020a038088168202600284015286811682026016840152602a8301869052841602604a820152605e810182805190602001908083835b602
0831061040057805182525b601f1990920191602091820191016103e0565b6001836020036101000a0380198251168184511617909252505050919091019850604097505050505050505051809103902091506001828a8a8a6040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f1151561049957600080fd5b5050602060405103519050600160a060020a03838116908216146104bc57600080fd5b600160a060020a0380841660009081526020819052604090819020805460010190559087169086905180828051906020019080838360005b8381101561050d5780820151818401525b6020016104f4565b50505050905090810190601f16801561053a5780820380516001836020036101000a031916815260200191505b5091505060006040518083038160008661646e5a03f1915050151561055e57600080fd5b5b505050505050505050565b600060248251101561057e5750600061028a565b600160a060020a0360248301511690505b919050565b60005b825181101561060157600160a060020a033316600090815260016020526040812083918584815181106105c657fe5b90602001906020020151600160a060020a031681526020810191909152604001600020805460ff19169115159190911790555b600101610597565b5b5050505600a165627a7a723058200027e8b695e9d2dea9f3629519022a69f3a1d23055ce86406e686ea54f31ee9c0029",
"nonce": "1",
"storage": {}
}
},
"config": {
"byzantiumBlock": 1700000,
"chainId": 3,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 10,
"eip158Block": 10,
"ethash": {},
"homesteadBlock": 0
},
"difficulty": "3672229776",
"extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
"gasLimit": "5227619",
"hash": "0xa07b3d6c6bf63f5f981016db9f2d1d93033833f2c17e8bf7209e85f1faf08076",
"miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
"mixHash": "0x806e151ce2817be922e93e8d5921fa0f0d0fd213d6b2b9a3fa17458e74a163d0",
"nonce": "0xbc5d43adc2c30c7d",
"number": "2294500",
"stateRoot": "0xca645b335888352ef9d8b1ef083e9019648180b259026572e3139717270de97d",
"timestamp": "1513673552",
"totalDifficulty": "7160066586979149"
},
"input": "0xf9018b0a8505d21dba00832dc6c094abbcd5b340c80b5f1c0545c04c987b87310296ae80b9012473b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988000000000000000000000000000000000000000000000000000000000000000000000000000000001ba0fd659d76a4edbd2a823e324c93f78ad6803b30ff4a9c8bce71ba82798975c70ca06571eecc0b765688ec6c78942c5ee8b585e00988c0141b518287e9be919bc48a",
"result": [
{
"action": {
"callType": "call",
"from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9",
"gas": "0x2d55e8",
"input": "0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000",
"to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae",
"value": "0x0"
},
"blockNumber": 2294501,
"error": "execution reverted",
"result": {
"gasUsed": "0x719b"
},
"subtraces": 0,
"traceAddress": [],
"type": "call"
}
]
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,91 @@
{
"genesis": {
"difficulty": "4628640",
"extraData": "0xd883010b05846765746888676f312e31342e33856c696e7578",
"gasLimit": "9244120",
"hash": "0x5a1f551897cc91265225b0453136ad8c7eef1c1c8b06139da4f2e6e710c1f4df",
"miner": "0x73f26d124436b0791169d63a3af29c2ae47765a3",
"mixHash": "0xd6735e63f8937fe0c5491e0d5836ec28467363be7ada5a2f979f9d107e2c831e",
"nonce": "0x7c35e34d2e328d7d",
"number": "1555145",
"stateRoot": "0x565873b05f71b98595133e37a52d79c3476ce820c05ebedaddd35541b0e894a3",
"timestamp": "1590793819",
"totalDifficulty": "2241994078605",
"alloc": {
"0x119f569a45e9d0089d51d7f9529f5ea9bf5785e2": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x622e8fced69d43eb8d97",
"nonce": "260140",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "1555146",
"difficulty": "4630900",
"timestamp": "1590793820",
"gasLimit": "9253146",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf8628303f82c843b9aca0083019ecc80808e605a600053600160006001f0ff0081a2a077f539ae2a58746bbfa6370fc423f946870efa32753d697d3729d361a428623aa0384ef9a5650d6630f5c1ddef616bffa5fc72a95a9314361d0918de066aa4475a",
"result": [
{
"type": "create",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
"gas": "0xcf08",
"init": "0x605a600053600160006001f0ff00"
},
"result": {
"gasUsed": "0x102a1",
"code": "0x",
"address": "0x1d99a1a3efa9181f540f9e24fa6e4e08eb7844ca"
},
"traceAddress": [],
"subtraces": 1,
"transactionPosition": 14,
"transactionHash": "0xdd76f02407e2f8329303ba688e111cae4f7008ad0d14d6e42c5698424ea36d79",
"blockNumber": 1555146,
"blockHash": "0xafb4f1dd27b9054c805acb81a88ed04384788cb31d84164c21874935c81e5c7e",
"time": "187.145µs"
},
{
"type": "suicide",
"action": {
"address": "0x1d99a1a3efa9181f540f9e24fa6e4e08eb7844ca",
"refundAddress": "0x0000000000000000000000000000000000000000",
"balance": "0x0"
},
"result": null,
"traceAddress": [
0
],
"subtraces": 0,
"transactionPosition": 14,
"transactionHash": "0xdd76f02407e2f8329303ba688e111cae4f7008ad0d14d6e42c5698424ea36d79",
"blockNumber": 1555146,
"blockHash": "0xafb4f1dd27b9054c805acb81a88ed04384788cb31d84164c21874935c81e5c7e"
}
]
}

View File

@ -0,0 +1,97 @@
{
"context": {
"difficulty": "3502894804",
"gasLimit": "4722976",
"miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724",
"number": "2289806",
"timestamp": "1513601314"
},
"genesis": {
"alloc": {
"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": {
"balance": "0x0",
"code": "0x",
"nonce": "22",
"storage": {}
},
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
"balance": "0x4d87094125a369d9bd5",
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801
561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
"nonce": "1",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb",
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000",
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c",
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
}
},
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
"balance": "0x1780d77678137ac1b775",
"code": "0x",
"nonce": "29072",
"storage": {}
}
},
"config": {
"byzantiumBlock": 1700000,
"chainId": 3,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 10,
"eip158Block": 10,
"ethash": {},
"homesteadBlock": 0
},
"difficulty": "3509749784",
"extraData": "0x4554482e45544846414e532e4f52472d4641313738394444",
"gasLimit": "4727564",
"hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440",
"miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3",
"mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada",
"nonce": "0x4eb12e19c16d43da",
"number": "2289805",
"stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f",
"timestamp": "1513601261",
"totalDifficulty": "7143276353481064"
},
"input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4",
"result": [
{
"action": {
"callType": "call",
"from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb",
"gas": "0x10738",
"input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
"to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe",
"value": "0x0"
},
"blockNumber": 2289806,
"result": {
"gasUsed": "0x9751",
"output": "0x0000000000000000000000000000000000000000000000000000000000000001"
},
"subtraces": 1,
"traceAddress": [],
"type": "call"
},
{
"action": {
"callType": "call",
"from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe",
"gas": "0x6d05",
"input": "0x",
"to": "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5",
"value": "0x6f05b59d3b20000"
},
"blockNumber": 0,
"result": {
"gasUsed": "0x0",
"output": "0x"
},
"subtraces": 0,
"traceAddress": [0],
"type": "call"
}
]
}

View File

@ -0,0 +1,70 @@
{
"genesis": {
"difficulty": "4673862",
"extraData": "0xd683010b05846765746886676f312e3133856c696e7578",
"gasLimit": "9471919",
"hash": "0x7f072150c5905c214966e3432d418910badcdbe510aceaac295b1d7059cc0ffc",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"mixHash": "0x113ced8fedb939fdc862008da7bdddde726f997c0e6dfba0e55613994757b489",
"nonce": "0x0f411a2e5552c5b7",
"number": "1555284",
"stateRoot": "0x9fe125b361b72d5479b24ad9be9964b74228c73a2dfb0065060a79b4a6dfaa1e",
"timestamp": "1590795374",
"totalDifficulty": "2242642335405",
"alloc": {
"0xe85df1413eebe1b191c26260e19783a274a6b041": {
"balance": "0x0",
"nonce": "0",
"code": "0x",
"storage": {}
},
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x6244c985ef1e48e84531",
"nonce": "265775",
"code": "0x",
"storage": {}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "1555285",
"difficulty": "4676144",
"timestamp": "1590795378",
"gasLimit": "9481167",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf9014083040e2f843b9aca008301aab08080b8eb7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5547f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000000000000000000000000000000000000000000037f05581a2a09db45e7846f193471f6d897fb6ff58b7ec41a9c6f63d10aca47d821c365981cba052ec320875625e16141a1a9e8b7993de863698fb699f93ae2cab26149bbb144f",
"result": [
{
"type": "create",
"action": {
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"value": "0x0",
"gas": "0xd550",
"init": "0x7f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b57f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000945304eb96065b2a98b57a48a06ae28d285a71b5547f000000000000000000000000000000000000000000000000000000000000c3507f000000000000000000000000000000000000000000000000000000000000c3507f00000000000000000000000000000000000000000000000000000000000000007f000000000000000000000000000000000000000000000000000000000000000037f055"
},
"error": "out of gas",
"traceAddress": [],
"subtraces": 0,
"transactionPosition": 16,
"transactionHash": "0x384487e5ae8d2997aece8e28403d393cb9752425e6de358891bed981c5af1c05",
"blockNumber": 1555285,
"blockHash": "0x93231d8e9662adb4c5c703583a92c7b3112cd5448f43ab4fa1f0f00a0183ed3f",
"time": "665.278µs"
}
]
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,92 @@
{
"genesis": {
"number": "553153",
"hash": "0x88bde20840880a1f3fba92121912a3cc0d3b26d76e4d914fbd85fc2e43da3b3f",
"nonce": "0x7be554ffe4b82fc2",
"mixHash": "0xf73d2ff3c16599c3b8a24b9ebde6c09583b5ee3f747d3cd37845d564f4c8d87a",
"stateRoot": "0x40b5f53d610108947688a04fb68838ff9c0aa0dd6e54156b682537192171ff5c",
"miner": "0x774c398d763161f55b66a646f17edda4addad2ca",
"difficulty": "1928226",
"totalDifficulty": "457857582215",
"extraData": "0xd983010907846765746888676f312e31332e358664617277696e",
"gasLimit": "7999473",
"timestamp": "1577392669",
"alloc": {
"0x877bd459c9b7d8576b44e59e09d076c25946f443": {
"balance": "0x19bb4ac611ca7a1fc881",
"nonce": "701",
"code": "0x",
"storage": {}
},
"0x8ee79c5b3f6e1d214d2c4fcf7ea4092a32e26e91": {
"balance": "0x0",
"nonce": "1",
"code": "0x60606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000877bd459c9b7d8576b44e59e09d076c25946f443"
}
}
},
"config": {
"chainId": 63,
"daoForkSupport": true,
"eip150Block": 0,
"eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d",
"eip155Block": 0,
"eip158Block": 0,
"ethash": {},
"homesteadBlock": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 301243,
"petersburgBlock": 999983,
"istanbulBlock": 999983
}
},
"context": {
"number": "553154",
"difficulty": "1929167",
"timestamp": "1577392670",
"gasLimit": "8000000",
"miner": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"input": "0xf86c8202bd850ee6b280008344aa20948ee79c5b3f6e1d214d2c4fcf7ea4092a32e26e91808441c0e1b581a2a03f95ca5cdf7fd727630341c4c6aa1b64ccd9949bd9ecc72cfdd7ce17a2013a69a06d34795ef7fb0108a6dbee4ae0a1bdc48dcd2a4ee53bb6a33d45515af07bb9a8",
"result": [
{
"action": {
"callType": "call",
"from": "0x877bd459c9b7d8576b44e59e09d076c25946f443",
"gas": "0x445708",
"input": "0x41c0e1b5",
"to": "0x8ee79c5b3f6e1d214d2c4fcf7ea4092a32e26e91",
"value": "0x0"
},
"blockHash": "0xf641c3b0f82b07cd3a528adb9927dd83eeb4f1682e2bd523ed36888e0d82c9a9",
"blockNumber": 553154,
"result": {
"gasUsed": "0x347a",
"output": "0x"
},
"subtraces": 1,
"traceAddress": [],
"transactionHash": "0x6af0a5c3188ffacae4d340d4a17e14fdb5a54187683a80ef241bde248189882b",
"transactionPosition": 15,
"type": "call"
},
{
"action": {
"address": "0x8ee79c5b3f6e1d214d2c4fcf7ea4092a32e26e91",
"balance": "0x0",
"refundAddress": "0x877bd459c9b7d8576b44e59e09d076c25946f443"
},
"blockHash": "0xf641c3b0f82b07cd3a528adb9927dd83eeb4f1682e2bd523ed36888e0d82c9a9",
"blockNumber": 553154,
"subtraces": 0,
"traceAddress": [
0
],
"transactionHash": "0x6af0a5c3188ffacae4d340d4a17e14fdb5a54187683a80ef241bde248189882b",
"transactionPosition": 15,
"type": "suicide"
}
]
}

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff Show More