Merge pull request #23 from openrelayxyz/merge/v1.10.13

Merge/v1.10.13
Philip Morlier 2021-11-29 11:28:23 -05:00 committed by GitHub
commit 5352d44915
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
217 changed files with 5330 additions and 2464 deletions

View File

@@ -120,36 +120,6 @@ jobs:
         - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
         - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
-    # This builder does the Linux Azure MIPS xgo uploads
-    - stage: build
-      if: type = push
-      os: linux
-      dist: bionic
-      services:
-        - docker
-      go: 1.17.x
-      env:
-        - azure-linux-mips
-        - GO111MODULE=on
-      git:
-        submodules: false # avoid cloning ethereum/tests
-      script:
-        - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
-        - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
-        - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
-        - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v
-        - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done
-        - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
-        - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v
-        - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done
-        - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
-        - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v
-        - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done
-        - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
     # This builder does the Android Maven and Azure uploads
     - stage: build
       if: type = push

View File

@@ -2,11 +2,7 @@
 # with Go source code. If you know what GOPATH is then you probably
 # don't need to bother with make.
-.PHONY: geth android ios geth-cross evm all test clean
-.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
-.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
-.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
-.PHONY: geth-windows geth-windows-386 geth-windows-amd64
+.PHONY: geth android ios evm all test clean
 GOBIN = ./build/bin
 GO ?= latest
@@ -53,95 +49,3 @@ devtools:
 	env GOBIN= go install ./cmd/abigen
 	@type "solc" 2> /dev/null || echo 'Please install solc'
 	@type "protoc" 2> /dev/null || echo 'Please install protoc'
-# Cross Compilation Targets (xgo)
-geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
-	@echo "Full cross compilation done:"
-	@ls -ld $(GOBIN)/geth-*
-geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le
-	@echo "Linux cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-*
-geth-linux-386:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
-	@echo "Linux 386 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep 386
-geth-linux-amd64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
-	@echo "Linux amd64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep amd64
-geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
-	@echo "Linux ARM cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm
-geth-linux-arm-5:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
-	@echo "Linux ARMv5 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm-5
-geth-linux-arm-6:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
-	@echo "Linux ARMv6 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm-6
-geth-linux-arm-7:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
-	@echo "Linux ARMv7 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm-7
-geth-linux-arm64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
-	@echo "Linux ARM64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep arm64
-geth-linux-mips:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
-	@echo "Linux MIPS cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep mips
-geth-linux-mipsle:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
-	@echo "Linux MIPSle cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep mipsle
-geth-linux-mips64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
-	@echo "Linux MIPS64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep mips64
-geth-linux-mips64le:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
-	@echo "Linux MIPS64le cross compilation done:"
-	@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
-geth-darwin: geth-darwin-386 geth-darwin-amd64
-	@echo "Darwin cross compilation done:"
-	@ls -ld $(GOBIN)/geth-darwin-*
-geth-darwin-386:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
-	@echo "Darwin 386 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-darwin-* | grep 386
-geth-darwin-amd64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
-	@echo "Darwin amd64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-darwin-* | grep amd64
-geth-windows: geth-windows-386 geth-windows-amd64
-	@echo "Windows cross compilation done:"
-	@ls -ld $(GOBIN)/geth-windows-*
-geth-windows-386:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
-	@echo "Windows 386 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-windows-* | grep 386
-geth-windows-amd64:
-	$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
-	@echo "Windows amd64 cross compilation done:"
-	@ls -ld $(GOBIN)/geth-windows-* | grep amd64

View File

@@ -462,6 +462,12 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
 // SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
 // chain doesn't have miners, we just return a gas price of 1 for any call.
 func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.pendingBlock.Header().BaseFee != nil {
+		return b.pendingBlock.Header().BaseFee, nil
+	}
 	return big.NewInt(1), nil
 }
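Note: after this change, a caller on the (London-enabled) simulated chain receives the pending block's base fee instead of the old constant 1. A minimal sketch of how that surfaces to users of the simulated backend; the empty genesis alloc and gas limit are placeholder values:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// The default simulated chain config enables London, so the
	// pending header carries a base fee.
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, 8000000)
	defer sim.Close()

	// Returns the pending block's base fee on EIP-1559 chains,
	// or big.NewInt(1) on pre-London configurations.
	gasPrice, err := sim.SuggestGasPrice(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("suggested gas price:", gasPrice)
}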

View File

@@ -916,8 +916,8 @@ func TestSuggestGasPrice(t *testing.T) {
 	if err != nil {
 		t.Errorf("could not get gas price: %v", err)
 	}
-	if gasPrice.Uint64() != uint64(1) {
-		t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64())
+	if gasPrice.Uint64() != sim.pendingBlock.Header().BaseFee.Uint64() {
+		t.Errorf("gas price was not expected value of %v. actual: %v", sim.pendingBlock.Header().BaseFee.Uint64(), gasPrice.Uint64())
 	}
 }

View File

@@ -370,7 +370,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
 		rawTx, err = c.createLegacyTx(opts, contract, input)
 	} else {
 		// Only query for basefee if gasPrice not specified
-		if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); err != nil {
+		if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil {
 			return nil, errHead
 		} else if head.BaseFee != nil {
 			rawTx, err = c.createDynamicTx(opts, contract, input, head)
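Note: the one-character fix above addresses a classic Go pitfall: the if initializer declares a fresh error (errHead) while the condition tested the outer err, so header-fetch failures were silently ignored. A standalone illustration with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

func fetchHeader() (string, error) {
	return "", errors.New("network down")
}

func main() {
	var err error // outer err: always nil here

	// Buggy shape: the condition tests the outer err, so the failure
	// returned by fetchHeader is silently ignored.
	if head, errHead := fetchHeader(); err != nil {
		_ = errHead // never reached
	} else {
		fmt.Println("using head despite the error:", head)
	}

	// Fixed shape: test the error the call actually returned.
	if head, errHead := fetchHeader(); errHead != nil {
		fmt.Println("fetch failed:", errHead)
	} else {
		fmt.Println("using head:", head)
	}
}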

View File

@@ -33,7 +33,6 @@ Available commands are:
    nsis -- creates a Windows NSIS installer
    aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive
    xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
-   xgo [ -alltools ] [ options ] -- cross builds according to options
    purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
 For all commands, -n prevents execution of external programs (dry run mode).
@@ -188,8 +187,6 @@ func main() {
 		doAndroidArchive(os.Args[2:])
 	case "xcode":
 		doXCodeFramework(os.Args[2:])
-	case "xgo":
-		doXgo(os.Args[2:])
 	case "purge":
 		doPurge(os.Args[2:])
 	default:
@@ -1209,48 +1206,6 @@ func newPodMetadata(env build.Environment, archive string) podMetadata {
 	}
 }
-// Cross compilation
-func doXgo(cmdline []string) {
-	var (
-		alltools = flag.Bool("alltools", false, `Flag whether we're building all known tools, or only on in particular`)
-	)
-	flag.CommandLine.Parse(cmdline)
-	env := build.Env()
-	var tc build.GoToolchain
-	// Make sure xgo is available for cross compilation
-	build.MustRun(tc.Install(GOBIN, "github.com/karalabe/xgo@latest"))
-	// If all tools building is requested, build everything the builder wants
-	args := append(buildFlags(env), flag.Args()...)
-	if *alltools {
-		args = append(args, []string{"--dest", GOBIN}...)
-		for _, res := range allToolsArchiveFiles {
-			if strings.HasPrefix(res, GOBIN) {
-				// Binary tool found, cross build it explicitly
-				args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
-				build.MustRun(xgoTool(args))
-				args = args[:len(args)-1]
-			}
-		}
-		return
-	}
-	// Otherwise execute the explicit cross compilation
-	path := args[len(args)-1]
-	args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...)
-	build.MustRun(xgoTool(args))
-}
-func xgoTool(args []string) *exec.Cmd {
-	cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
-	cmd.Env = os.Environ()
-	cmd.Env = append(cmd.Env, []string{"GOBIN=" + GOBIN}...)
-	return cmd
-}
 // Binary distribution cleanups
 func doPurge(cmdline []string) {

View File

@@ -133,7 +133,8 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
 		log.Info(fmt.Sprintf("Creating %s = %q", path, val))
 		ttl := rootTTL
 		if path != name {
-			ttl = treeNodeTTL // Max TTL permitted by Cloudflare
+			ttl = treeNodeTTLCloudflare // Max TTL permitted by Cloudflare
 		}
 		record := cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl}
 		_, err = c.CreateDNSRecord(context.Background(), c.zoneID, record)

View File

@@ -117,6 +117,7 @@ var (
 const (
 	rootTTL     = 30 * 60              // 30 min
 	treeNodeTTL = 4 * 7 * 24 * 60 * 60 // 4 weeks
+	treeNodeTTLCloudflare = 24 * 60 * 60 // 1 day
 )
 // dnsSync performs dnsSyncCommand.
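Note: the TTL constants are plain seconds, and the dedicated constant exists because Cloudflare caps record TTLs at one day (per the comment in the previous hunk). A throwaway Go sketch of the same values as durations:

package main

import (
	"fmt"
	"time"
)

func main() {
	fmt.Println(time.Duration(30*60) * time.Second)        // 30m0s (rootTTL)
	fmt.Println(time.Duration(4*7*24*60*60) * time.Second) // 672h0m0s, i.e. 4 weeks (treeNodeTTL)
	fmt.Println(time.Duration(24*60*60) * time.Second)     // 24h0m0s (treeNodeTTLCloudflare)
}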

View File

@@ -131,7 +131,7 @@ func (c *Conn) handshake() error {
 		}
 		c.negotiateEthProtocol(msg.Caps)
 		if c.negotiatedProtoVersion == 0 {
-			return fmt.Errorf("unexpected eth protocol version")
+			return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
 		}
 		return nil
 	default:

View File

@@ -52,35 +52,35 @@ func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, e
 func (s *Suite) AllEthTests() []utesting.Test {
 	return []utesting.Test{
 		// status
-		{Name: "TestStatus", Fn: s.TestStatus},
+		{Name: "TestStatus65", Fn: s.TestStatus65},
 		{Name: "TestStatus66", Fn: s.TestStatus66},
 		// get block headers
-		{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
+		{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65},
 		{Name: "TestGetBlockHeaders66", Fn: s.TestGetBlockHeaders66},
 		{Name: "TestSimultaneousRequests66", Fn: s.TestSimultaneousRequests66},
 		{Name: "TestSameRequestID66", Fn: s.TestSameRequestID66},
 		{Name: "TestZeroRequestID66", Fn: s.TestZeroRequestID66},
 		// get block bodies
-		{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
+		{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65},
 		{Name: "TestGetBlockBodies66", Fn: s.TestGetBlockBodies66},
 		// broadcast
-		{Name: "TestBroadcast", Fn: s.TestBroadcast},
+		{Name: "TestBroadcast65", Fn: s.TestBroadcast65},
 		{Name: "TestBroadcast66", Fn: s.TestBroadcast66},
-		{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
+		{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65},
 		{Name: "TestLargeAnnounce66", Fn: s.TestLargeAnnounce66},
-		{Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
+		{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65},
 		{Name: "TestOldAnnounce66", Fn: s.TestOldAnnounce66},
-		{Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce},
+		{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65},
 		{Name: "TestBlockHashAnnounce66", Fn: s.TestBlockHashAnnounce66},
 		// malicious handshakes + status
-		{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
-		{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
+		{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65},
+		{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65},
 		{Name: "TestMaliciousHandshake66", Fn: s.TestMaliciousHandshake66},
 		{Name: "TestMaliciousStatus66", Fn: s.TestMaliciousStatus66},
 		// test transactions
-		{Name: "TestTransaction", Fn: s.TestTransaction},
+		{Name: "TestTransaction65", Fn: s.TestTransaction65},
 		{Name: "TestTransaction66", Fn: s.TestTransaction66},
-		{Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
+		{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65},
 		{Name: "TestMaliciousTx66", Fn: s.TestMaliciousTx66},
 		{Name: "TestLargeTxRequest66", Fn: s.TestLargeTxRequest66},
 		{Name: "TestNewPooledTxs66", Fn: s.TestNewPooledTxs66},
@@ -89,17 +89,17 @@ func (s *Suite) AllEthTests() []utesting.Test {
 func (s *Suite) EthTests() []utesting.Test {
 	return []utesting.Test{
-		{Name: "TestStatus", Fn: s.TestStatus},
-		{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
-		{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
-		{Name: "TestBroadcast", Fn: s.TestBroadcast},
-		{Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce},
-		{Name: "TestOldAnnounce", Fn: s.TestOldAnnounce},
-		{Name: "TestBlockHashAnnounce", Fn: s.TestBlockHashAnnounce},
-		{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
-		{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
-		{Name: "TestTransaction", Fn: s.TestTransaction},
-		{Name: "TestMaliciousTx", Fn: s.TestMaliciousTx},
+		{Name: "TestStatus65", Fn: s.TestStatus65},
+		{Name: "TestGetBlockHeaders65", Fn: s.TestGetBlockHeaders65},
+		{Name: "TestGetBlockBodies65", Fn: s.TestGetBlockBodies65},
+		{Name: "TestBroadcast65", Fn: s.TestBroadcast65},
+		{Name: "TestLargeAnnounce65", Fn: s.TestLargeAnnounce65},
+		{Name: "TestOldAnnounce65", Fn: s.TestOldAnnounce65},
+		{Name: "TestBlockHashAnnounce65", Fn: s.TestBlockHashAnnounce65},
+		{Name: "TestMaliciousHandshake65", Fn: s.TestMaliciousHandshake65},
+		{Name: "TestMaliciousStatus65", Fn: s.TestMaliciousStatus65},
+		{Name: "TestTransaction65", Fn: s.TestTransaction65},
+		{Name: "TestMaliciousTx65", Fn: s.TestMaliciousTx65},
 	}
 }
@@ -130,9 +130,9 @@ var (
 	eth65 = false // indicates whether suite should negotiate eth65 connection or below.
 )
-// TestStatus attempts to connect to the given node and exchange
+// TestStatus65 attempts to connect to the given node and exchange
 // a status message with it.
-func (s *Suite) TestStatus(t *utesting.T) {
+func (s *Suite) TestStatus65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -156,9 +156,9 @@ func (s *Suite) TestStatus66(t *utesting.T) {
 	}
 }
-// TestGetBlockHeaders tests whether the given node can respond to
+// TestGetBlockHeaders65 tests whether the given node can respond to
 // a `GetBlockHeaders` request accurately.
-func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
+func (s *Suite) TestGetBlockHeaders65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -392,9 +392,9 @@ func (s *Suite) TestZeroRequestID66(t *utesting.T) {
 	}
 }
-// TestGetBlockBodies tests whether the given node can respond to
+// TestGetBlockBodies65 tests whether the given node can respond to
 // a `GetBlockBodies` request and that the response is accurate.
-func (s *Suite) TestGetBlockBodies(t *utesting.T) {
+func (s *Suite) TestGetBlockBodies65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -460,9 +460,9 @@ func (s *Suite) TestGetBlockBodies66(t *utesting.T) {
 	}
 }
-// TestBroadcast tests whether a block announcement is correctly
+// TestBroadcast65 tests whether a block announcement is correctly
 // propagated to the given node's peer(s).
-func (s *Suite) TestBroadcast(t *utesting.T) {
+func (s *Suite) TestBroadcast65(t *utesting.T) {
 	if err := s.sendNextBlock(eth65); err != nil {
 		t.Fatalf("block broadcast failed: %v", err)
 	}
@@ -476,8 +476,8 @@ func (s *Suite) TestBroadcast66(t *utesting.T) {
 	}
 }
-// TestLargeAnnounce tests the announcement mechanism with a large block.
-func (s *Suite) TestLargeAnnounce(t *utesting.T) {
+// TestLargeAnnounce65 tests the announcement mechanism with a large block.
+func (s *Suite) TestLargeAnnounce65(t *utesting.T) {
 	nextBlock := len(s.chain.blocks)
 	blocks := []*NewBlock{
 		{
@@ -569,8 +569,8 @@ func (s *Suite) TestLargeAnnounce66(t *utesting.T) {
 	}
 }
-// TestOldAnnounce tests the announcement mechanism with an old block.
-func (s *Suite) TestOldAnnounce(t *utesting.T) {
+// TestOldAnnounce65 tests the announcement mechanism with an old block.
+func (s *Suite) TestOldAnnounce65(t *utesting.T) {
 	if err := s.oldAnnounce(eth65); err != nil {
 		t.Fatal(err)
 	}
@@ -584,9 +584,9 @@ func (s *Suite) TestOldAnnounce66(t *utesting.T) {
 	}
 }
-// TestBlockHashAnnounce sends a new block hash announcement and expects
+// TestBlockHashAnnounce65 sends a new block hash announcement and expects
 // the node to perform a `GetBlockHeaders` request.
-func (s *Suite) TestBlockHashAnnounce(t *utesting.T) {
+func (s *Suite) TestBlockHashAnnounce65(t *utesting.T) {
 	if err := s.hashAnnounce(eth65); err != nil {
 		t.Fatalf("block hash announcement failed: %v", err)
 	}
@@ -600,8 +600,8 @@ func (s *Suite) TestBlockHashAnnounce66(t *utesting.T) {
 	}
 }
-// TestMaliciousHandshake tries to send malicious data during the handshake.
-func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
+// TestMaliciousHandshake65 tries to send malicious data during the handshake.
+func (s *Suite) TestMaliciousHandshake65(t *utesting.T) {
 	if err := s.maliciousHandshakes(t, eth65); err != nil {
 		t.Fatal(err)
 	}
@@ -614,8 +614,8 @@ func (s *Suite) TestMaliciousHandshake66(t *utesting.T) {
 	}
 }
-// TestMaliciousStatus sends a status package with a large total difficulty.
-func (s *Suite) TestMaliciousStatus(t *utesting.T) {
+// TestMaliciousStatus65 sends a status package with a large total difficulty.
+func (s *Suite) TestMaliciousStatus65(t *utesting.T) {
 	conn, err := s.dial()
 	if err != nil {
 		t.Fatalf("dial failed: %v", err)
@@ -641,9 +641,9 @@ func (s *Suite) TestMaliciousStatus66(t *utesting.T) {
 	}
 }
-// TestTransaction sends a valid transaction to the node and
+// TestTransaction65 sends a valid transaction to the node and
 // checks if the transaction gets propagated.
-func (s *Suite) TestTransaction(t *utesting.T) {
+func (s *Suite) TestTransaction65(t *utesting.T) {
 	if err := s.sendSuccessfulTxs(t, eth65); err != nil {
 		t.Fatal(err)
 	}
@@ -657,9 +657,9 @@ func (s *Suite) TestTransaction66(t *utesting.T) {
 	}
 }
-// TestMaliciousTx sends several invalid transactions and tests whether
+// TestMaliciousTx65 sends several invalid transactions and tests whether
 // the node will propagate them.
-func (s *Suite) TestMaliciousTx(t *utesting.T) {
+func (s *Suite) TestMaliciousTx65(t *utesting.T) {
 	if err := s.sendMaliciousTxs(t, eth65); err != nil {
 		t.Fatal(err)
 	}

View File

@@ -235,6 +235,8 @@ func ethFilter(args []string) (nodeFilter, error) {
 		filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
 	case "ropsten":
 		filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash)
+	case "sepolia":
+		filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash)
 	default:
 		return nil, fmt.Errorf("unknown network %q", args[0])
 	}
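Note: this wires Sepolia into the same static fork-ID filter used for the other named networks. A minimal sketch of what such a filter accepts, assuming the forkid API as it exists at this release:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Filter for Sepolia, exactly as constructed in the new case above.
	filter := forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash)

	// A correctly configured Sepolia node at genesis advertises this ID;
	// the filter returns nil for compatible IDs and an error otherwise.
	id := forkid.NewID(params.SepoliaChainConfig, params.SepoliaGenesisHash, 0)
	fmt.Println("compatible:", filter(id) == nil)
}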

View File

@@ -0,0 +1,380 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package t8ntool
import (
"crypto/ecdsa"
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"gopkg.in/urfave/cli.v1"
)
//go:generate gencodec -type header -field-override headerMarshaling -out gen_header.go
type header struct {
ParentHash common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom types.Bloom `json:"logsBloom"`
Difficulty *big.Int `json:"difficulty"`
Number *big.Int `json:"number" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed"`
Time uint64 `json:"timestamp" gencodec:"required"`
Extra []byte `json:"extraData"`
MixDigest common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"`
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
}
type headerMarshaling struct {
Difficulty *math.HexOrDecimal256
Number *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64
Time math.HexOrDecimal64
Extra hexutil.Bytes
BaseFee *math.HexOrDecimal256
}
type bbInput struct {
Header *header `json:"header,omitempty"`
OmmersRlp []string `json:"ommers,omitempty"`
TxRlp string `json:"txs,omitempty"`
Clique *cliqueInput `json:"clique,omitempty"`
Ethash bool `json:"-"`
EthashDir string `json:"-"`
PowMode ethash.Mode `json:"-"`
Txs []*types.Transaction `json:"-"`
Ommers []*types.Header `json:"-"`
}
type cliqueInput struct {
Key *ecdsa.PrivateKey
Voted *common.Address
Authorize *bool
Vanity common.Hash
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (c *cliqueInput) UnmarshalJSON(input []byte) error {
var x struct {
Key *common.Hash `json:"secretKey"`
Voted *common.Address `json:"voted"`
Authorize *bool `json:"authorize"`
Vanity common.Hash `json:"vanity"`
}
if err := json.Unmarshal(input, &x); err != nil {
return err
}
if x.Key == nil {
return errors.New("missing required field 'secretKey' for cliqueInput")
}
if ecdsaKey, err := crypto.ToECDSA(x.Key[:]); err != nil {
return err
} else {
c.Key = ecdsaKey
}
c.Voted = x.Voted
c.Authorize = x.Authorize
c.Vanity = x.Vanity
return nil
}
// ToBlock converts i into a *types.Block
func (i *bbInput) ToBlock() *types.Block {
header := &types.Header{
ParentHash: i.Header.ParentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: common.Address{},
Root: i.Header.Root,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
Bloom: i.Header.Bloom,
Difficulty: common.Big0,
Number: i.Header.Number,
GasLimit: i.Header.GasLimit,
GasUsed: i.Header.GasUsed,
Time: i.Header.Time,
Extra: i.Header.Extra,
MixDigest: i.Header.MixDigest,
BaseFee: i.Header.BaseFee,
}
// Fill optional values.
if i.Header.OmmerHash != nil {
header.UncleHash = *i.Header.OmmerHash
} else if len(i.Ommers) != 0 {
// Calculate the ommer hash if none is provided and there are ommers to hash
header.UncleHash = types.CalcUncleHash(i.Ommers)
}
if i.Header.Coinbase != nil {
header.Coinbase = *i.Header.Coinbase
}
if i.Header.TxHash != nil {
header.TxHash = *i.Header.TxHash
}
if i.Header.ReceiptHash != nil {
header.ReceiptHash = *i.Header.ReceiptHash
}
if i.Header.Nonce != nil {
header.Nonce = *i.Header.Nonce
}
if header.Difficulty != nil {
header.Difficulty = i.Header.Difficulty
}
return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers)
}
// SealBlock seals the given block using the configured engine.
func (i *bbInput) SealBlock(block *types.Block) (*types.Block, error) {
switch {
case i.Ethash:
return i.sealEthash(block)
case i.Clique != nil:
return i.sealClique(block)
default:
return block, nil
}
}
// sealEthash seals the given block using ethash.
func (i *bbInput) sealEthash(block *types.Block) (*types.Block, error) {
if i.Header.Nonce != nil {
return nil, NewError(ErrorConfig, fmt.Errorf("sealing with ethash will overwrite provided nonce"))
}
ethashConfig := ethash.Config{
PowMode: i.PowMode,
DatasetDir: i.EthashDir,
CacheDir: i.EthashDir,
DatasetsInMem: 1,
DatasetsOnDisk: 2,
CachesInMem: 2,
CachesOnDisk: 3,
}
engine := ethash.New(ethashConfig, nil, true)
defer engine.Close()
// Use a buffered chan for results.
// If the testmode is used, the sealer will return quickly, and complain
// "Sealing result is not read by miner" if it cannot write the result.
results := make(chan *types.Block, 1)
if err := engine.Seal(nil, block, results, nil); err != nil {
panic(fmt.Sprintf("failed to seal block: %v", err))
}
found := <-results
return block.WithSeal(found.Header()), nil
}
// sealClique seals the given block using clique.
func (i *bbInput) sealClique(block *types.Block) (*types.Block, error) {
// If any clique value overwrites an explicit header value, fail
// to avoid silently building a block with unexpected values.
if i.Header.Extra != nil {
return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique will overwrite provided extra data"))
}
header := block.Header()
if i.Clique.Voted != nil {
if i.Header.Coinbase != nil {
return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided coinbase"))
}
header.Coinbase = *i.Clique.Voted
}
if i.Clique.Authorize != nil {
if i.Header.Nonce != nil {
return nil, NewError(ErrorConfig, fmt.Errorf("sealing with clique and voting will overwrite provided nonce"))
}
if *i.Clique.Authorize {
header.Nonce = [8]byte{}
} else {
header.Nonce = [8]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
}
}
// Extra is fixed 32 byte vanity and 65 byte signature
header.Extra = make([]byte, 32+65)
copy(header.Extra[0:32], i.Clique.Vanity.Bytes()[:])
// Sign the seal hash and fill in the rest of the extra data
h := clique.SealHash(header)
sighash, err := crypto.Sign(h[:], i.Clique.Key)
if err != nil {
return nil, err
}
copy(header.Extra[32:], sighash)
block = block.WithSeal(header)
return block, nil
}
// BuildBlock constructs a block from the given inputs.
func BuildBlock(ctx *cli.Context) error {
// Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
baseDir, err := createBasedir(ctx)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
}
inputData, err := readInput(ctx)
if err != nil {
return err
}
block := inputData.ToBlock()
block, err = inputData.SealBlock(block)
if err != nil {
return err
}
return dispatchBlock(ctx, baseDir, block)
}
func readInput(ctx *cli.Context) (*bbInput, error) {
var (
headerStr = ctx.String(InputHeaderFlag.Name)
ommersStr = ctx.String(InputOmmersFlag.Name)
txsStr = ctx.String(InputTxsRlpFlag.Name)
cliqueStr = ctx.String(SealCliqueFlag.Name)
ethashOn = ctx.Bool(SealEthashFlag.Name)
ethashDir = ctx.String(SealEthashDirFlag.Name)
ethashMode = ctx.String(SealEthashModeFlag.Name)
inputData = &bbInput{}
)
if ethashOn && cliqueStr != "" {
return nil, NewError(ErrorConfig, fmt.Errorf("both ethash and clique sealing specified, only one may be chosen"))
}
if ethashOn {
inputData.Ethash = ethashOn
inputData.EthashDir = ethashDir
switch ethashMode {
case "normal":
inputData.PowMode = ethash.ModeNormal
case "test":
inputData.PowMode = ethash.ModeTest
case "fake":
inputData.PowMode = ethash.ModeFake
default:
return nil, NewError(ErrorConfig, fmt.Errorf("unknown pow mode: %s, supported modes: test, fake, normal", ethashMode))
}
}
if headerStr == stdinSelector || ommersStr == stdinSelector || txsStr == stdinSelector || cliqueStr == stdinSelector {
decoder := json.NewDecoder(os.Stdin)
if err := decoder.Decode(inputData); err != nil {
return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
}
}
if cliqueStr != stdinSelector && cliqueStr != "" {
var clique cliqueInput
if err := readFile(cliqueStr, "clique", &clique); err != nil {
return nil, err
}
inputData.Clique = &clique
}
if headerStr != stdinSelector {
var env header
if err := readFile(headerStr, "header", &env); err != nil {
return nil, err
}
inputData.Header = &env
}
if ommersStr != stdinSelector && ommersStr != "" {
var ommers []string
if err := readFile(ommersStr, "ommers", &ommers); err != nil {
return nil, err
}
inputData.OmmersRlp = ommers
}
if txsStr != stdinSelector {
var txs string
if err := readFile(txsStr, "txs", &txs); err != nil {
return nil, err
}
inputData.TxRlp = txs
}
// Deserialize rlp txs and ommers
var (
ommers = []*types.Header{}
txs = []*types.Transaction{}
)
if inputData.TxRlp != "" {
if err := rlp.DecodeBytes(common.FromHex(inputData.TxRlp), &txs); err != nil {
return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode transaction from rlp data: %v", err))
}
inputData.Txs = txs
}
for _, str := range inputData.OmmersRlp {
type extblock struct {
Header *types.Header
Txs []*types.Transaction
Ommers []*types.Header
}
var ommer *extblock
if err := rlp.DecodeBytes(common.FromHex(str), &ommer); err != nil {
return nil, NewError(ErrorRlp, fmt.Errorf("unable to decode ommer from rlp data: %v", err))
}
ommers = append(ommers, ommer.Header)
}
inputData.Ommers = ommers
return inputData, nil
}
// dispatchBlock writes the output data to either stderr or stdout, or to the specified
// files
func dispatchBlock(ctx *cli.Context, baseDir string, block *types.Block) error {
raw, _ := rlp.EncodeToBytes(block)
type blockInfo struct {
Rlp hexutil.Bytes `json:"rlp"`
Hash common.Hash `json:"hash"`
}
var enc blockInfo
enc.Rlp = raw
enc.Hash = block.Hash()
b, err := json.MarshalIndent(enc, "", " ")
if err != nil {
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
}
switch dest := ctx.String(OutputBlockFlag.Name); dest {
case "stdout":
os.Stdout.Write(b)
os.Stdout.WriteString("\n")
case "stderr":
os.Stderr.Write(b)
os.Stderr.WriteString("\n")
default:
if err := saveFile(baseDir, dest, enc); err != nil {
return err
}
}
return nil
}
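Note: of the header fields defined above, only stateRoot, number, gasLimit and timestamp carry gencodec:"required", so a minimal header document for the block builder needs just those four. A hedged sketch that emits such a document; all values are placeholders:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Only the gencodec:"required" fields of the header type above;
	// everything else (miner, nonce, baseFeePerGas, ...) is optional.
	header := map[string]string{
		"stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
		"number":    "0x1",
		"gasLimit":  "0x5f5e100",
		"timestamp": "0x64",
	}
	out, _ := json.MarshalIndent(header, "", "  ")
	fmt.Println(string(out)) // feed to the block builder via --input.header
}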

View File

@@ -49,12 +49,13 @@ type Prestate struct {
 type ExecutionResult struct {
 	StateRoot   common.Hash           `json:"stateRoot"`
 	TxRoot      common.Hash           `json:"txRoot"`
-	ReceiptRoot common.Hash           `json:"receiptRoot"`
+	ReceiptRoot common.Hash           `json:"receiptsRoot"`
 	LogsHash    common.Hash           `json:"logsHash"`
 	Bloom       types.Bloom           `json:"logsBloom" gencodec:"required"`
 	Receipts    types.Receipts        `json:"receipts"`
 	Rejected    []*rejectedTx         `json:"rejected,omitempty"`
 	Difficulty  *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"`
+	GasUsed     math.HexOrDecimal64   `json:"gasUsed"`
 }
 type ommer struct {
@@ -96,7 +97,7 @@ type rejectedTx struct {
 // Apply applies a set of transactions to a pre-state
 func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 	txs types.Transactions, miningReward int64,
-	getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) {
+	getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) {
 	// Capture errors for BLOCKHASH operation, if we haven't been supplied the
 	// required blockhashes
@@ -255,6 +256,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 		Receipts:    receipts,
 		Rejected:    rejectedTxs,
 		Difficulty:  (*math.HexOrDecimal256)(vmContext.Difficulty),
+		GasUsed:     (math.HexOrDecimal64)(gasUsed),
 	}
 	return statedb, execRs, nil
 }
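Note: the new GasUsed field is a math.HexOrDecimal64, so it serializes as a hex quantity in the result JSON. A quick sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	v := struct {
		GasUsed math.HexOrDecimal64 `json:"gasUsed"`
	}{GasUsed: math.HexOrDecimal64(21000)}
	out, _ := json.Marshal(v)
	fmt.Println(string(out)) // {"gasUsed":"0x5208"}
}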

View File

@@ -32,7 +32,11 @@ var (
 	}
 	TraceDisableMemoryFlag = cli.BoolTFlag{
 		Name:  "trace.nomemory",
-		Usage: "Disable full memory dump in traces",
+		Usage: "Disable full memory dump in traces (deprecated)",
+	}
+	TraceEnableMemoryFlag = cli.BoolFlag{
+		Name:  "trace.memory",
+		Usage: "Enable full memory dump in traces",
 	}
 	TraceDisableStackFlag = cli.BoolFlag{
 		Name:  "trace.nostack",
@@ -40,7 +44,11 @@ var (
 	}
 	TraceDisableReturnDataFlag = cli.BoolTFlag{
 		Name:  "trace.noreturndata",
-		Usage: "Disable return data output in traces",
+		Usage: "Disable return data output in traces (deprecated)",
+	}
+	TraceEnableReturnDataFlag = cli.BoolFlag{
+		Name:  "trace.returndata",
+		Usage: "Enable return data output in traces",
 	}
 	OutputBasedir = cli.StringFlag{
 		Name:  "output.basedir",
@@ -68,6 +76,14 @@ var (
 			"\t<file> - into the file <file> ",
 		Value: "result.json",
 	}
+	OutputBlockFlag = cli.StringFlag{
+		Name: "output.block",
+		Usage: "Determines where to put the `block` after building.\n" +
+			"\t`stdout` - into the stdout output\n" +
+			"\t`stderr` - into the stderr output\n" +
+			"\t<file> - into the file <file> ",
+		Value: "block.json",
+	}
 	InputAllocFlag = cli.StringFlag{
 		Name:  "input.alloc",
 		Usage: "`stdin` or file name of where to find the prestate alloc to use.",
@@ -81,10 +97,41 @@ var (
 	InputTxsFlag = cli.StringFlag{
 		Name: "input.txs",
 		Usage: "`stdin` or file name of where to find the transactions to apply. " +
-			"If the file prefix is '.rlp', then the data is interpreted as an RLP list of signed transactions." +
+			"If the file extension is '.rlp', then the data is interpreted as an RLP list of signed transactions." +
 			"The '.rlp' format is identical to the output.body format.",
 		Value: "txs.json",
 	}
+	InputHeaderFlag = cli.StringFlag{
+		Name:  "input.header",
+		Usage: "`stdin` or file name of where to find the block header to use.",
+		Value: "header.json",
+	}
+	InputOmmersFlag = cli.StringFlag{
+		Name:  "input.ommers",
+		Usage: "`stdin` or file name of where to find the list of ommer header RLPs to use.",
+	}
+	InputTxsRlpFlag = cli.StringFlag{
+		Name:  "input.txs",
+		Usage: "`stdin` or file name of where to find the transactions list in RLP form.",
+		Value: "txs.rlp",
+	}
+	SealCliqueFlag = cli.StringFlag{
+		Name:  "seal.clique",
+		Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.",
+	}
+	SealEthashFlag = cli.BoolFlag{
+		Name:  "seal.ethash",
+		Usage: "Seal block with ethash.",
+	}
+	SealEthashDirFlag = cli.StringFlag{
+		Name:  "seal.ethash.dir",
+		Usage: "Path to ethash DAG. If none exists, a new DAG will be generated.",
+	}
+	SealEthashModeFlag = cli.StringFlag{
+		Name:  "seal.ethash.mode",
+		Usage: "Defines the type and amount of PoW verification an ethash engine makes.",
+		Value: "normal",
+	}
 	RewardFlag = cli.Int64Flag{
 		Name:  "state.reward",
 		Usage: "Mining reward. Set to -1 to disable",

View File

@@ -0,0 +1,135 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package t8ntool
import (
"encoding/json"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
)
var _ = (*headerMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (h header) MarshalJSON() ([]byte, error) {
type header struct {
ParentHash common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"`
Root common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom types.Bloom `json:"logsBloom"`
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
Time math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
Extra hexutil.Bytes `json:"extraData"`
MixDigest common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
}
var enc header
enc.ParentHash = h.ParentHash
enc.OmmerHash = h.OmmerHash
enc.Coinbase = h.Coinbase
enc.Root = h.Root
enc.TxHash = h.TxHash
enc.ReceiptHash = h.ReceiptHash
enc.Bloom = h.Bloom
enc.Difficulty = (*math.HexOrDecimal256)(h.Difficulty)
enc.Number = (*math.HexOrDecimal256)(h.Number)
enc.GasLimit = math.HexOrDecimal64(h.GasLimit)
enc.GasUsed = math.HexOrDecimal64(h.GasUsed)
enc.Time = math.HexOrDecimal64(h.Time)
enc.Extra = h.Extra
enc.MixDigest = h.MixDigest
enc.Nonce = h.Nonce
enc.BaseFee = (*math.HexOrDecimal256)(h.BaseFee)
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (h *header) UnmarshalJSON(input []byte) error {
type header struct {
ParentHash *common.Hash `json:"parentHash"`
OmmerHash *common.Hash `json:"sha3Uncles"`
Coinbase *common.Address `json:"miner"`
Root *common.Hash `json:"stateRoot" gencodec:"required"`
TxHash *common.Hash `json:"transactionsRoot"`
ReceiptHash *common.Hash `json:"receiptsRoot"`
Bloom *types.Bloom `json:"logsBloom"`
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
Number *math.HexOrDecimal256 `json:"number" gencodec:"required"`
GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
Time *math.HexOrDecimal64 `json:"timestamp" gencodec:"required"`
Extra *hexutil.Bytes `json:"extraData"`
MixDigest *common.Hash `json:"mixHash"`
Nonce *types.BlockNonce `json:"nonce"`
BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas" rlp:"optional"`
}
var dec header
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.ParentHash != nil {
h.ParentHash = *dec.ParentHash
}
if dec.OmmerHash != nil {
h.OmmerHash = dec.OmmerHash
}
if dec.Coinbase != nil {
h.Coinbase = dec.Coinbase
}
if dec.Root == nil {
return errors.New("missing required field 'stateRoot' for header")
}
h.Root = *dec.Root
if dec.TxHash != nil {
h.TxHash = dec.TxHash
}
if dec.ReceiptHash != nil {
h.ReceiptHash = dec.ReceiptHash
}
if dec.Bloom != nil {
h.Bloom = *dec.Bloom
}
if dec.Difficulty != nil {
h.Difficulty = (*big.Int)(dec.Difficulty)
}
if dec.Number == nil {
return errors.New("missing required field 'number' for header")
}
h.Number = (*big.Int)(dec.Number)
if dec.GasLimit == nil {
return errors.New("missing required field 'gasLimit' for header")
}
h.GasLimit = uint64(*dec.GasLimit)
if dec.GasUsed != nil {
h.GasUsed = uint64(*dec.GasUsed)
}
if dec.Time == nil {
return errors.New("missing required field 'timestamp' for header")
}
h.Time = uint64(*dec.Time)
if dec.Extra != nil {
h.Extra = *dec.Extra
}
if dec.MixDigest != nil {
h.MixDigest = *dec.MixDigest
}
if dec.Nonce != nil {
h.Nonce = dec.Nonce
}
if dec.BaseFee != nil {
h.BaseFee = (*big.Int)(dec.BaseFee)
}
return nil
}
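Note: the generated marshaller above enforces the gencodec:"required" tags by decoding into a shadow struct of pointer fields and rejecting any that stay nil. A condensed, self-contained illustration of the same pattern (a hypothetical two-field struct, with plain types in place of the hex-aware ones):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type header struct {
	Root   string
	Number uint64
}

// UnmarshalJSON decodes into pointer fields first, so a missing key is
// distinguishable from a zero value, then checks the required ones.
func (h *header) UnmarshalJSON(input []byte) error {
	type shadow struct {
		Root   *string `json:"stateRoot"`
		Number *uint64 `json:"number"`
	}
	var dec shadow
	if err := json.Unmarshal(input, &dec); err != nil {
		return err
	}
	if dec.Root == nil {
		return errors.New("missing required field 'stateRoot' for header")
	}
	h.Root = *dec.Root
	if dec.Number == nil {
		return errors.New("missing required field 'number' for header")
	}
	h.Number = *dec.Number
	return nil
}

func main() {
	var h header
	err := json.Unmarshal([]byte(`{"stateRoot":"0xdead"}`), &h)
	fmt.Println(err) // missing required field 'number' for header
}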

View File

@@ -48,7 +48,7 @@ func (r *result) MarshalJSON() ([]byte, error) {
 		Error        string          `json:"error,omitempty"`
 		Address      *common.Address `json:"address,omitempty"`
 		Hash         *common.Hash    `json:"hash,omitempty"`
-		IntrinsicGas uint64          `json:"intrinsicGas,omitempty"`
+		IntrinsicGas hexutil.Uint64  `json:"intrinsicGas,omitempty"`
 	}
 	var out xx
 	if r.Error != nil {
@@ -60,7 +60,7 @@ func (r *result) MarshalJSON() ([]byte, error) {
 	if r.Hash != (common.Hash{}) {
 		out.Hash = &r.Hash
 	}
-	out.IntrinsicGas = r.IntrinsicGas
+	out.IntrinsicGas = hexutil.Uint64(r.IntrinsicGas)
 	return json.Marshal(out)
 }
@@ -82,7 +82,7 @@ func Transaction(ctx *cli.Context) error {
 	)
 	// Construct the chainconfig
 	if cConf, _, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
-		return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
+		return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
 	} else {
 		chainConfig = cConf
 	}
@@ -121,6 +121,9 @@ func Transaction(ctx *cli.Context) error {
 	}
 	var results []result
 	for it.Next() {
+		if err := it.Err(); err != nil {
+			return NewError(ErrorIO, err)
+		}
 		var tx types.Transaction
 		err := rlp.DecodeBytes(it.Value(), &tx)
 		if err != nil {
@@ -151,6 +154,8 @@ func Transaction(ctx *cli.Context) error {
 		}
 		// Validate <256bit fields
 		switch {
+		case tx.Nonce()+1 < tx.Nonce():
+			r.Error = errors.New("nonce exceeds 2^64-1")
 		case tx.Value().BitLen() > 256:
 			r.Error = errors.New("value exceeds 256 bits")
 		case tx.GasPrice().BitLen() > 256:
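Note: the added nonce check relies on uint64 wraparound: n+1 < n holds only for n = 2^64-1, which is exactly the value the validator needs to reject. A throwaway sketch:

package main

import (
	"fmt"
	"math"
)

func main() {
	for _, n := range []uint64{0, 42, math.MaxUint64} {
		// n+1 wraps to 0 only at 2^64-1, so the comparison flags
		// precisely the out-of-range nonce.
		fmt.Printf("n=%d overflows: %v\n", n, n+1 < n)
	}
}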

View File

@@ -43,11 +43,12 @@ import (
 const (
 	ErrorEVM              = 2
-	ErrorVMConfig         = 3
+	ErrorConfig           = 3
 	ErrorMissingBlockhash = 4
 	ErrorJson = 10
 	ErrorIO   = 11
+	ErrorRlp  = 12
 	stdinSelector = "stdin"
 )
@@ -89,27 +90,32 @@ func Transition(ctx *cli.Context) error {
 	var (
 		err    error
-		tracer vm.Tracer
-		baseDir = ""
+		tracer vm.EVMLogger
 	)
-	var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error)
+	var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)
-	// If user specified a basedir, make sure it exists
-	if ctx.IsSet(OutputBasedir.Name) {
-		if base := ctx.String(OutputBasedir.Name); len(base) > 0 {
-			err := os.MkdirAll(base, 0755) // //rw-r--r--
-			if err != nil {
-				return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
-			}
-			baseDir = base
-		}
-	}
+	baseDir, err := createBasedir(ctx)
+	if err != nil {
+		return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
+	}
 	if ctx.Bool(TraceFlag.Name) {
+		if ctx.IsSet(TraceDisableMemoryFlag.Name) && ctx.IsSet(TraceEnableMemoryFlag.Name) {
+			return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
+		}
+		if ctx.IsSet(TraceDisableReturnDataFlag.Name) && ctx.IsSet(TraceEnableReturnDataFlag.Name) {
+			return NewError(ErrorConfig, fmt.Errorf("can't use both flags --%s and --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
+		}
+		if ctx.IsSet(TraceDisableMemoryFlag.Name) {
+			log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableMemoryFlag.Name, TraceEnableMemoryFlag.Name))
+		}
+		if ctx.IsSet(TraceDisableReturnDataFlag.Name) {
+			log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name))
+		}
 		// Configure the EVM logger
 		logConfig := &vm.LogConfig{
 			DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
-			EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name),
-			EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name),
+			EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name) || ctx.Bool(TraceEnableMemoryFlag.Name),
+			EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name) || ctx.Bool(TraceEnableReturnDataFlag.Name),
 			Debug: true,
 		}
 		var prevFile *os.File
@@ -119,7 +125,7 @@ func Transition(ctx *cli.Context) error {
 				prevFile.Close()
 			}
 		}()
-		getTracer = func(txIndex int, txHash common.Hash) (vm.Tracer, error) {
+		getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) {
 			if prevFile != nil {
 				prevFile.Close()
 			}
@@ -131,7 +137,7 @@ func Transition(ctx *cli.Context) error {
 			return vm.NewJSONLogger(logConfig, traceFile), nil
 		}
 	} else {
-		getTracer = func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error) {
+		getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) {
 			return nil, nil
 		}
 	}
@@ -155,29 +161,17 @@ func Transition(ctx *cli.Context) error {
 		}
 	}
 	if allocStr != stdinSelector {
-		inFile, err := os.Open(allocStr)
-		if err != nil {
-			return NewError(ErrorIO, fmt.Errorf("failed reading alloc file: %v", err))
-		}
-		defer inFile.Close()
-		decoder := json.NewDecoder(inFile)
-		if err := decoder.Decode(&inputData.Alloc); err != nil {
-			return NewError(ErrorJson, fmt.Errorf("failed unmarshaling alloc-file: %v", err))
+		if err := readFile(allocStr, "alloc", &inputData.Alloc); err != nil {
+			return err
 		}
 	}
 	prestate.Pre = inputData.Alloc
 	// Set the block environment
 	if envStr != stdinSelector {
-		inFile, err := os.Open(envStr)
-		if err != nil {
-			return NewError(ErrorIO, fmt.Errorf("failed reading env file: %v", err))
-		}
-		defer inFile.Close()
-		decoder := json.NewDecoder(inFile)
 		var env stEnv
-		if err := decoder.Decode(&env); err != nil {
-			return NewError(ErrorJson, fmt.Errorf("failed unmarshaling env-file: %v", err))
+		if err := readFile(envStr, "env", &env); err != nil {
+			return err
 		}
 		inputData.Env = &env
 	}
@@ -190,7 +184,7 @@ func Transition(ctx *cli.Context) error {
 	// Construct the chainconfig
 	var chainConfig *params.ChainConfig
 	if cConf, extraEips, err := tests.GetChainConfig(ctx.String(ForknameFlag.Name)); err != nil {
-		return NewError(ErrorVMConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
+		return NewError(ErrorConfig, fmt.Errorf("failed constructing chain configuration: %v", err))
 	} else {
 		chainConfig = cConf
 		vmConfig.ExtraEips = extraEips
@@ -254,18 +248,18 @@ func Transition(ctx *cli.Context) error {
 	// Sanity check, to not `panic` in state_transition
 	if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
 		if prestate.Env.BaseFee == nil {
-			return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
+			return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
 		}
 	}
 	if env := prestate.Env; env.Difficulty == nil {
 		// If difficulty was not provided by caller, we need to calculate it.
 		switch {
 		case env.ParentDifficulty == nil:
-			return NewError(ErrorVMConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
+			return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
 		case env.Number == 0:
-			return NewError(ErrorVMConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
+			return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
 		case env.Timestamp <= env.ParentTimestamp:
-			return NewError(ErrorVMConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
+			return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
 				env.Timestamp, env.ParentTimestamp))
 		}
 		prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
@@ -288,25 +282,32 @@ func Transition(ctx *cli.Context) error {
 type txWithKey struct {
 	key *ecdsa.PrivateKey
 	tx  *types.Transaction
+	protected bool
 }
 func (t *txWithKey) UnmarshalJSON(input []byte) error {
-	// Read the secretKey, if present
-	type sKey struct {
+	// Read the metadata, if present
+	type txMetadata struct {
 		Key *common.Hash `json:"secretKey"`
+		Protected *bool `json:"protected"`
 	}
-	var key sKey
-	if err := json.Unmarshal(input, &key); err != nil {
+	var data txMetadata
+	if err := json.Unmarshal(input, &data); err != nil {
 		return err
 	}
-	if key.Key != nil {
-		k := key.Key.Hex()[2:]
+	if data.Key != nil {
+		k := data.Key.Hex()[2:]
 		if ecdsaKey, err := crypto.HexToECDSA(k); err != nil {
 			return err
 		} else {
 			t.key = ecdsaKey
 		}
 	}
+	if data.Protected != nil {
+		t.protected = *data.Protected
+	} else {
+		t.protected = true
+	}
 	// Now, read the transaction itself
 	var tx types.Transaction
 	if err := json.Unmarshal(input, &tx); err != nil {
@@ -335,7 +336,15 @@ func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Tran
 		v, r, s := tx.RawSignatureValues()
 		if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 {
 			// This transaction needs to be signed
-			signed, err := types.SignTx(tx, signer, key)
+			var (
+				signed *types.Transaction
+				err    error
+			)
+			if txWithKey.protected {
+				signed, err = types.SignTx(tx, signer, key)
+			} else {
+				signed, err = types.SignTx(tx, types.FrontierSigner{}, key)
+			}
 			if err != nil {
 				return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err))
 			}
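Note, for background on the new protected field: a transaction defaults to EIP-155 (chain-ID-bound) signing, while "protected": false selects the legacy Frontier scheme, whose signatures replay on any chain. A minimal sketch; chain ID 1 is an arbitrary placeholder:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	to := common.HexToAddress("0x000000000000000000000000000000000000dead")
	tx := types.NewTransaction(0, to, big.NewInt(1), 21000, big.NewInt(1), nil)

	// Protected (the default): the EIP-155 signer folds the chain ID
	// into V, so the signature only replays on that one chain.
	signed, _ := types.SignTx(tx, types.NewEIP155Signer(big.NewInt(1)), key)
	fmt.Println("protected:", signed.Protected()) // true

	// "protected": false falls back to the pre-EIP-155 Frontier scheme.
	legacy, _ := types.SignTx(tx, types.FrontierSigner{}, key)
	fmt.Println("protected:", legacy.Protected()) // false
}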

View File

@@ -0,0 +1,54 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package t8ntool

import (
"encoding/json"
"fmt"
"os"
"gopkg.in/urfave/cli.v1"
)

// readFile reads the JSON data in the provided path and unmarshals it into dest.
func readFile(path, desc string, dest interface{}) error {
inFile, err := os.Open(path)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed reading %s file: %v", desc, err))
}
defer inFile.Close()
decoder := json.NewDecoder(inFile)
if err := decoder.Decode(dest); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling %s file: %v", desc, err))
}
return nil
}

// createBasedir makes sure the basedir exists, if the user specified one.
func createBasedir(ctx *cli.Context) (string, error) {
baseDir := ""
if ctx.IsSet(OutputBasedir.Name) {
if base := ctx.String(OutputBasedir.Name); len(base) > 0 {
err := os.MkdirAll(base, 0755) // rwxr-xr-x
if err != nil {
return "", err
}
baseDir = base
}
}
return baseDir, nil
}
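
// A hedged usage sketch of the two helpers above (hypothetical caller; the
// types.Header destination is illustrative, and InputHeaderFlag is the flag
// registered for b11r elsewhere in this diff):
func exampleLoadInputs(ctx *cli.Context) error {
	var header types.Header // any JSON-decodable destination works
	if err := readFile(ctx.String(InputHeaderFlag.Name), "header", &header); err != nil {
		return err
	}
	// Ensure --output.basedir exists before any file output is written.
	if _, err := createBasedir(ctx); err != nil {
		return NewError(ErrorIO, fmt.Errorf("failed creating output basedir: %v", err))
	}
	return nil
}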

View File

@ -139,8 +139,10 @@ var stateTransitionCommand = cli.Command{
Flags: []cli.Flag{
	t8ntool.TraceFlag,
	t8ntool.TraceDisableMemoryFlag,
	t8ntool.TraceEnableMemoryFlag,
	t8ntool.TraceDisableStackFlag,
	t8ntool.TraceDisableReturnDataFlag,
	t8ntool.TraceEnableReturnDataFlag,
	t8ntool.OutputBasedir,
	t8ntool.OutputAllocFlag,
	t8ntool.OutputResultFlag,
@ -167,6 +169,25 @@ var transactionCommand = cli.Command{
},
}
var blockBuilderCommand = cli.Command{
Name: "block-builder",
Aliases: []string{"b11r"},
Usage: "builds a block",
Action: t8ntool.BuildBlock,
Flags: []cli.Flag{
t8ntool.OutputBasedir,
t8ntool.OutputBlockFlag,
t8ntool.InputHeaderFlag,
t8ntool.InputOmmersFlag,
t8ntool.InputTxsRlpFlag,
t8ntool.SealCliqueFlag,
t8ntool.SealEthashFlag,
t8ntool.SealEthashDirFlag,
t8ntool.SealEthashModeFlag,
t8ntool.VerbosityFlag,
},
}
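
// A hedged invocation sketch for the new subcommand (paths taken from the
// testdata/20 fixtures introduced later in this diff):
//
//	go run . b11r --input.header=testdata/20/header.json \
//	    --input.txs=testdata/20/txs.rlp --input.ommers=testdata/20/ommers.json \
//	    --output.block=stdout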
func init() {
	app.Flags = []cli.Flag{
		BenchFlag,
@ -200,6 +221,7 @@ func init() {
	stateTestCommand,
	stateTransitionCommand,
	transactionCommand,
	blockBuilderCommand,
}
cli.CommandHelpTemplate = flags.OriginCommandHelpTemplate
}

View File

@ -116,7 +116,7 @@ func runCmd(ctx *cli.Context) error {
}
var (
	tracer      vm.EVMLogger
	debugLogger *vm.StructLogger
	statedb     *state.StateDB
	chainConfig *params.ChainConfig

View File

@ -65,7 +65,7 @@ func stateTestCmd(ctx *cli.Context) error {
EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name),
}
var (
	tracer   vm.EVMLogger
	debugger *vm.StructLogger
)
switch {

View File

@ -9,6 +9,7 @@ import (
"testing" "testing"
"github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/reexec"
"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
"github.com/ethereum/go-ethereum/internal/cmdtest" "github.com/ethereum/go-ethereum/internal/cmdtest"
) )
@ -130,7 +131,7 @@ func TestT8n(t *testing.T) {
	output:      t8nOutput{alloc: true, result: true},
	expExitCode: 4,
},
{ // Uncle test
	base: "./testdata/5",
	input: t8nInput{
		"alloc.json", "txs.json", "env.json", "Byzantium", "0x80",
@ -170,13 +171,53 @@ func TestT8n(t *testing.T) {
	output: t8nOutput{result: true},
	expOut: "exp2.json",
},
{ // Difficulty calculation - with ommers + Berlin
base: "./testdata/14",
input: t8nInput{
"alloc.json", "txs.json", "env.uncles.json", "Berlin", "",
},
output: t8nOutput{result: true},
expOut: "exp_berlin.json",
},
{ // Difficulty calculation on London
base: "./testdata/19",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "London", "",
},
output: t8nOutput{result: true},
expOut: "exp_london.json",
},
{ // Difficulty calculation on arrow glacier
base: "./testdata/19",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "ArrowGlacier", "",
},
output: t8nOutput{result: true},
expOut: "exp_arrowglacier.json",
},
{ // Sign unprotected (pre-EIP155) transaction
base: "./testdata/23",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Berlin", "",
},
output: t8nOutput{result: true},
expOut: "exp.json",
},
} {
	args := []string{"t8n"}
	args = append(args, tc.output.get()...)
	args = append(args, tc.input.get(tc.base)...)
var qArgs []string // quoted args for debugging purposes
for _, arg := range args {
if len(arg) == 0 {
qArgs = append(qArgs, `""`)
} else {
qArgs = append(qArgs, arg)
}
}
tt.Logf("args: %v\n", strings.Join(qArgs, " "))
	tt.Run("evm-test", args...)
	// Compare the expected output, if provided
	if tc.expOut != "" {
		want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
@ -265,6 +306,14 @@ func TestT9n(t *testing.T) {
	},
	expOut: "exp.json",
},
{ // Invalid RLP
base: "./testdata/18",
input: t9nInput{
inTxs: "invalid.rlp",
stFork: "London",
},
expExitCode: t8ntool.ErrorIO,
},
} {
	args := []string{"t9n"}
@ -295,6 +344,126 @@ func TestT9n(t *testing.T) {
	}
}
type b11rInput struct {
inEnv string
inOmmersRlp string
inTxsRlp string
inClique string
ethash bool
ethashMode string
ethashDir string
}
func (args *b11rInput) get(base string) []string {
var out []string
if opt := args.inEnv; opt != "" {
out = append(out, "--input.header")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.inOmmersRlp; opt != "" {
out = append(out, "--input.ommers")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.inTxsRlp; opt != "" {
out = append(out, "--input.txs")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.inClique; opt != "" {
out = append(out, "--seal.clique")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if args.ethash {
out = append(out, "--seal.ethash")
}
if opt := args.ethashMode; opt != "" {
out = append(out, "--seal.ethash.mode")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
if opt := args.ethashDir; opt != "" {
out = append(out, "--seal.ethash.dir")
out = append(out, fmt.Sprintf("%v/%v", base, opt))
}
out = append(out, "--output.block")
out = append(out, "stdout")
return out
}
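
// A small sketch of how the helper above expands into CLI flags, mirroring
// the clique case from testdata/21 below:
//
//	in := b11rInput{
//		inEnv:       "header.json",
//		inOmmersRlp: "ommers.json",
//		inTxsRlp:    "txs.rlp",
//		inClique:    "clique.json",
//	}
//	args := append([]string{"b11r"}, in.get("./testdata/21")...)
//	// args: b11r --input.header ./testdata/21/header.json ... --seal.clique
//	//       ./testdata/21/clique.json --output.block stdout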
func TestB11r(t *testing.T) {
tt := new(testT8n)
tt.TestCmd = cmdtest.NewTestCmd(t, tt)
for i, tc := range []struct {
base string
input b11rInput
expExitCode int
expOut string
}{
{ // unsealed block
base: "./testdata/20",
input: b11rInput{
inEnv: "header.json",
inOmmersRlp: "ommers.json",
inTxsRlp: "txs.rlp",
},
expOut: "exp.json",
},
{ // ethash test seal
base: "./testdata/21",
input: b11rInput{
inEnv: "header.json",
inOmmersRlp: "ommers.json",
inTxsRlp: "txs.rlp",
},
expOut: "exp.json",
},
{ // clique test seal
base: "./testdata/21",
input: b11rInput{
inEnv: "header.json",
inOmmersRlp: "ommers.json",
inTxsRlp: "txs.rlp",
inClique: "clique.json",
},
expOut: "exp-clique.json",
},
{ // block with ommers
base: "./testdata/22",
input: b11rInput{
inEnv: "header.json",
inOmmersRlp: "ommers.json",
inTxsRlp: "txs.rlp",
},
expOut: "exp.json",
},
} {
args := []string{"b11r"}
args = append(args, tc.input.get(tc.base)...)
tt.Run("evm-test", args...)
tt.Logf("args:\n go run . %v\n", strings.Join(args, " "))
// Compare the expected output, if provided
if tc.expOut != "" {
want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
if err != nil {
t.Fatalf("test %d: could not read expected output: %v", i, err)
}
have := tt.Output()
ok, err := cmpJson(have, want)
switch {
case err != nil:
t.Logf(string(have))
t.Fatalf("test %d, json parsing failed: %v", i, err)
case !ok:
t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want))
}
}
tt.WaitExit()
if have, want := tt.ExitStatus(), tc.expExitCode; have != want {
t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want)
}
}
}
// cmpJson compares the JSON in two byte slices.
func cmpJson(a, b []byte) (bool, error) {
	var j, j2 interface{}

View File

@ -15,7 +15,7 @@
"result": { "result": {
"stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13", "stateRoot": "0x84208a19bc2b46ada7445180c1db162be5b39b9abc8c0a54b05d32943eae4e13",
"txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d",
"receiptRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [ "receipts": [
@ -38,6 +38,7 @@
"error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1"
} }
], ],
"currentDifficulty": "0x20000" "currentDifficulty": "0x20000",
"gasUsed": "0x5208"
} }
} }

View File

@ -2,7 +2,7 @@
"result": { "result": {
"stateRoot": "0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61", "stateRoot": "0xe4b924a6adb5959fccf769d5b7bb2f6359e26d1e76a2443c5a91a36d826aef61",
"txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d", "txRoot": "0x013509c8563d41c0ae4bf38f2d6d19fc6512a1d0d6be045079c8c9f68bf45f9d",
"receiptRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420", "receiptsRoot": "0xa532a08aa9f62431d6fe5d924951b8efb86ed3c54d06fee77788c3767dd13420",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [ "receipts": [
@ -33,6 +33,7 @@
"transactionIndex": "0x1" "transactionIndex": "0x1"
} }
], ],
"currentDifficulty": "0x20000" "currentDifficulty": "0x20000",
"gasUsed": "0x109a0"
} }
} }

View File

@ -2,10 +2,11 @@
"result": { "result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000020000000", "currentDifficulty": "0x2000020000000",
"receipts": [] "receipts": [],
"gasUsed": "0x0"
} }
} }

View File

@ -2,10 +2,11 @@
"result": { "result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [], "receipts": [],
"currentDifficulty": "0x1ff8020000000" "currentDifficulty": "0x1ff8020000000",
"gasUsed": "0x0"
} }
} }

12
cmd/evm/testdata/14/exp_berlin.json vendored Normal file
View File

@ -0,0 +1,12 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x1ff9000000000",
"gasUsed": "0x0"
}
}

View File

@ -2,11 +2,11 @@
  {
    "address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
    "hash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
    "intrinsicGas": "0x5208"
  },
  {
    "address": "0xd02d72e067e77158444ef2020ff2d325f929b363",
    "hash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
    "intrinsicGas": "0x5208"
  }
]

View File

@ -2,12 +2,12 @@
  {
    "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
    "hash": "0x7cc3d1a8540a44736750f03bb4d85c0113be4b3472a71bf82241a3b261b479e6",
    "intrinsicGas": "0x5208"
  },
  {
    "error": "intrinsic gas too low: have 82, want 21000",
    "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
    "hash": "0x3b2d2609e4361562edb9169314f4c05afc6dbf5d706bf9dda5abe242ab76a22b",
    "intrinsicGas": "0x5208"
  }
]

View File

@ -3,13 +3,13 @@
"error": "value exceeds 256 bits", "error": "value exceeds 256 bits",
"address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", "address": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"hash": "0xfbd91685dcbf8172f0e8c53e2ddbb4d26707840da6b51a74371f62a33868fd82", "hash": "0xfbd91685dcbf8172f0e8c53e2ddbb4d26707840da6b51a74371f62a33868fd82",
"intrinsicGas": 21000 "intrinsicGas": "0x5208"
}, },
{ {
"error": "gasPrice exceeds 256 bits", "error": "gasPrice exceeds 256 bits",
"address": "0x1b57ccef1fe5fb73f1e64530fb4ebd9cf1655964", "address": "0x1b57ccef1fe5fb73f1e64530fb4ebd9cf1655964",
"hash": "0x45dc05035cada83748e4c1fe617220106b331eca054f44c2304d5654a9fb29d5", "hash": "0x45dc05035cada83748e4c1fe617220106b331eca054f44c2304d5654a9fb29d5",
"intrinsicGas": 21000 "intrinsicGas": "0x5208"
}, },
{ {
"error": "invalid transaction v, r, s values", "error": "invalid transaction v, r, s values",

9
cmd/evm/testdata/18/README.md vendored Normal file
View File

@ -0,0 +1,9 @@
# Invalid RLP
This folder contains a sample of invalid RLP; `t9n` is expected to handle it
gracefully:
```
$ go run . t9n --input.txs=./testdata/18/invalid.rlp --state.fork=London
ERROR(11): rlp: value size exceeds available input length
```

1
cmd/evm/testdata/18/invalid.rlp vendored Normal file
View File

@ -0,0 +1 @@
"0xf852328001825208870b9331677e6ebf0a801ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa03887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3"

12
cmd/evm/testdata/19/alloc.json vendored Normal file
View File

@ -0,0 +1,12 @@
{
"a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x5ffd4878be161d74",
"code": "0x",
"nonce": "0xac",
"storage": {}
},
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
"balance": "0xfeedbead",
"nonce" : "0x00"
}
}

9
cmd/evm/testdata/19/env.json vendored Normal file
View File

@ -0,0 +1,9 @@
{
"currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"currentGasLimit": "0x750a163df65e8a",
"currentBaseFee": "0x500",
"currentNumber": "13000000",
"currentTimestamp": "100015",
"parentTimestamp" : "99999",
"parentDifficulty" : "0x2000000000000"
}

View File

@ -0,0 +1,12 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000000200000",
"receipts": [],
"gasUsed": "0x0"
}
}

12
cmd/evm/testdata/19/exp_london.json vendored Normal file
View File

@ -0,0 +1,12 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000080000000",
"receipts": [],
"gasUsed": "0x0"
}
}

9
cmd/evm/testdata/19/readme.md vendored Normal file
View File

@ -0,0 +1,9 @@
## Difficulty calculation
This test shows how `evm t8n` can be used to calculate the (ethash) difficulty
when none is provided by the caller, this time on `ArrowGlacier` (EIP-4345).
Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no
uncle hash provided for the parent block):
```
[user@work evm]$ ./evm t8n --input.alloc=./testdata/19/alloc.json --input.txs=./testdata/19/txs.json --input.env=./testdata/19/env.json --output.result=stdout --state.fork=ArrowGlacier
```

1
cmd/evm/testdata/19/txs.json vendored Normal file
View File

@ -0,0 +1 @@
[]

4
cmd/evm/testdata/20/exp.json vendored Normal file
View File

@ -0,0 +1,4 @@
{
"rlp": "0xf902d9f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8f8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600c0",
"hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899"
}

14
cmd/evm/testdata/20/header.json vendored Normal file
View File

@ -0,0 +1,14 @@
{
"parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e",
"miner": "0xe997a23b159e2e2a5ce72333262972374b15425c",
"stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x1000",
"number": "0xc3be",
"gasLimit": "0x50785",
"gasUsed": "0x0",
"timestamp": "0x55c5277e",
"extraData": "0x476574682f76312e302e312f6c696e75782f676f312e342e32",
"mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf",
"nonce": "0x97435673d874f7c8"
}

1
cmd/evm/testdata/20/ommers.json vendored Normal file
View File

@ -0,0 +1 @@
[]

11
cmd/evm/testdata/20/readme.md vendored Normal file
View File

@ -0,0 +1,11 @@
# Block building
This test shows how `b11r` can be used to assemble an unsealed block.
```console
$ go run . b11r --input.header=testdata/20/header.json --input.txs=testdata/20/txs.rlp --input.ommers=testdata/20/ommers.json --output.block=stdout
{
"rlp": "0xf90216f90211a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794e997a23b159e2e2a5ce72333262972374b15425ca0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e99476574682f76312e302e312f6c696e75782f676f312e342e32a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf8897435673d874f7c8c0c0",
"hash": "0xaba9a3b6a4e96e9ecffcadaa5a2ae0589359455617535cd86589fe1dd26fe899"
}
```

1
cmd/evm/testdata/20/txs.rlp vendored Normal file
View File

@ -0,0 +1 @@
"0xf8c2f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600f85f8002825208948a8eafb1cf62bfbeb1741769dae1a9dd4799619201801ba09500e8ba27d3c33ca7764e107410f44cbd8c19794bde214d694683a7aa998cdba07235ae07e4bd6e0206d102b1f8979d6adab280466b6a82d2208ee08951f1f600"

6
cmd/evm/testdata/21/clique.json vendored Normal file
View File

@ -0,0 +1,6 @@
{
"secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"voted": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"authorize": false,
"vanity": "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}

4
cmd/evm/testdata/21/exp-clique.json vendored Normal file
View File

@ -0,0 +1,4 @@
{
"rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0",
"hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7"
}

4
cmd/evm/testdata/21/exp.json vendored Normal file
View File

@ -0,0 +1,4 @@
{
"rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0",
"hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb"
}

11
cmd/evm/testdata/21/header.json vendored Normal file
View File

@ -0,0 +1,11 @@
{
"parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e",
"stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x1000",
"number": "0xc3be",
"gasLimit": "0x50785",
"gasUsed": "0x0",
"timestamp": "0x55c5277e",
"mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf"
}

1
cmd/evm/testdata/21/ommers.json vendored Normal file
View File

@ -0,0 +1 @@
[]

23
cmd/evm/testdata/21/readme.md vendored Normal file
View File

@ -0,0 +1,23 @@
# Sealed block building
This test shows how `b11r` can be used to assemble a sealed block.
## Ethash
```console
$ go run . b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.ethash --seal.ethash.mode=test --output.block=stdout
{
"rlp": "0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0",
"hash": "0x801411e9f6609a659825690d13e4f75a3cfe9143952fa2d9573f3b0a5eb9ebbb"
}
```
## Clique
```console
$ go run . b11r --input.header=testdata/21/header.json --input.txs=testdata/21/txs.rlp --input.ommers=testdata/21/ommers.json --seal.clique=testdata/21/clique.json --output.block=stdout
{
"rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0",
"hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7"
}
```

1
cmd/evm/testdata/21/txs.rlp vendored Normal file
View File

@ -0,0 +1 @@
"c0"

4
cmd/evm/testdata/22/exp-clique.json vendored Normal file
View File

@ -0,0 +1,4 @@
{
"rlp": "0xf9025ff9025aa0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277eb861aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaac540a67aaee364005841da84f488f6b6d0116dfb5103d091402c81a163d5f66666595e37f56f196d8c5c98da714dbfae68d6b7e1790cc734a20ec6ce52213ad800a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf88ffffffffffffffffc0c0",
"hash": "0x71c59102cc805dbe8741e1210ebe229a321eff144ac7276006fefe39e8357dc7"
}

4
cmd/evm/testdata/22/exp.json vendored Normal file
View File

@ -0,0 +1,4 @@
{
"rlp": "0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000",
"hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755"
}

11
cmd/evm/testdata/22/header.json vendored Normal file
View File

@ -0,0 +1,11 @@
{
"parentHash": "0xd6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34e",
"stateRoot": "0x325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2e",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x1000",
"number": "0xc3be",
"gasLimit": "0x50785",
"gasUsed": "0x0",
"timestamp": "0x55c5277e",
"mixHash": "0x5865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf"
}

1
cmd/evm/testdata/22/ommers.json vendored Normal file
View File

@ -0,0 +1 @@
["0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0","0xf901fdf901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0c0"]

11
cmd/evm/testdata/22/readme.md vendored Normal file
View File

@ -0,0 +1,11 @@
# Building blocks with ommers
This test shows how `b11r` can chain together separately assembled ommer blocks into a canonical block.
```console
$ echo "{ \"ommers\": [`go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`,`go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --output.block=stdout | jq '.[\"rlp\"]'`]}" | go run . b11r --input.header=testdata/22/header.json --input.txs=testdata/22/txs.rlp --input.ommers=stdin --output.block=stdout
{
"rlp": "0xf905f5f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea06eb9f0c3cd68c9e97134e6725d12b1f1d8f0644458da6870a37ff84c908fb1e7940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000c0f903f6f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000f901f8a0d6d785d33cbecf30f30d07e00e226af58f72efdf385d46bc3e6326c23b11e34ea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a0325aea6db48e9d737cddf59034843e99f05bec269453be83c9b9a981a232cc2ea056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082100082c3be83050785808455c5277e80a05865e417635a26db6d1d39ac70d1abf373e5398b3c6fd506acd038fa1334eedf880000000000000000",
"hash": "0xd9a81c8fcd57a7f2a0d2c375eff6ad192c30c3729a271303f0a9a7e1b357e755"
}
```

1
cmd/evm/testdata/22/txs.rlp vendored Normal file
View File

@ -0,0 +1 @@
"c0"

16
cmd/evm/testdata/23/alloc.json vendored Normal file
View File

@ -0,0 +1,16 @@
{
"0x095e7baea6a6c7c4c2dfeb977efac326af552d87" : {
"balance" : "0x0de0b6b3a7640000",
"code" : "0x6001",
"nonce" : "0x00",
"storage" : {
}
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0x0de0b6b3a7640000",
"code" : "0x",
"nonce" : "0x00",
"storage" : {
}
}
}

7
cmd/evm/testdata/23/env.json vendored Normal file
View File

@ -0,0 +1,7 @@
{
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentDifficulty" : "0x020000",
"currentGasLimit" : "0x3b9aca00",
"currentNumber" : "0x05",
"currentTimestamp" : "0x03e8"
}

25
cmd/evm/testdata/23/exp.json vendored Normal file
View File

@ -0,0 +1,25 @@
{
"result": {
"stateRoot": "0x65334305e4accfa18352deb24f007b837b5036425b0712cf0e65a43bfa95154d",
"txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b",
"receiptsRoot": "0xf951f9396af203499cc7d379715a9110323de73967c5700e2f424725446a3c76",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
{
"root": "0x",
"status": "0x1",
"cumulativeGasUsed": "0x520b",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null,
"transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x520b",
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionIndex": "0x0"
}
],
"currentDifficulty": "0x20000",
"gasUsed": "0x520b"
}
}

1
cmd/evm/testdata/23/readme.md vendored Normal file
View File

@ -0,0 +1 @@
These files exemplify how to sign a transaction using the pre-EIP155 (unprotected) scheme.
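For reference, a sketch of how these files can be exercised, mirroring the Berlin test case added in t8n_test.go above:
```console
$ go run . t8n --input.alloc=./testdata/23/alloc.json --input.txs=./testdata/23/txs.json --input.env=./testdata/23/env.json --state.fork=Berlin --output.result=stdout
```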

15
cmd/evm/testdata/23/txs.json vendored Normal file
View File

@ -0,0 +1,15 @@
[
{
"input" : "0x",
"gas" : "0x5f5e100",
"gasPrice" : "0x1",
"nonce" : "0x0",
"to" : "0x095e7baea6a6c7c4c2dfeb977efac326af552d87",
"value" : "0x186a0",
"v" : "0x0",
"r" : "0x0",
"s" : "0x0",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"protected": false
}
]

View File

@ -15,7 +15,7 @@
"result": { "result": {
"stateRoot": "0xb7341da3f9f762a6884eaa186c32942734c146b609efee11c4b0214c44857ea1", "stateRoot": "0xb7341da3f9f762a6884eaa186c32942734c146b609efee11c4b0214c44857ea1",
"txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b", "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b",
"receiptRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086", "receiptsRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [ "receipts": [
@ -32,6 +32,7 @@
"transactionIndex": "0x0" "transactionIndex": "0x0"
} }
], ],
"currentDifficulty": "0x20000" "currentDifficulty": "0x20000",
"gasUsed": "0x521f"
} }
} }

View File

@ -13,10 +13,11 @@
"result": { "result": {
"stateRoot": "0xa7312add33811645c6aa65d928a1a4f49d65d448801912c069a0aa8fe9c1f393", "stateRoot": "0xa7312add33811645c6aa65d928a1a4f49d65d448801912c069a0aa8fe9c1f393",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [], "receipts": [],
"currentDifficulty": "0x20000" "currentDifficulty": "0x20000",
"gasUsed": "0x0"
} }
} }

View File

@ -66,6 +66,7 @@ It expects the genesis file as argument.`,
	Flags: []cli.Flag{
		utils.MainnetFlag,
		utils.RopstenFlag,
		utils.SepoliaFlag,
		utils.RinkebyFlag,
		utils.GoerliFlag,
	},
@ -140,7 +141,9 @@ be gzipped.`,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}
	exportPreimagesCommand = cli.Command{
		Action: utils.MigrateFlags(exportPreimages),
@ -154,7 +157,9 @@ be gzipped.`,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
	}
	dumpCommand = cli.Command{
		Action: utils.MigrateFlags(dump),
@ -368,7 +373,6 @@ func exportPreimages(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
	utils.Fatalf("This command requires an argument.")
}
stack, _ := makeConfigNode(ctx)
defer stack.Close()

View File

@ -156,8 +156,8 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
// makeFullNode loads geth configuration and creates the Ethereum backend.
func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
	stack, cfg := makeConfigNode(ctx)
	if ctx.GlobalIsSet(utils.OverrideArrowGlacierFlag.Name) {
		cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name))
	}
	backend, eth := utils.RegisterEthService(stack, &cfg.Eth)

View File

@ -134,6 +134,8 @@ func remoteConsole(ctx *cli.Context) error {
path = filepath.Join(path, "rinkeby") path = filepath.Join(path, "rinkeby")
} else if ctx.GlobalBool(utils.GoerliFlag.Name) { } else if ctx.GlobalBool(utils.GoerliFlag.Name) {
path = filepath.Join(path, "goerli") path = filepath.Join(path, "goerli")
} else if ctx.GlobalBool(utils.SepoliaFlag.Name) {
path = filepath.Join(path, "sepolia")
} }
} }
endpoint = fmt.Sprintf("%s/geth.ipc", path) endpoint = fmt.Sprintf("%s/geth.ipc", path)

View File

@ -17,12 +17,16 @@
package main
import (
"bytes"
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
@ -63,6 +67,8 @@ Remove blockchain and state databases`,
dbPutCmd,
dbGetSlotsCmd,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
},
}
dbInspectCmd = cli.Command{
@ -71,9 +77,11 @@ Remove blockchain and state databases`,
ArgsUsage: "<prefix> <start>", ArgsUsage: "<prefix> <start>",
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.DataDirFlag, utils.DataDirFlag,
utils.AncientFlag,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -89,6 +97,7 @@ Remove blockchain and state databases`,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
@ -102,6 +111,7 @@ Remove blockchain and state databases`,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.CacheFlag,
@ -121,6 +131,7 @@ corruption if it is aborted during execution'!`,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
@ -136,6 +147,7 @@ corruption if it is aborted during execution'!`,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
@ -152,6 +164,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
@ -168,6 +181,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
@ -183,11 +197,42 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "This command displays information about the freezer index.",
}
dbImportCmd = cli.Command{
Action: utils.MigrateFlags(importLDBdata),
Name: "import",
Usage: "Imports leveldb-data from an exported RLP dump.",
ArgsUsage: "<dumpfile> <start (optional)",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "The import command imports the specific chain data from an RLP encoded stream.",
}
dbExportCmd = cli.Command{
Action: utils.MigrateFlags(exportChaindata),
Name: "export",
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
ArgsUsage: "<type> <dumpfile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
)
func removeDB(ctx *cli.Context) error {
@ -510,3 +555,133 @@ func parseHexOrString(str string) ([]byte, error) {
}
return b, err
}
func importLDBdata(ctx *cli.Context) error {
start := 0
switch ctx.NArg() {
case 1:
break
case 2:
s, err := strconv.Atoi(ctx.Args().Get(1))
if err != nil {
return fmt.Errorf("second arg must be an integer: %v", err)
}
start = s
default:
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
var (
fName = ctx.Args().Get(0)
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during ldb import, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, false)
return utils.ImportLDBData(db, fName, int64(start), stop)
}
type preimageIterator struct {
iter ethdb.Iterator
}
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
for iter.iter.Next() {
key := iter.iter.Key()
if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.iter.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *preimageIterator) Release() {
iter.iter.Release()
}
type snapshotIterator struct {
init bool
account ethdb.Iterator
storage ethdb.Iterator
}
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
if !iter.init {
iter.init = true
return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
}
for iter.account.Next() {
key := iter.account.Key()
if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.account.Value(), true
}
}
for iter.storage.Next() {
key := iter.storage.Key()
if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
return utils.OpBatchAdd, key, iter.storage.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *snapshotIterator) Release() {
iter.account.Release()
iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
"preimage": func(db ethdb.Database) utils.ChainDataIterator {
iter := db.NewIterator(rawdb.PreimagePrefix, nil)
return &preimageIterator{iter: iter}
},
"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
return &snapshotIterator{account: account, storage: storage}
},
}
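The chainExporters map above is the single registration point for exportable kinds: each entry pairs a kind name with a constructor that wraps raw database iterators in a utils.ChainDataIterator. As a minimal sketch, wiring up one further kind could look like the following; the "mykind" name, the 0x6d key prefix and the prefixIterator type are hypothetical and not part of this commit:

type prefixIterator struct {
	iter ethdb.Iterator
}

func (iter *prefixIterator) Next() (byte, []byte, []byte, bool) {
	if iter.iter.Next() {
		// Export every entry under the prefix as a batch-add operation.
		return utils.OpBatchAdd, iter.iter.Key(), iter.iter.Value(), true
	}
	return 0, nil, nil, false
}

func (iter *prefixIterator) Release() { iter.iter.Release() }

// Hypothetical registration alongside "preimage" and "snapshot":
// chainExporters["mykind"] = func(db ethdb.Database) utils.ChainDataIterator {
// 	return &prefixIterator{iter: db.NewIterator([]byte{0x6d}, nil)}
// }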
func exportChaindata(ctx *cli.Context) error {
if ctx.NArg() < 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
// Parse the required chain data type, make sure it's supported.
kind := ctx.Args().Get(0)
kind = strings.ToLower(strings.Trim(kind, " "))
exporter, ok := chainExporters[kind]
if !ok {
var kinds []string
for kind := range chainExporters {
kinds = append(kinds, kind)
}
return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
}
var (
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during db export, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}

View File

@ -42,6 +42,11 @@ import (
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/plugins" "github.com/ethereum/go-ethereum/plugins"
"github.com/ethereum/go-ethereum/plugins/wrappers" "github.com/ethereum/go-ethereum/plugins/wrappers"
// Force-load the tracer engines to trigger registration
_ "github.com/ethereum/go-ethereum/eth/tracers/js"
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )
@ -69,7 +74,7 @@ var (
utils.NoUSBFlag,
utils.USBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideLondonFlag,
utils.OverrideArrowGlacierFlag,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
@ -138,7 +143,9 @@ var (
utils.MainnetFlag,
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
utils.DeveloperGasLimitFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.VMEnableDebugFlag,

View File

@ -62,6 +62,7 @@ var (
utils.DataDirFlag,
utils.AncientFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.CacheTrieJournalFlag,
@ -92,6 +93,7 @@ the trie clean cache with default directory will be deleted.
utils.DataDirFlag,
utils.AncientFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
@ -112,6 +114,7 @@ In other words, this command does the snapshot to trie conversion.
utils.DataDirFlag,
utils.AncientFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
@ -134,6 +137,7 @@ It's also usable without snapshot enabled.
utils.DataDirFlag,
utils.AncientFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
@ -157,6 +161,7 @@ It's also usable without snapshot enabled.
utils.DataDirFlag,
utils.AncientFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.ExcludeCodeFlag,

View File

@ -45,6 +45,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.GoerliFlag,
utils.RinkebyFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.SyncModeFlag,
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
@ -74,6 +75,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
Flags: []cli.Flag{
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
utils.DeveloperGasLimitFlag,
},
},
{

View File

@ -23,7 +23,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"sync"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
@ -80,14 +79,8 @@ func (w *wizard) run() {
} else if err := json.Unmarshal(blob, &w.conf); err != nil {
log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err)
} else {
// Dial all previously known servers concurrently
// Dial all previously known servers
var pend sync.WaitGroup
for server, pubkey := range w.conf.Servers {
pend.Add(1)
go func(server string, pubkey []byte) {
defer pend.Done()
log.Info("Dialing previously configured server", "server", server)
client, err := dial(server, pubkey)
if err != nil {
@ -96,9 +89,7 @@ func (w *wizard) run() {
w.lock.Lock()
w.servers[server] = client
w.lock.Unlock()
}(server, pubkey)
}
pend.Wait()
w.networkStats()
}
// Basics done, loop ad infinitum about what to do

View File

@ -18,7 +18,9 @@
package utils
import (
"bufio"
"compress/gzip"
"errors"
"fmt"
"io"
"os"
@ -270,6 +272,7 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
}
// ImportPreimages imports a batch of exported hash preimages into the database.
// It's a part of the deprecated functionality, should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
log.Info("Importing preimages", "file", fn)
@ -280,7 +283,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
}
defer fh.Close()
var reader io.Reader = fh
var reader io.Reader = bufio.NewReader(fh)
if strings.HasSuffix(fn, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
@ -288,7 +291,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
}
stream := rlp.NewStream(reader, 0)
// Import the preimages in batches to prevent disk trashing
// Import the preimages in batches to prevent disk thrashing
preimages := make(map[common.Hash][]byte)
for {
@ -317,6 +320,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It's a part of the deprecated functionality, should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
log.Info("Exporting preimages", "file", fn)
@ -344,3 +348,207 @@ func ExportPreimages(db ethdb.Database, fn string) error {
log.Info("Exported preimages", "file", fn) log.Info("Exported preimages", "file", fn)
return nil return nil
} }
// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
Magic string // Always set to 'gethdbdump' for disambiguation
Version uint64
Kind string
UnixTime uint64
}
const exportMagic = "gethdbdump"
const (
OpBatchAdd = 0
OpBatchDel = 1
)
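The byte stream that follows this header is a flat sequence of entries, each written as three consecutive RLP values (op, key, value) rather than as one RLP list, which is why the importer below issues three stream.Decode calls per entry. A minimal sketch of that framing, reusing the rlp and io imports of this file (writeEntry itself is hypothetical, not part of the commit):

func writeEntry(w io.Writer, op byte, key, val []byte) error {
	// Three back-to-back RLP values: operation code, key, value.
	if err := rlp.Encode(w, op); err != nil {
		return err
	}
	if err := rlp.Encode(w, key); err != nil {
		return err
	}
	return rlp.Encode(w, val)
}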
// ImportLDBData imports a batch of exported chain data into the database
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
log.Info("Importing leveldb data", "file", f)
// Open the file handle and potentially unwrap the gzip stream
fh, err := os.Open(f)
if err != nil {
return err
}
defer fh.Close()
var reader io.Reader = bufio.NewReader(fh)
if strings.HasSuffix(f, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
}
}
stream := rlp.NewStream(reader, 0)
// Read the header
var header exportHeader
if err := stream.Decode(&header); err != nil {
return fmt.Errorf("could not decode header: %v", err)
}
if header.Magic != exportMagic {
return errors.New("incompatible data, wrong magic")
}
if header.Version != 0 {
return fmt.Errorf("incompatible version %d, (support only 0)", header.Version)
}
log.Info("Importing data", "file", f, "type", header.Kind, "data age",
common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))
// Import the snapshot in batches to prevent disk thrashing
var (
count int64
start = time.Now()
logged = time.Now()
batch = db.NewBatch()
)
for {
// Read the next entry
var (
op byte
key, val []byte
)
if err := stream.Decode(&op); err != nil {
if err == io.EOF {
break
}
return err
}
if err := stream.Decode(&key); err != nil {
return err
}
if err := stream.Decode(&val); err != nil {
return err
}
if count < startIndex {
count++
continue
}
switch op {
case OpBatchDel:
batch.Delete(key)
case OpBatchAdd:
batch.Put(key, val)
default:
return fmt.Errorf("unknown op %d\n", op)
}
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
}
// Check interruption emitted by ctrl+c
if count%1000 == 0 {
select {
case <-interrupt:
if err := batch.Write(); err != nil {
return err
}
log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
default:
}
}
if count%1000 == 0 && time.Since(logged) > 8*time.Second {
log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
count += 1
}
// Flush the last batch snapshot data
if batch.ValueSize() > 0 {
if err := batch.Write(); err != nil {
return err
}
}
log.Info("Imported chain data", "file", f, "count", count,
"elapsed", common.PrettyDuration(time.Since(start)))
return nil
}
// ChainDataIterator is an interface that wraps all the functions needed to
// iterate over the chain data being exported.
type ChainDataIterator interface {
// Next returns the key-value pair for next exporting entry in the iterator.
// When the end is reached, it will return (0, nil, nil, false).
Next() (byte, []byte, []byte, bool)
// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
Release()
}
// ExportChaindata exports the given kind of chain data into the specified file,
// truncating any data already present. If the file name has a '.gz' suffix,
// gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
log.Info("Exporting chain data", "file", fn, "kind", kind)
defer iter.Release()
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Write the header
if err := rlp.Encode(writer, &exportHeader{
Magic: exportMagic,
Version: 0,
Kind: kind,
UnixTime: uint64(time.Now().Unix()),
}); err != nil {
return err
}
// Extract data from source iterator and dump them out to file
var (
count int64
start = time.Now()
logged = time.Now()
)
for {
op, key, val, ok := iter.Next()
if !ok {
break
}
if err := rlp.Encode(writer, op); err != nil {
return err
}
if err := rlp.Encode(writer, key); err != nil {
return err
}
if err := rlp.Encode(writer, val); err != nil {
return err
}
if count%1000 == 0 {
// Check interruption emitted by ctrl+c
select {
case <-interrupt:
log.Info("Chain data exporting interrupted", "file", fn,
"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
default:
}
if time.Since(logged) > 8*time.Second {
log.Info("Exporting chain data", "file", fn, "kind", kind,
"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
}
count++
}
log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
"elapsed", common.PrettyDuration(time.Since(start)))
return nil
}

cmd/utils/export_test.go (new file, 198 lines)
View File

@ -0,0 +1,198 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/rlp"
)
// TestExport does basic sanity checks on the export/import functionality
func TestExport(t *testing.T) {
f := fmt.Sprintf("%v/tempdump", os.TempDir())
defer func() {
os.Remove(f)
}()
testExport(t, f)
}
func TestExportGzip(t *testing.T) {
f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
defer func() {
os.Remove(f)
}()
testExport(t, f)
}
type testIterator struct {
index int
}
func newTestIterator() *testIterator {
return &testIterator{index: -1}
}
func (iter *testIterator) Next() (byte, []byte, []byte, bool) {
if iter.index >= 999 {
return 0, nil, nil, false
}
iter.index += 1
if iter.index == 42 {
iter.index += 1
}
return OpBatchAdd, []byte(fmt.Sprintf("key-%04d", iter.index)),
[]byte(fmt.Sprintf("value %d", iter.index)), true
}
func (iter *testIterator) Release() {}
func testExport(t *testing.T, f string) {
err := ExportChaindata(f, "testdata", newTestIterator(), make(chan struct{}))
if err != nil {
t.Fatal(err)
}
db := rawdb.NewMemoryDatabase()
err = ImportLDBData(db, f, 5, make(chan struct{}))
if err != nil {
t.Fatal(err)
}
// verify
for i := 0; i < 1000; i++ {
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
if (i < 5 || i == 42) && err == nil {
t.Fatalf("expected no element at idx %d, got '%v'", i, string(v))
}
if !(i < 5 || i == 42) {
if err != nil {
t.Fatalf("expected element idx %d: %v", i, err)
}
if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
t.Fatalf("have %v, want %v", have, want)
}
}
}
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", 1000)))
if err == nil {
t.Fatalf("expected no element at idx %d, got '%v'", 1000, string(v))
}
}
// TestDeletionExport tests whether deletion markers can be exported and imported correctly.
func TestDeletionExport(t *testing.T) {
f := fmt.Sprintf("%v/tempdump", os.TempDir())
defer func() {
os.Remove(f)
}()
testDeletion(t, f)
}
// TestDeletionExportGzip tests if the deletion markers can be exported/imported
// correctly with gz compression.
func TestDeletionExportGzip(t *testing.T) {
f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
defer func() {
os.Remove(f)
}()
testDeletion(t, f)
}
type deletionIterator struct {
index int
}
func newDeletionIterator() *deletionIterator {
return &deletionIterator{index: -1}
}
func (iter *deletionIterator) Next() (byte, []byte, []byte, bool) {
if iter.index >= 999 {
return 0, nil, nil, false
}
iter.index += 1
if iter.index == 42 {
iter.index += 1
}
return OpBatchDel, []byte(fmt.Sprintf("key-%04d", iter.index)), nil, true
}
func (iter *deletionIterator) Release() {}
func testDeletion(t *testing.T, f string) {
err := ExportChaindata(f, "testdata", newDeletionIterator(), make(chan struct{}))
if err != nil {
t.Fatal(err)
}
db := rawdb.NewMemoryDatabase()
for i := 0; i < 1000; i++ {
db.Put([]byte(fmt.Sprintf("key-%04d", i)), []byte(fmt.Sprintf("value %d", i)))
}
err = ImportLDBData(db, f, 5, make(chan struct{}))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 1000; i++ {
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
if i < 5 || i == 42 {
if err != nil {
t.Fatalf("expected element at idx %d, got '%v'", i, err)
}
if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
t.Fatalf("have %v, want %v", have, want)
}
}
if !(i < 5 || i == 42) {
if err == nil {
t.Fatalf("expected no element idx %d: %v", i, string(v))
}
}
}
}
// TestImportFutureFormat tests that we reject unsupported future versions.
func TestImportFutureFormat(t *testing.T) {
f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
defer func() {
os.Remove(f)
}()
fh, err := os.OpenFile(f, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
t.Fatal(err)
}
defer fh.Close()
if err := rlp.Encode(fh, &exportHeader{
Magic: exportMagic,
Version: 500,
Kind: "testdata",
UnixTime: uint64(time.Now().Unix()),
}); err != nil {
t.Fatal(err)
}
db2 := rawdb.NewMemoryDatabase()
err = ImportLDBData(db2, f, 0, make(chan struct{}))
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.HasPrefix(err.Error(), "incompatible version") {
t.Fatalf("wrong error: %v", err)
}
}

View File

@ -155,6 +155,10 @@ var (
Name: "ropsten", Name: "ropsten",
Usage: "Ropsten network: pre-configured proof-of-work test network", Usage: "Ropsten network: pre-configured proof-of-work test network",
} }
SepoliaFlag = cli.BoolFlag{
Name: "sepolia",
Usage: "Sepolia network: pre-configured proof-of-work test network",
}
DeveloperFlag = cli.BoolFlag{ DeveloperFlag = cli.BoolFlag{
Name: "dev", Name: "dev",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled", Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@ -163,6 +167,11 @@ var (
Name: "dev.period", Name: "dev.period",
Usage: "Block period to use in developer mode (0 = mine only if transaction pending)", Usage: "Block period to use in developer mode (0 = mine only if transaction pending)",
} }
DeveloperGasLimitFlag = cli.Uint64Flag{
Name: "dev.gaslimit",
Usage: "Initial block gas limit",
Value: 11500000,
}
IdentityFlag = cli.StringFlag{ IdentityFlag = cli.StringFlag{
Name: "identity", Name: "identity",
Usage: "Custom node name", Usage: "Custom node name",
@ -235,9 +244,9 @@ var (
Usage: "Megabytes of memory allocated to bloom-filter for pruning", Usage: "Megabytes of memory allocated to bloom-filter for pruning",
Value: 2048, Value: 2048,
} }
OverrideLondonFlag = cli.Uint64Flag{ OverrideArrowGlacierFlag = cli.Uint64Flag{
Name: "override.london", Name: "override.arrowglacier",
Usage: "Manually specify London fork-block, overriding the bundled setting", Usage: "Manually specify Arrow Glacier fork-block, overriding the bundled setting",
} }
// Light server and client settings // Light server and client settings
LightServeFlag = cli.IntFlag{ LightServeFlag = cli.IntFlag{
@ -798,6 +807,9 @@ func MakeDataDir(ctx *cli.Context) string {
if ctx.GlobalBool(GoerliFlag.Name) {
return filepath.Join(path, "goerli")
}
if ctx.GlobalBool(SepoliaFlag.Name) {
return filepath.Join(path, "sepolia")
}
return path
}
Fatalf("Cannot determine default data directory, please set manually (--datadir)")
@ -846,6 +858,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
case ctx.GlobalBool(RopstenFlag.Name):
urls = params.RopstenBootnodes
case ctx.GlobalBool(SepoliaFlag.Name):
urls = params.SepoliaBootnodes
case ctx.GlobalBool(RinkebyFlag.Name):
urls = params.RinkebyBootnodes
case ctx.GlobalBool(GoerliFlag.Name):
@ -1269,6 +1283,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir(): case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
case ctx.GlobalBool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia")
} }
} }
@ -1454,7 +1470,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
// SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag)
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 {
@ -1598,6 +1614,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
cfg.Genesis = core.DefaultRopstenGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.RopstenGenesisHash)
case ctx.GlobalBool(SepoliaFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 11155111
}
cfg.Genesis = core.DefaultSepoliaGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.SepoliaGenesisHash)
case ctx.GlobalBool(RinkebyFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 4
@ -1644,7 +1666,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
log.Info("Using developer account", "address", developer.Address) log.Info("Using developer account", "address", developer.Address)
// Create a new developer genesis block or reuse existing one // Create a new developer genesis block or reuse existing one
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), developer.Address) cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
if ctx.GlobalIsSet(DataDirFlag.Name) { if ctx.GlobalIsSet(DataDirFlag.Name) {
// Check if we have an already initialized chain and fall back to // Check if we have an already initialized chain and fall back to
// that if so. Otherwise we need to generate a new genesis spec. // that if so. Otherwise we need to generate a new genesis spec.
@ -1826,6 +1848,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
genesis = core.DefaultGenesisBlock()
case ctx.GlobalBool(RopstenFlag.Name):
genesis = core.DefaultRopstenGenesisBlock()
case ctx.GlobalBool(SepoliaFlag.Name):
genesis = core.DefaultSepoliaGenesisBlock()
case ctx.GlobalBool(RinkebyFlag.Name):
genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(GoerliFlag.Name):

View File

@ -176,13 +176,14 @@ func MustDecodeBig(input string) *big.Int {
}
// EncodeBig encodes bigint as a hex string with 0x prefix.
// The sign of the integer is ignored.
func EncodeBig(bigint *big.Int) string {
nbits := bigint.BitLen()
if sign := bigint.Sign(); sign == 0 {
if nbits == 0 {
return "0x0"
} else if sign > 0 {
return "0x" + bigint.Text(16)
} else {
return "-0x" + bigint.Text(16)[1:]
}
return fmt.Sprintf("%#x", bigint)
}
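The rewritten EncodeBig drops the fmt.Sprintf("%#x") path in favour of direct big.Int.Text calls with explicit sign handling, which is presumably what the BenchmarkEncodeBig added a couple of hunks below measures. A quick standalone illustration of the resulting outputs, assuming the function remains in the common/hexutil package as before:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	fmt.Println(hexutil.EncodeBig(big.NewInt(0)))    // 0x0
	fmt.Println(hexutil.EncodeBig(big.NewInt(255)))  // 0xff
	fmt.Println(hexutil.EncodeBig(big.NewInt(-255))) // -0xff
}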
func has0xPrefix(input string) bool {

View File

@ -201,3 +201,15 @@ func TestDecodeUint64(t *testing.T) {
}
}
}
func BenchmarkEncodeBig(b *testing.B) {
for _, bench := range encodeBigTests {
b.Run(bench.want, func(b *testing.B) {
b.ReportAllocs()
bigint := bench.input.(*big.Int)
for i := 0; i < b.N; i++ {
EncodeBig(bigint)
}
})
}
}

View File

@ -214,6 +214,9 @@ func (api *API) GetSigner(rlpOrBlockNr *blockNumberOrHashOrRLP) (common.Address,
} else if number, ok := blockNrOrHash.Number(); ok {
header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
}
if header == nil {
return common.Address{}, fmt.Errorf("missing block %v", blockNrOrHash.String())
}
return api.clique.Author(header)
}
block := new(types.Block)

View File

@ -600,8 +600,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
}
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 {
log.Info("Sealing paused, waiting for transactions")
return nil
return errors.New("sealing paused while waiting for transactions")
}
// Don't hold the signer fields for the entire sealing procedure
c.lock.RLock()
@ -621,8 +620,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
if recent == signer {
// Signer is among recents, only wait if the current block doesn't shift it out
if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
log.Info("Signed recently, must wait for others")
return nil
return errors.New("signed recently, must wait for others")
}
}
}

View File

@ -45,6 +45,11 @@ var (
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks
// calcDifficultyEip4345 is the difficulty adjustment algorithm as specified by EIP 4345.
// It offsets the bomb a total of 10.7M blocks.
// Specification EIP-4345: https://eips.ethereum.org/EIPS/eip-4345
calcDifficultyEip4345 = makeDifficultyCalculator(big.NewInt(10_700_000))
// calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554.
// It offsets the bomb a total of 9.7M blocks.
// Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554
@ -330,6 +335,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uin
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, big1)
switch {
case config.IsArrowGlacier(next):
return calcDifficultyEip4345(time, parent)
case config.IsLondon(next):
return calcDifficultyEip3554(time, parent)
case config.IsMuirGlacier(next):
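Since the switch checks forks newest-first, Arrow Glacier's calculator takes precedence over London's once both are active. Per the EIP-4345 specification, the only change is how far back the difficulty-bomb block number is pushed; a sketch of that rule follows, under the stated assumption that makeDifficultyCalculator applies the offset this way (fakeBlockNumber is an illustrative helper, not geth's internal code):

package ethash // assumption: same package as the calculators above

import "math/big"

// fakeBlockNumber sketches the EIP-4345 delay: the exponential bomb term is
// evaluated against a block number offset back by 10.7M blocks.
func fakeBlockNumber(num *big.Int) *big.Int {
	delay := big.NewInt(10_700_000)
	if num.Cmp(delay) <= 0 {
		return new(big.Int) // bomb contribution fully deferred
	}
	return new(big.Int).Sub(num, delay)
}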

View File

@ -136,13 +136,16 @@ func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(bu
if err != nil {
return nil, nil, nil, err
}
if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil {
dump.Close()
os.Remove(temp)
return nil, nil, nil, err
}
// Memory map the file for writing and fill it with the generator
mem, buffer, err := memoryMapFile(dump, true)
if err != nil {
dump.Close()
os.Remove(temp)
return nil, nil, nil, err
}
copy(buffer, dumpMagic)
@ -358,7 +361,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
if err != nil {
logger.Error("Failed to generate mapped ethash dataset", "err", err)
d.dataset = make([]uint32, dsize/2)
d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, cache)
}
// Iterate over all previous instances and delete old ones

View File

@ -0,0 +1,35 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build linux
// +build linux
package ethash
import (
"os"
"golang.org/x/sys/unix"
)
// ensureSize expands the file to the given size. This prevents runtime errors
// later on: a sparse file can ostensibly be expanded to the declared size
// without actually occupying it on disk, only to fail once it grows past the
// available capacity.
func ensureSize(f *os.File, size int64) error {
// Docs: https://www.man7.org/linux/man-pages/man2/fallocate.2.html
return unix.Fallocate(int(f.Fd()), 0, 0, size)
}

View File

@ -0,0 +1,36 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !linux
// +build !linux
package ethash
import (
"os"
)
// ensureSize expands the file to the given size. This prevents runtime errors
// later on: a sparse file can ostensibly be expanded to the declared size
// without actually occupying it on disk, only to fail once it grows past the
// available capacity.
func ensureSize(f *os.File, size int64) error {
// On systems which do not support fallocate, we merely truncate it.
// More robust alternatives would be to
// - Use posix_fallocate, or
// - explicitly fill the file with zeroes.
return f.Truncate(size)
}

View File

@ -99,7 +99,7 @@ func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester {
t.Fatalf("failed to create node: %v", err) t.Fatalf("failed to create node: %v", err)
} }
ethConf := &ethconfig.Config{ ethConf := &ethconfig.Config{
Genesis: core.DeveloperGenesisBlock(15, common.Address{}), Genesis: core.DeveloperGenesisBlock(15, 11_500_000, common.Address{}),
Miner: miner.Config{ Miner: miner.Config{
Etherbase: common.HexToAddress(testAddress), Etherbase: common.HexToAddress(testAddress),
}, },

View File

@ -75,7 +75,7 @@ var (
// This is the content of the genesis block used by the benchmarks.
benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
benchRootAddr = crypto.PubkeyToAddress(benchRootKey.PublicKey)
benchRootFunds = math.BigPow(2, 100)
benchRootFunds = math.BigPow(2, 200)
)
// genValueTx returns a block generator that includes a single
@ -86,7 +86,19 @@ func genValueTx(nbytes int) func(int, *BlockGen) {
toaddr := common.Address{}
data := make([]byte, nbytes)
gas, _ := IntrinsicGas(data, nil, false, false, false)
tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data), types.HomesteadSigner{}, benchRootKey)
signer := types.MakeSigner(gen.config, big.NewInt(int64(i)))
gasPrice := big.NewInt(0)
if gen.header.BaseFee != nil {
gasPrice = gen.header.BaseFee
}
tx, _ := types.SignNewTx(benchRootKey, signer, &types.LegacyTx{
Nonce: gen.TxNonce(benchRootAddr),
To: &toaddr,
Value: big.NewInt(1),
Gas: gas,
Data: data,
GasPrice: gasPrice,
})
gen.AddTx(tx)
}
}
@ -110,24 +122,38 @@ func init() {
// and fills the blocks with many small transactions.
func genTxRing(naccounts int) func(int, *BlockGen) {
from := 0
availableFunds := new(big.Int).Set(benchRootFunds)
return func(i int, gen *BlockGen) {
block := gen.PrevBlock(i - 1)
gas := block.GasLimit()
gasPrice := big.NewInt(0)
if gen.header.BaseFee != nil {
gasPrice = gen.header.BaseFee
}
signer := types.MakeSigner(gen.config, big.NewInt(int64(i)))
for {
gas -= params.TxGas
if gas < params.TxGas {
break
}
to := (from + 1) % naccounts
tx := types.NewTransaction(
gen.TxNonce(ringAddrs[from]),
ringAddrs[to],
benchRootFunds,
params.TxGas,
nil,
nil,
)
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, ringKeys[from])
burn := new(big.Int).SetUint64(params.TxGas)
burn.Mul(burn, gen.header.BaseFee)
availableFunds.Sub(availableFunds, burn)
if availableFunds.Cmp(big.NewInt(1)) < 0 {
panic("not enough funds")
}
tx, err := types.SignNewTx(ringKeys[from], signer,
&types.LegacyTx{
Nonce: gen.TxNonce(ringAddrs[from]),
To: &ringAddrs[to],
Value: availableFunds,
Gas: params.TxGas,
GasPrice: gasPrice,
})
if err != nil {
panic(err)
}
gen.AddTx(tx)
from = to
}
@ -245,6 +271,7 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
block := types.NewBlockWithHeader(header)
rawdb.WriteBody(db, hash, n, block.Body())
rawdb.WriteReceipts(db, hash, n, nil)
rawdb.WriteHeadBlockHash(db, hash)
}
}
}
@ -278,6 +305,8 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
}
makeChainForBench(db, full, count)
db.Close()
cacheConfig := *defaultCacheConfig
cacheConfig.TrieDirtyDisabled = true
b.ReportAllocs()
b.ResetTimer()
@ -287,7 +316,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
chain, err := NewBlockChain(db, &cacheConfig, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
b.Fatalf("error creating chain: %v", err)
}

View File

@ -296,7 +296,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
if diskRoot != (common.Hash{}) {
log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
snapDisk, err := bc.SetHeadBeyondRoot(head.NumberU64(), diskRoot)
snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
if err != nil {
return nil, err
}
@ -306,7 +306,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
} else {
log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
if err := bc.SetHead(head.NumberU64()); err != nil {
if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
return nil, err
}
}
@ -482,11 +482,11 @@ func (bc *BlockChain) loadLastState() error {
// was fast synced or full synced and in which state, the method will try to
// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
_, err := bc.SetHeadBeyondRoot(head, common.Hash{})
_, err := bc.setHeadBeyondRoot(head, common.Hash{}, false)
return err
}
// SetHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than
// persistent disk layer. Depending on whether the node was fast synced or full, and
@ -494,7 +494,7 @@ func (bc *BlockChain) SetHead(head uint64) error {
// retaining chain consistency.
//
// The method returns the block number where the requested root cap was found.
func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, error) {
func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bool) (uint64, error) {
if !bc.chainmu.TryLock() {
return 0, errChainStopped
}
@ -610,8 +610,8 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64,
}
// If SetHead was only called as a chain reparation method, try to skip
// touching the header chain altogether, unless the freezer is broken
if block := bc.CurrentBlock(); block.NumberU64() == head {
if target, force := updateFn(bc.db, block.Header()); force {
if repair {
if target, force := updateFn(bc.db, bc.CurrentBlock().Header()); force {
bc.hc.SetHead(target, updateFn, delFn)
}
} else {
@ -1438,11 +1438,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// Peek the error for the first block to decide the directing import logic
it := newInsertIterator(chain, results, bc.validator)
block, err := it.next()
// Left-trim all the known blocks
// Left-trim all the known blocks that don't need to build snapshot
if err == ErrKnownBlock {
if bc.skipBlock(err, it) {
// First block (and state) is known
// 1. We did a roll-back, and should now do a re-import
// 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
@ -1453,7 +1452,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
localTd = bc.GetTd(current.Hash(), current.NumberU64())
externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
)
for block != nil && err == ErrKnownBlock {
for block != nil && bc.skipBlock(err, it) {
externTd = new(big.Int).Add(externTd, block.Difficulty())
if localTd.Cmp(externTd) < 0 {
break
@ -1471,7 +1470,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// When node runs a fast sync again, it can re-import a batch of known blocks via
// `insertChain` while a part of them have higher total difficulty than current
// head full block(new pivot point).
for block != nil && err == ErrKnownBlock {
for block != nil && bc.skipBlock(err, it) {
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
if err := bc.writeKnownBlock(block); err != nil {
return it.index, err
@ -1503,8 +1502,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// If there are any still remaining, mark as ignored
return it.index, err
// Some other error occurred, abort
case err != nil:
// Some other error(except ErrKnownBlock) occurred, abort.
// ErrKnownBlock is allowed here since some known blocks
// still need re-execution to generate snapshots that are missing
case err != nil && !errors.Is(err, ErrKnownBlock):
bc.futureBlocks.Remove(block.Hash())
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
@ -1522,7 +1523,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
}
}()
for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
// If the chain is terminating, stop processing blocks
if bc.insertStopped() {
log.Debug("Abort during block processing")
@ -1537,8 +1538,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// Clique blocks where they can share state among each other, so importing an
// older block might complete the state of the subsequent one. In this case,
// just skip the block (we already validated it once fully (and crashed), since
// its header and body was already in the database).
if err == ErrKnownBlock {
// its header and body was already in the database). But if the corresponding
// snapshot layer is missing, forcibly rerun the execution to build it.
if bc.skipBlock(err, it) {
logger := log.Debug
if bc.chainConfig.Clique == nil {
logger = log.Warn
@ -2016,6 +2018,47 @@ func (bc *BlockChain) futureBlocksLoop() {
}
}
// skipBlock returns 'true', if the block being imported can be skipped over, meaning
// that the block does not need to be processed but can be considered already fully 'done'.
func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
// We can only ever bypass processing if the only error returned by the validator
// is ErrKnownBlock, which means all checks passed, but we already have the block
// and state.
if !errors.Is(err, ErrKnownBlock) {
return false
}
// If we're not using snapshots, we can skip this, since we have both block
// and (trie-) state
if bc.snaps == nil {
return true
}
var (
header = it.current() // header can't be nil
parentRoot common.Hash
)
// If we also have the snapshot-state, we can skip the processing.
if bc.snaps.Snapshot(header.Root) != nil {
return true
}
// In this case, we have the trie-state but not snapshot-state. If the parent
// snapshot-state exists, we need to process this in order to not get a gap
// in the snapshot layers.
// Resolve parent block
if parent := it.previous(); parent != nil {
parentRoot = parent.Root
} else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil {
parentRoot = parent.Root
}
if parentRoot == (common.Hash{}) {
return false // Theoretically impossible case
}
// Parent is also missing snapshot: we can skip this. Otherwise process.
if bc.snaps.Snapshot(parentRoot) == nil {
return true
}
return false
}
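Condensing the branches above: with snapshots enabled and err == ErrKnownBlock, the only case that still needs processing is a block whose own snapshot layer is missing while its parent's layer exists. A hypothetical reduction of that decision into a pure function (shouldSkipKnownBlock is not part of the commit):

// shouldSkipKnownBlock mirrors skipBlock's snapshot checks, assuming the
// ErrKnownBlock precondition already holds and snapshots are enabled.
func shouldSkipKnownBlock(haveOwnLayer, haveParentLayer bool) bool {
	if haveOwnLayer {
		return true // block, state and snapshot layer all present
	}
	return !haveParentLayer // process only to close a gap in the layers
}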
// maintainTxIndex is responsible for the construction and deletion of the // maintainTxIndex is responsible for the construction and deletion of the
// transaction index. // transaction index.
// //

View File

@@ -150,6 +150,14 @@ func (it *insertIterator) previous() *types.Header {
 	return it.chain[it.index-1].Header()
 }

+// current returns the current header that is being processed, or nil.
+func (it *insertIterator) current() *types.Header {
+	if it.index == -1 || it.index >= len(it.chain) {
+		return nil
+	}
+	return it.chain[it.index].Header()
+}
+
 // first returns the first block in the it.
 func (it *insertIterator) first() *types.Block {
 	return it.chain[0]

View File

@@ -1863,3 +1863,124 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
 		t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
 	}
 }

+// TestIssue23496 tests the scenario described in https://github.com/ethereum/go-ethereum/pull/23496#issuecomment-926393893
+// Credits to @zzyalbert for finding the issue.
+//
+// Local chain owns these blocks:
+// G  B1  B2  B3  B4
+// B1: state committed
+// B2: snapshot disk layer
+// B3: state committed
+// B4: head block
+//
+// Crash happens without fully persisting snapshot and in-memory states,
+// chain rewinds itself to B1 (skipping B3 in order to recover the snapshot).
+// In this case the snapshot layer of B3 is not created because of the
+// already-existing state.
+func TestIssue23496(t *testing.T) {
+	// It's hard to follow the test case, visualize the input
+	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+	// Create a temporary persistent database
+	datadir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("Failed to create temporary datadir: %v", err)
+	}
+	os.RemoveAll(datadir)
+
+	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
+	if err != nil {
+		t.Fatalf("Failed to create persistent database: %v", err)
+	}
+	defer db.Close() // Might double close, should be fine
+
+	// Initialize a fresh chain
+	var (
+		genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+		engine  = ethash.NewFullFaker()
+		config  = &CacheConfig{
+			TrieCleanLimit: 256,
+			TrieDirtyLimit: 256,
+			TrieTimeLimit:  5 * time.Minute,
+			SnapshotLimit:  256,
+			SnapshotWait:   true,
+		}
+	)
+	chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to create chain: %v", err)
+	}
+	blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), 4, func(i int, b *BlockGen) {
+		b.SetCoinbase(common.Address{0x02})
+		b.SetDifficulty(big.NewInt(1000000))
+	})
+
+	// Insert block B1 and commit the state into disk
+	if _, err := chain.InsertChain(blocks[:1]); err != nil {
+		t.Fatalf("Failed to import canonical chain start: %v", err)
+	}
+	chain.stateCache.TrieDB().Commit(blocks[0].Root(), true, nil)
+
+	// Insert block B2 and commit the snapshot into disk
+	if _, err := chain.InsertChain(blocks[1:2]); err != nil {
+		t.Fatalf("Failed to import canonical chain start: %v", err)
+	}
+	if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
+		t.Fatalf("Failed to flatten snapshots: %v", err)
+	}
+
+	// Insert block B3 and commit the state into disk
+	if _, err := chain.InsertChain(blocks[2:3]); err != nil {
+		t.Fatalf("Failed to import canonical chain start: %v", err)
+	}
+	chain.stateCache.TrieDB().Commit(blocks[2].Root(), true, nil)
+
+	// Insert the remaining blocks
+	if _, err := chain.InsertChain(blocks[3:]); err != nil {
+		t.Fatalf("Failed to import canonical chain tail: %v", err)
+	}
+
+	// Pull the plug on the database, simulating a hard crash
+	db.Close()
+
+	// Start a new blockchain back up and see where the repair leads us
+	db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
+	if err != nil {
+		t.Fatalf("Failed to reopen persistent database: %v", err)
+	}
+	defer db.Close()
+
+	chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	if err != nil {
+		t.Fatalf("Failed to recreate chain: %v", err)
+	}
+	defer chain.Stop()
+
+	if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
+		t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
+	}
+	if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
+		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
+	}
+	if head := chain.CurrentBlock(); head.NumberU64() != uint64(1) {
+		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(1))
+	}
+
+	// Reinsert B2-B4
+	if _, err := chain.InsertChain(blocks[1:]); err != nil {
+		t.Fatalf("Failed to import canonical chain tail: %v", err)
+	}
+	if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
+		t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
+	}
+	if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
+		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
+	}
+	if head := chain.CurrentBlock(); head.NumberU64() != uint64(4) {
+		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
+	}
+	if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
+		t.Error("Failed to regenerate the snapshot of known state")
+	}
+}

View File

@@ -360,7 +360,7 @@ func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
 func TestReorgLongBlocks(t *testing.T)  { testReorgLong(t, true) }

 func testReorgLong(t *testing.T, full bool) {
-	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280, full)
+	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full)
 }

 // Tests that reorganising a short difficult chain after a long easy one
@@ -380,7 +380,7 @@ func testReorgShort(t *testing.T, full bool) {
 	for i := 0; i < len(diff); i++ {
 		diff[i] = -9
 	}
-	testReorg(t, easy, diff, 12615120, full)
+	testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full)
 }

 func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
@@ -2385,7 +2385,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
 		for txi := 0; txi < numTxs; txi++ {
 			uniq := uint64(i*numTxs + txi)
 			recipient := recipientFn(uniq)
-			tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, big.NewInt(1), nil), signer, testBankKey)
+			tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, block.header.BaseFee, nil), signer, testBankKey)
 			if err != nil {
 				b.Error(err)
 			}

View File

@@ -51,6 +51,10 @@ var (
 	// next one expected based on the local chain.
 	ErrNonceTooHigh = errors.New("nonce too high")

+	// ErrNonceMax is returned if the nonce of a transaction sender account has
+	// maximum allowed value and would become invalid if incremented.
+	ErrNonceMax = errors.New("nonce has max value")
+
 	// ErrGasLimitReached is returned by the gas pool if the amount of gas required
 	// by a transaction is higher than what's left in the block.
 	ErrGasLimitReached = errors.New("gas limit reached")
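The guard this error enables is a plain uint64 overflow check. A minimal sketch; checkNonceOverflow is an illustrative helper, not the actual geth state-transition code:

    // checkNonceOverflow rejects a sender whose nonce is already at the uint64
    // maximum, since incrementing it would wrap around to zero.
    func checkNonceOverflow(currentNonce uint64) error {
    	if currentNonce+1 < currentNonce { // uint64 wrap-around
    		return ErrNonceMax
    	}
    	return nil
    }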

View File

@@ -63,8 +63,10 @@ func TestCreation(t *testing.T) {
 			{12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
 			{12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
 			{12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
-			{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 0}},        // First London block
-			{20000000, ID{Hash: checksumToBytes(0xb715077d), Next: 0}},        // Future London block
+			{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
+			{13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
+			{13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}},        // First Arrow Glacier block
+			{20000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}},        // Future Arrow Glacier block
 		},
 	},
 	// Ropsten test cases
@@ -205,11 +207,11 @@ func TestValidation(t *testing.T) {
 		// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
 		{7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},

-		// Local is mainnet London, far in the future. Remote announces Gopherium (non existing fork)
+		// Local is mainnet Arrow Glacier, far in the future. Remote announces Gopherium (non existing fork)
 		// at some future block 88888888, for itself, but past block for local. Local is incompatible.
 		//
 		// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
-		{88888888, ID{Hash: checksumToBytes(0xb715077d), Next: 88888888}, ErrLocalIncompatibleOrStale},
+		{88888888, ID{Hash: checksumToBytes(0x20c327fc), Next: 88888888}, ErrLocalIncompatibleOrStale},

 		// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
 		// fork) at block 7279999, before Petersburg. Local is incompatible.
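The checksums exercised above (0xb715077d, 0x20c327fc) follow EIP-2124: a CRC32 seeded with the genesis hash and folded with each fork block in activation order. A hedged sketch of the derivation; forkChecksum is an illustrative name, the real logic lives in core/forkid:

    import (
    	"encoding/binary"
    	"hash/crc32"

    	"github.com/ethereum/go-ethereum/common"
    )

    // forkChecksum folds each fork activation block into a CRC32 seeded with the
    // genesis hash, per EIP-2124. Per the test data above, appending Arrow
    // Glacier's 13,773,000 to the fork list is what moves mainnet's ID from
    // 0xb715077d to 0x20c327fc.
    func forkChecksum(genesis common.Hash, forks ...uint64) uint32 {
    	sum := crc32.ChecksumIEEE(genesis[:])
    	for _, fork := range forks {
    		var blob [8]byte
    		binary.BigEndian.PutUint64(blob[:], fork)
    		sum = crc32.Update(sum, crc32.IEEETable, blob[:])
    	}
    	return sum
    }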

View File

@@ -158,7 +158,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
 	return SetupGenesisBlockWithOverride(db, genesis, nil)
 }

-func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideLondon *big.Int) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier *big.Int) (*params.ChainConfig, common.Hash, error) {
 	if genesis != nil && genesis.Config == nil {
 		return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
 	}
@@ -204,8 +204,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
 	}
 	// Get the existing chain configuration.
 	newcfg := genesis.configOrDefault(stored)
-	if overrideLondon != nil {
-		newcfg.LondonBlock = overrideLondon
+	if overrideArrowGlacier != nil {
+		newcfg.ArrowGlacierBlock = overrideArrowGlacier
 	}
 	if err := newcfg.CheckConfigForkOrder(); err != nil {
 		return newcfg, common.Hash{}, err
@@ -244,6 +244,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
 		return params.MainnetChainConfig
 	case ghash == params.RopstenGenesisHash:
 		return params.RopstenChainConfig
+	case ghash == params.SepoliaGenesisHash:
+		return params.SepoliaChainConfig
 	case ghash == params.RinkebyGenesisHash:
 		return params.RinkebyChainConfig
 	case ghash == params.GoerliGenesisHash:
@@ -322,7 +324,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
 	if config.Clique != nil && len(block.Extra()) == 0 {
 		return nil, errors.New("can't start clique chain without signers")
 	}
-	rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty)
+	rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
 	rawdb.WriteBlock(db, block)
 	rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
 	rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
@@ -400,8 +402,21 @@ func DefaultGoerliGenesisBlock() *Genesis {
 	}
 }

+// DefaultSepoliaGenesisBlock returns the Sepolia network genesis block.
+func DefaultSepoliaGenesisBlock() *Genesis {
+	return &Genesis{
+		Config:     params.SepoliaChainConfig,
+		Nonce:      0,
+		ExtraData:  []byte("Sepolia, Athens, Attica, Greece!"),
+		GasLimit:   0x1c9c380,
+		Difficulty: big.NewInt(0x20000),
+		Timestamp:  1633267481,
+		Alloc:      decodePrealloc(sepoliaAllocData),
+	}
+}
+
 // DeveloperGenesisBlock returns the 'geth --dev' genesis block.
-func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
+func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *Genesis {
 	// Override the default period to the user requested one
 	config := *params.AllCliqueProtocolChanges
 	config.Clique = &params.CliqueConfig{
@@ -413,7 +428,7 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
 	return &Genesis{
 		Config:     &config,
 		ExtraData:  append(append(make([]byte, 32), faucet[:]...), make([]byte, crypto.SignatureLength)...),
-		GasLimit:   11500000,
+		GasLimit:   gasLimit,
 		BaseFee:    big.NewInt(params.InitialBaseFee),
 		Difficulty: big.NewInt(1),
 		Alloc: map[common.Address]GenesisAccount{
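With the extra parameter, the developer-chain gas limit is caller-supplied instead of the previous hard-coded 11,500,000. A hedged usage sketch (period, gas limit and faucet address are illustrative values):

    // A 5-second-period dev chain with a 30M gas limit, funding the given faucet.
    faucet := common.HexToAddress("0x0000000000000000000000000000000000000001")
    genesis := DeveloperGenesisBlock(5, 30_000_000, faucet)
    _ = genesis // feed into SetupGenesisBlock / backend construction as before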

File diff suppressed because one or more lines are too long

View File

@@ -30,25 +30,6 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 )

-func TestDefaultGenesisBlock(t *testing.T) {
-	block := DefaultGenesisBlock().ToBlock(nil)
-	if block.Hash() != params.MainnetGenesisHash {
-		t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash)
-	}
-	block = DefaultRopstenGenesisBlock().ToBlock(nil)
-	if block.Hash() != params.RopstenGenesisHash {
-		t.Errorf("wrong ropsten genesis hash, got %v, want %v", block.Hash(), params.RopstenGenesisHash)
-	}
-	block = DefaultRinkebyGenesisBlock().ToBlock(nil)
-	if block.Hash() != params.RinkebyGenesisHash {
-		t.Errorf("wrong rinkeby genesis hash, got %v, want %v", block.Hash(), params.RinkebyGenesisHash)
-	}
-	block = DefaultGoerliGenesisBlock().ToBlock(nil)
-	if block.Hash() != params.GoerliGenesisHash {
-		t.Errorf("wrong goerli genesis hash, got %v, want %v", block.Hash(), params.GoerliGenesisHash)
-	}
-}
-
 func TestInvalidCliqueConfig(t *testing.T) {
 	block := DefaultGoerliGenesisBlock()
 	block.ExtraData = []byte{}
@@ -179,33 +160,56 @@ func TestSetupGenesis(t *testing.T) {
 	}
 }

-// TestGenesisHashes checks the congruity of default genesis data to corresponding hardcoded genesis hash values.
+// TestGenesisHashes checks the congruity of default genesis data to
+// corresponding hardcoded genesis hash values.
 func TestGenesisHashes(t *testing.T) {
-	cases := []struct {
+	for i, c := range []struct {
 		genesis *Genesis
-		hash    common.Hash
+		want    common.Hash
 	}{
-		{
-			genesis: DefaultGenesisBlock(),
-			hash:    params.MainnetGenesisHash,
-		},
-		{
-			genesis: DefaultGoerliGenesisBlock(),
-			hash:    params.GoerliGenesisHash,
-		},
-		{
-			genesis: DefaultRopstenGenesisBlock(),
-			hash:    params.RopstenGenesisHash,
-		},
-		{
-			genesis: DefaultRinkebyGenesisBlock(),
-			hash:    params.RinkebyGenesisHash,
-		},
-	}
-	for i, c := range cases {
-		b := c.genesis.MustCommit(rawdb.NewMemoryDatabase())
-		if got := b.Hash(); got != c.hash {
-			t.Errorf("case: %d, want: %s, got: %s", i, c.hash.Hex(), got.Hex())
+		{DefaultGenesisBlock(), params.MainnetGenesisHash},
+		{DefaultGoerliGenesisBlock(), params.GoerliGenesisHash},
+		{DefaultRopstenGenesisBlock(), params.RopstenGenesisHash},
+		{DefaultRinkebyGenesisBlock(), params.RinkebyGenesisHash},
+		{DefaultSepoliaGenesisBlock(), params.SepoliaGenesisHash},
+	} {
+		// Test via MustCommit
+		if have := c.genesis.MustCommit(rawdb.NewMemoryDatabase()).Hash(); have != c.want {
+			t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
+		}
+		// Test via ToBlock
+		if have := c.genesis.ToBlock(nil).Hash(); have != c.want {
+			t.Errorf("case: %d b), want: %s, got: %s", i, c.want.Hex(), have.Hex())
 		}
 	}
 }
+
+func TestGenesis_Commit(t *testing.T) {
+	genesis := &Genesis{
+		BaseFee: big.NewInt(params.InitialBaseFee),
+		Config:  params.TestChainConfig,
+		// difficulty is nil
+	}
+	db := rawdb.NewMemoryDatabase()
+	genesisBlock, err := genesis.Commit(db)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if genesis.Difficulty != nil {
+		t.Fatalf("assumption wrong")
+	}
+	// This value should have been set as default in the ToBlock method.
+	if genesisBlock.Difficulty().Cmp(params.GenesisDifficulty) != 0 {
+		t.Errorf("assumption wrong: want: %d, got: %v", params.GenesisDifficulty, genesisBlock.Difficulty())
+	}
+	// Expect the stored total difficulty to be the difficulty of the genesis block.
+	stored := rawdb.ReadTd(db, genesisBlock.Hash(), genesisBlock.NumberU64())
+	if stored.Cmp(genesisBlock.Difficulty()) != 0 {
+		t.Errorf("unequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
+	}
+}

View File

@@ -35,20 +35,15 @@ import (
 // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
 func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
-	data, _ := db.Ancient(freezerHashTable, number)
-	if len(data) == 0 {
-		data, _ = db.Get(headerHashKey(number))
-		// In the background freezer is moving data from leveldb to flatten files.
-		// So during the first check for ancient db, the data is not yet in there,
-		// but when we reach into leveldb, the data was already moved. That would
-		// result in a not found error.
-		if len(data) == 0 {
-			data, _ = db.Ancient(freezerHashTable, number)
-		}
-	}
-	if len(data) == 0 {
-		return common.Hash{}
-	}
+	var data []byte
+	db.ReadAncients(func(reader ethdb.AncientReader) error {
+		data, _ = reader.Ancient(freezerHashTable, number)
+		if len(data) == 0 {
+			// Get it by hash from leveldb
+			data, _ = db.Get(headerHashKey(number))
+		}
+		return nil
+	})
 	return common.BytesToHash(data)
 }
@@ -304,32 +299,25 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
 // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
 func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
-	// First try to look up the data in ancient database. Extra hash
-	// comparison is necessary since ancient database only maintains
-	// the canonical data.
-	data, _ := db.Ancient(freezerHeaderTable, number)
-	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
-		return data
-	}
-	// Then try to look up the data in leveldb.
-	data, _ = db.Get(headerKey(number, hash))
-	if len(data) > 0 {
-		return data
-	}
-	// In the background freezer is moving data from leveldb to flatten files.
-	// So during the first check for ancient db, the data is not yet in there,
-	// but when we reach into leveldb, the data was already moved. That would
-	// result in a not found error.
-	data, _ = db.Ancient(freezerHeaderTable, number)
-	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
-		return data
-	}
-	return nil // Can't find the data anywhere.
+	var data []byte
+	db.ReadAncients(func(reader ethdb.AncientReader) error {
+		// First try to look up the data in ancient database. Extra hash
+		// comparison is necessary since ancient database only maintains
+		// the canonical data.
+		data, _ = reader.Ancient(freezerHeaderTable, number)
+		if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
+			return nil
+		}
+		// If not, try reading from leveldb
+		data, _ = db.Get(headerKey(number, hash))
+		return nil
+	})
+	return data
 }
 // HasHeader verifies the existence of a block header corresponding to the hash.
 func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
-	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
+	if isCanon(db, number, hash) {
 		return true
 	}
 	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
@@ -389,53 +377,48 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
 	}
 }

+// isCanon is an internal utility method, to check whether the given number/hash
+// is part of the ancient (canon) set.
+func isCanon(reader ethdb.AncientReader, number uint64, hash common.Hash) bool {
+	h, err := reader.Ancient(freezerHashTable, number)
+	if err != nil {
+		return false
+	}
+	return bytes.Equal(h, hash[:])
+}
+
 // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
 func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
 	// First try to look up the data in ancient database. Extra hash
 	// comparison is necessary since ancient database only maintains
 	// the canonical data.
-	data, _ := db.Ancient(freezerBodiesTable, number)
-	if len(data) > 0 {
-		h, _ := db.Ancient(freezerHashTable, number)
-		if common.BytesToHash(h) == hash {
-			return data
-		}
-	}
-	// Then try to look up the data in leveldb.
-	data, _ = db.Get(blockBodyKey(number, hash))
-	if len(data) > 0 {
-		return data
-	}
-	// In the background freezer is moving data from leveldb to flatten files.
-	// So during the first check for ancient db, the data is not yet in there,
-	// but when we reach into leveldb, the data was already moved. That would
-	// result in a not found error.
-	data, _ = db.Ancient(freezerBodiesTable, number)
-	if len(data) > 0 {
-		h, _ := db.Ancient(freezerHashTable, number)
-		if common.BytesToHash(h) == hash {
-			return data
-		}
-	}
-	return nil // Can't find the data anywhere.
+	var data []byte
+	db.ReadAncients(func(reader ethdb.AncientReader) error {
+		// Check if the data is in ancients
+		if isCanon(reader, number, hash) {
+			data, _ = reader.Ancient(freezerBodiesTable, number)
+			return nil
+		}
+		// If not, try reading from leveldb
+		data, _ = db.Get(blockBodyKey(number, hash))
+		return nil
+	})
+	return data
 }
 // ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
 // block at number, in RLP encoding.
 func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
-	// If it's an ancient one, we don't need the canonical hash
-	data, _ := db.Ancient(freezerBodiesTable, number)
-	if len(data) == 0 {
-		// Need to get the hash
-		data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
-		// In the background freezer is moving data from leveldb to flatten files.
-		// So during the first check for ancient db, the data is not yet in there,
-		// but when we reach into leveldb, the data was already moved. That would
-		// result in a not found error.
-		if len(data) == 0 {
-			data, _ = db.Ancient(freezerBodiesTable, number)
-		}
-	}
+	var data []byte
+	db.ReadAncients(func(reader ethdb.AncientReader) error {
+		data, _ = reader.Ancient(freezerBodiesTable, number)
+		if len(data) > 0 {
+			return nil
+		}
+		// Get it by hash from leveldb
+		data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
+		return nil
+	})
 	return data
 }
@@ -448,7 +431,7 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp
 // HasBody verifies the existence of a block body corresponding to the hash.
 func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
-	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
+	if isCanon(db, number, hash) {
 		return true
 	}
 	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
@@ -489,34 +472,19 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
 // ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
 func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
-	// First try to look up the data in ancient database. Extra hash
-	// comparison is necessary since ancient database only maintains
-	// the canonical data.
-	data, _ := db.Ancient(freezerDifficultyTable, number)
-	if len(data) > 0 {
-		h, _ := db.Ancient(freezerHashTable, number)
-		if common.BytesToHash(h) == hash {
-			return data
-		}
-	}
-	// Then try to look up the data in leveldb.
-	data, _ = db.Get(headerTDKey(number, hash))
-	if len(data) > 0 {
-		return data
-	}
-	// In the background freezer is moving data from leveldb to flatten files.
-	// So during the first check for ancient db, the data is not yet in there,
-	// but when we reach into leveldb, the data was already moved. That would
-	// result in a not found error.
-	data, _ = db.Ancient(freezerDifficultyTable, number)
-	if len(data) > 0 {
-		h, _ := db.Ancient(freezerHashTable, number)
-		if common.BytesToHash(h) == hash {
-			return data
-		}
-	}
-	return nil // Can't find the data anywhere.
+	var data []byte
+	db.ReadAncients(func(reader ethdb.AncientReader) error {
+		// Check if the data is in ancients
+		if isCanon(reader, number, hash) {
+			data, _ = reader.Ancient(freezerDifficultyTable, number)
+			return nil
+		}
+		// If not, try reading from leveldb
+		data, _ = db.Get(headerTDKey(number, hash))
+		return nil
+	})
+	return data
 }
 // ReadTd retrieves a block's total difficulty corresponding to the hash.
 func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
@@ -553,7 +521,7 @@ func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
 // HasReceipts verifies the existence of all the transaction receipts belonging
 // to a block.
 func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
-	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
+	if isCanon(db, number, hash) {
 		return true
 	}
 	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
@@ -564,34 +532,19 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
 // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
 func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
-	// First try to look up the data in ancient database. Extra hash
-	// comparison is necessary since ancient database only maintains
-	// the canonical data.
-	data, _ := db.Ancient(freezerReceiptTable, number)
-	if len(data) > 0 {
-		h, _ := db.Ancient(freezerHashTable, number)
-		if common.BytesToHash(h) == hash {
-			return data
-		}
-	}
-	// Then try to look up the data in leveldb.
-	data, _ = db.Get(blockReceiptsKey(number, hash))
-	if len(data) > 0 {
-		return data
-	}
-	// In the background freezer is moving data from leveldb to flatten files.
-	// So during the first check for ancient db, the data is not yet in there,
-	// but when we reach into leveldb, the data was already moved. That would
-	// result in a not found error.
-	data, _ = db.Ancient(freezerReceiptTable, number)
-	if len(data) > 0 {
-		h, _ := db.Ancient(freezerHashTable, number)
-		if common.BytesToHash(h) == hash {
-			return data
-		}
-	}
-	return nil // Can't find the data anywhere.
+	var data []byte
+	db.ReadAncients(func(reader ethdb.AncientReader) error {
+		// Check if the data is in ancients
+		if isCanon(reader, number, hash) {
+			data, _ = reader.Ancient(freezerReceiptTable, number)
+			return nil
+		}
+		// If not, try reading from leveldb
+		data, _ = db.Get(blockReceiptsKey(number, hash))
+		return nil
+	})
+	return data
 }
 // ReadRawReceipts retrieves all the transaction receipts belonging to a block.
 // The receipt metadata fields are not guaranteed to be populated, so they
@@ -716,7 +669,7 @@ func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, t
 // ReadLogs retrieves the logs for all transactions in a block. The log fields
 // are populated with metadata. In case the receipts or the block body
 // are not found, a nil is returned.
-func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
+func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
 	// Retrieve the flattened receipt slice
 	data := ReadReceiptsRLP(db, hash, number)
 	if len(data) == 0 {
@@ -724,7 +677,12 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
 	}
 	receipts := []*receiptLogs{}
 	if err := rlp.DecodeBytes(data, &receipts); err != nil {
-		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
+		// Receipts might be in the legacy format, try decoding that.
+		// TODO: to be removed after users migrated
+		if logs := readLegacyLogs(db, hash, number, config); logs != nil {
+			return logs
+		}
+		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
 		return nil
 	}
@@ -744,6 +702,21 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
 	return logs
 }

+// readLegacyLogs is a temporary workaround for when trying to read logs
+// from a block which has its receipt stored in the legacy format. It'll
+// be removed after users have migrated their freezer databases.
+func readLegacyLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
+	receipts := ReadReceipts(db, hash, number, config)
+	if receipts == nil {
+		return nil
+	}
+	logs := make([][]*types.Log, len(receipts))
+	for i, receipt := range receipts {
+		logs[i] = receipt.Logs
+	}
+	return logs
+}
+
 // ReadBlock retrieves an entire block corresponding to the hash, assembling it
 // back from the stored header and body. If either the header or body could not
 // be retrieved nil is returned.

View File

@@ -744,7 +744,7 @@ func TestReadLogs(t *testing.T) {
 	// Insert the receipt slice into the database and check presence
 	WriteReceipts(db, hash, 0, receipts)
-	logs := ReadLogs(db, hash, 0)
+	logs := ReadLogs(db, hash, 0, params.TestChainConfig)
 	if len(logs) == 0 {
 		t.Fatalf("no logs returned")
 	}

View File

@@ -47,7 +47,7 @@ func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) {
 // ReadSnapshotRoot retrieves the root of the block whose state is contained in
 // the persisted snapshot.
 func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
-	data, _ := db.Get(snapshotRootKey)
+	data, _ := db.Get(SnapshotRootKey)
 	if len(data) != common.HashLength {
 		return common.Hash{}
 	}
@@ -57,7 +57,7 @@ func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
 // WriteSnapshotRoot stores the root of the block whose state is contained in
 // the persisted snapshot.
 func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
-	if err := db.Put(snapshotRootKey, root[:]); err != nil {
+	if err := db.Put(SnapshotRootKey, root[:]); err != nil {
 		log.Crit("Failed to store snapshot root", "err", err)
 	}
 }
@@ -67,7 +67,7 @@ func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
 // be used during updates, so a crash or failure will mark the entire snapshot
 // invalid.
 func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
-	if err := db.Delete(snapshotRootKey); err != nil {
+	if err := db.Delete(SnapshotRootKey); err != nil {
 		log.Crit("Failed to remove snapshot root", "err", err)
 	}
 }

View File

@@ -44,17 +44,20 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
 		logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
 		hash   common.Hash
 	)
-	for i := uint64(0); i < frozen; i++ {
-		// Since the freezer has all data in sequential order on a file,
-		// it would be 'neat' to read more data in one go, and let the
-		// freezerdb return N items (e.g up to 1000 items per go)
-		// That would require an API change in Ancients though
-		if h, err := db.Ancient(freezerHashTable, i); err != nil {
-			log.Crit("Failed to init database from freezer", "err", err)
-		} else {
-			hash = common.BytesToHash(h)
-		}
-		WriteHeaderNumber(batch, hash, i)
+	for i := uint64(0); i < frozen; {
+		// We read 100K hashes at a time, for a total of 3.2 MB
+		count := uint64(100_000)
+		if i+count > frozen {
+			count = frozen - i
+		}
+		data, err := db.AncientRange(freezerHashTable, i, count, 32*count)
+		if err != nil {
+			log.Crit("Failed to init database from freezer", "err", err)
+		}
+		for j, h := range data {
+			number := i + uint64(j)
+			hash = common.BytesToHash(h)
+			WriteHeaderNumber(batch, hash, number)
 			// If enough data was accumulated in memory or we're at the last block, dump to disk
 			if batch.ValueSize() > ethdb.IdealBatchSize {
 				if err := batch.Write(); err != nil {
@@ -62,6 +65,8 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
 				}
 				batch.Reset()
 			}
+		}
+		i += uint64(len(data))
 		// If we've spent too much time already, notify the user of what we're doing
 		if time.Since(logged) > 8*time.Second {
 			log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))

View File

@@ -89,8 +89,8 @@ func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
 	return nil, errNotSupported
 }

-// ReadAncients returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) ReadAncients(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
+// AncientRange returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
 	return nil, errNotSupported
 }
@@ -119,6 +119,22 @@ func (db *nofreezedb) Sync() error {
 	return errNotSupported
 }

+func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
+	// Unlike other ancient-related methods, this method does not return
+	// errNotSupported when invoked.
+	// The reason for this is that the caller might want to do several things:
+	// 1. Check if something is in the freezer,
+	// 2. If not, check leveldb.
+	//
+	// This will work, since the ancient-checks inside 'fn' will return errors,
+	// and the leveldb work will continue.
+	//
+	// If we instead were to return errNotSupported here, then the caller would
+	// have to explicitly check for that, having an extra clause to do the
+	// non-ancient operations.
+	return fn(db)
+}
 // NewDatabase creates a high level database on top of a given key-value data
 // store without a freezer moving immutable chain segments into cold storage.
 func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@@ -355,7 +371,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			accountSnaps.Add(size)
 		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
 			storageSnaps.Add(size)
-		case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength):
+		case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
 			preimages.Add(size)
 		case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
 			metadata.Add(size)
@@ -377,7 +393,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		var accounted bool
 		for _, meta := range [][]byte{
 			databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
-			fastTrieProgressKey, snapshotDisabledKey, snapshotRootKey, snapshotJournalKey,
+			fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
 			snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
 			uncleanShutdownKey, badBlockKey,
 		} {

View File

@@ -80,8 +80,9 @@ type freezer struct {
 	frozen    uint64 // Number of blocks already frozen
 	threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)

-	// This lock synchronizes writers and the truncate operation.
-	writeLock  sync.Mutex
+	// This lock synchronizes writers and the truncate operation, as well as
+	// the "atomic" (batched) read operations.
+	writeLock  sync.RWMutex
 	writeBatch *freezerBatch

 	readonly bool
@@ -201,12 +202,12 @@ func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
 	return nil, errUnknownTable
 }

-// ReadAncients retrieves multiple items in sequence, starting from the index 'start'.
+// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
 // It will return
 //  - at most 'max' items,
 //  - at least 1 item (even if exceeding the maxByteSize), but will otherwise
 //    return as many items as fit into maxByteSize.
-func (f *freezer) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
 	if table := f.tables[kind]; table != nil {
 		return table.RetrieveItems(start, count, maxBytes)
 	}
@@ -222,8 +223,8 @@ func (f *freezer) Ancients() (uint64, error) {
 func (f *freezer) AncientSize(kind string) (uint64, error) {
 	// This needs the write lock to avoid data races on table fields.
 	// Speed doesn't matter here, AncientSize is for debugging.
-	f.writeLock.Lock()
-	defer f.writeLock.Unlock()
+	f.writeLock.RLock()
+	defer f.writeLock.RUnlock()

 	if table := f.tables[kind]; table != nil {
 		return table.size()
@@ -231,6 +232,14 @@ func (f *freezer) AncientSize(kind string) (uint64, error) {
 	return 0, errUnknownTable
 }

+// ReadAncients runs the given read operation while ensuring that no writes take place
+// on the underlying freezer.
+func (f *freezer) ReadAncients(fn func(ethdb.AncientReader) error) (err error) {
+	f.writeLock.RLock()
+	defer f.writeLock.RUnlock()
+	return fn(f)
+}
+
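With the lock upgraded to an RWMutex, batched reads run concurrently with one another while ModifyAncients and truncation still exclude them. A hedged sketch of what a caller gains (`f` and `n` are illustrative):

    var header, body []byte
    _ = f.ReadAncients(func(reader ethdb.AncientReader) error {
    	// Both lookups observe one consistent, untruncated freezer state.
    	header, _ = reader.Ancient(freezerHeaderTable, n)
    	body, _ = reader.Ancient(freezerBodiesTable, n)
    	return nil
    })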
 // ModifyAncients runs the given write operation.
 func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
 	if f.readonly {

View File

@@ -118,7 +118,7 @@ func (batch *freezerTableBatch) reset() {
 // existing data.
 func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
 	if item != batch.curItem {
-		return errOutOrderInsertion
+		return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
 	}

 	// Encode the item.
@@ -138,7 +138,7 @@ func (batch *freezerTableBatch) Append(item uint64, data interface{}) error {
 // existing data.
 func (batch *freezerTableBatch) AppendRaw(item uint64, blob []byte) error {
 	if item != batch.curItem {
-		return errOutOrderInsertion
+		return fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, item, batch.curItem)
 	}

 	encItem := blob
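Wrapping with %w preserves sentinel matching while adding the have/want detail. A minimal sketch (values illustrative):

    err := fmt.Errorf("%w: have %d want %d", errOutOrderInsertion, 7, 5)
    fmt.Println(errors.Is(err, errOutOrderInsertion)) // true: callers can still match the sentinel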

Some files were not shown because too many files have changed in this diff.