Merge tag 'v1.10.17' into merge/v1.10.17

philip-morlier 2022-03-30 15:40:07 -07:00
commit 4211c5c401
178 changed files with 10681 additions and 1780 deletions

View File

@@ -16,7 +16,7 @@ jobs:
     - stage: lint
       os: linux
       dist: bionic
-      go: 1.17.x
+      go: 1.18.x
       env:
         - lint
       git:
@@ -31,7 +31,7 @@ jobs:
       os: linux
       arch: amd64
       dist: bionic
-      go: 1.17.x
+      go: 1.18.x
       env:
         - docker
       services:
@@ -48,7 +48,7 @@ jobs:
       os: linux
       arch: arm64
       dist: bionic
-      go: 1.17.x
+      go: 1.18.x
      env:
        - docker
      services:
@@ -65,7 +65,7 @@ jobs:
      if: type = push
      os: linux
      dist: bionic
-      go: 1.17.x
+      go: 1.18.x
      env:
        - ubuntu-ppa
        - GO111MODULE=on
@@ -90,7 +90,7 @@ jobs:
      os: linux
      dist: bionic
      sudo: required
-      go: 1.17.x
+      go: 1.18.x
      env:
        - azure-linux
        - GO111MODULE=on
@@ -148,7 +148,7 @@ jobs:
        - sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle"
 
        # Install Go to allow building with
-        - curl https://dl.google.com/go/go1.16.linux-amd64.tar.gz | tar -xz
+        - curl https://dl.google.com/go/go1.18.linux-amd64.tar.gz | tar -xz
        - export PATH=`pwd`/go/bin:$PATH
        - export GOROOT=`pwd`/go
        - export GOPATH=$HOME/go
@@ -162,7 +162,7 @@ jobs:
    - stage: build
      if: type = push
      os: osx
-      go: 1.17.x
+      go: 1.18.x
      env:
        - azure-osx
        - azure-ios
@@ -194,7 +194,7 @@ jobs:
      os: linux
      arch: amd64
      dist: bionic
-      go: 1.17.x
+      go: 1.18.x
      env:
        - GO111MODULE=on
      script:
@@ -205,7 +205,7 @@ jobs:
      os: linux
      arch: arm64
      dist: bionic
-      go: 1.17.x
+      go: 1.18.x
      env:
        - GO111MODULE=on
      script:
@@ -214,7 +214,7 @@ jobs:
    - stage: build
      os: linux
      dist: bionic
-      go: 1.16.x
+      go: 1.17.x
      env:
        - GO111MODULE=on
      script:
@@ -225,7 +225,7 @@ jobs:
      if: type = cron
      os: linux
      dist: bionic
-      go: 1.17.x
+      go: 1.18.x
      env:
        - azure-purge
        - GO111MODULE=on
@@ -239,7 +239,7 @@ jobs:
      if: type = cron
      os: linux
      dist: bionic
-      go: 1.17.x
+      go: 1.18.x
      env:
        - GO111MODULE=on
      script:

View File

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""
 
 # Build Geth in a stock Go builder container
-FROM golang:1.17-alpine as builder
+FROM golang:1.18-alpine as builder
 
 RUN apk add --no-cache gcc musl-dev linux-headers git

View File

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""
 
 # Build Geth in a stock Go builder container
-FROM golang:1.17-alpine as builder
+FROM golang:1.18-alpine as builder
 
 RUN apk add --no-cache gcc musl-dev linux-headers git

View File

@@ -52,6 +52,22 @@ Going through all the possible command line flags is out of scope here (please c
 but we've enumerated a few common parameter combos to get you up to speed quickly
 on how you can run your own `geth` instance.
 
+### Hardware Requirements
+
+Minimum:
+
+* CPU with 2+ cores
+* 4GB RAM
+* 500GB free storage space to sync the Mainnet
+* 8 MBit/sec download Internet service
+
+Recommended:
+
+* Fast CPU with 4+ cores
+* 16GB+ RAM
+* High Performance SSD with at least 500GB free space
+* 25+ MBit/sec download Internet service
+
 ### Full node on the main Ethereum network
 
 By far the most common scenario is people wanting to simply interact with the Ethereum

View File

@@ -0,0 +1,152 @@
+package abi
+
+import (
+	"fmt"
+)
+
+type SelectorMarshaling struct {
+	Name   string               `json:"name"`
+	Type   string               `json:"type"`
+	Inputs []ArgumentMarshaling `json:"inputs"`
+}
+
+func isDigit(c byte) bool {
+	return c >= '0' && c <= '9'
+}
+
+func isAlpha(c byte) bool {
+	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+func isIdentifierSymbol(c byte) bool {
+	return c == '$' || c == '_'
+}
+
+func parseToken(unescapedSelector string, isIdent bool) (string, string, error) {
+	if len(unescapedSelector) == 0 {
+		return "", "", fmt.Errorf("empty token")
+	}
+	firstChar := unescapedSelector[0]
+	position := 1
+	if !(isAlpha(firstChar) || (isIdent && isIdentifierSymbol(firstChar))) {
+		return "", "", fmt.Errorf("invalid token start: %c", firstChar)
+	}
+	for position < len(unescapedSelector) {
+		char := unescapedSelector[position]
+		if !(isAlpha(char) || isDigit(char) || (isIdent && isIdentifierSymbol(char))) {
+			break
+		}
+		position++
+	}
+	return unescapedSelector[:position], unescapedSelector[position:], nil
+}
+
+func parseIdentifier(unescapedSelector string) (string, string, error) {
+	return parseToken(unescapedSelector, true)
+}
+
+func parseElementaryType(unescapedSelector string) (string, string, error) {
+	parsedType, rest, err := parseToken(unescapedSelector, false)
+	if err != nil {
+		return "", "", fmt.Errorf("failed to parse elementary type: %v", err)
+	}
+	// handle arrays
+	for len(rest) > 0 && rest[0] == '[' {
+		parsedType = parsedType + string(rest[0])
+		rest = rest[1:]
+		for len(rest) > 0 && isDigit(rest[0]) {
+			parsedType = parsedType + string(rest[0])
+			rest = rest[1:]
+		}
+		if len(rest) == 0 || rest[0] != ']' {
+			return "", "", fmt.Errorf("failed to parse array: expected ']', got %c", unescapedSelector[0])
+		}
+		parsedType = parsedType + string(rest[0])
+		rest = rest[1:]
+	}
+	return parsedType, rest, nil
+}
+
+func parseCompositeType(unescapedSelector string) ([]interface{}, string, error) {
+	if len(unescapedSelector) == 0 || unescapedSelector[0] != '(' {
+		return nil, "", fmt.Errorf("expected '(', got %c", unescapedSelector[0])
+	}
+	parsedType, rest, err := parseType(unescapedSelector[1:])
+	if err != nil {
+		return nil, "", fmt.Errorf("failed to parse type: %v", err)
+	}
+	result := []interface{}{parsedType}
+	for len(rest) > 0 && rest[0] != ')' {
+		parsedType, rest, err = parseType(rest[1:])
+		if err != nil {
+			return nil, "", fmt.Errorf("failed to parse type: %v", err)
+		}
+		result = append(result, parsedType)
+	}
+	if len(rest) == 0 || rest[0] != ')' {
+		return nil, "", fmt.Errorf("expected ')', got '%s'", rest)
+	}
+	return result, rest[1:], nil
+}
+
+func parseType(unescapedSelector string) (interface{}, string, error) {
+	if len(unescapedSelector) == 0 {
+		return nil, "", fmt.Errorf("empty type")
+	}
+	if unescapedSelector[0] == '(' {
+		return parseCompositeType(unescapedSelector)
+	} else {
+		return parseElementaryType(unescapedSelector)
+	}
+}
+
+func assembleArgs(args []interface{}) ([]ArgumentMarshaling, error) {
+	arguments := make([]ArgumentMarshaling, 0)
+	for i, arg := range args {
+		// generate dummy name to avoid unmarshal issues
+		name := fmt.Sprintf("name%d", i)
+		if s, ok := arg.(string); ok {
+			arguments = append(arguments, ArgumentMarshaling{name, s, s, nil, false})
+		} else if components, ok := arg.([]interface{}); ok {
+			subArgs, err := assembleArgs(components)
+			if err != nil {
+				return nil, fmt.Errorf("failed to assemble components: %v", err)
+			}
+			arguments = append(arguments, ArgumentMarshaling{name, "tuple", "tuple", subArgs, false})
+		} else {
+			return nil, fmt.Errorf("failed to assemble args: unexpected type %T", arg)
+		}
+	}
+	return arguments, nil
+}
+
+// ParseSelector converts a method selector into a struct that can be JSON encoded
+// and consumed by other functions in this package.
+// Note, although uppercase letters are not part of the ABI spec, this function
+// still accepts it as the general format is valid.
+func ParseSelector(unescapedSelector string) (SelectorMarshaling, error) {
+	name, rest, err := parseIdentifier(unescapedSelector)
+	if err != nil {
+		return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
+	}
+	args := []interface{}{}
+	if len(rest) >= 2 && rest[0] == '(' && rest[1] == ')' {
+		rest = rest[2:]
+	} else {
+		args, rest, err = parseCompositeType(rest)
+		if err != nil {
+			return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
+		}
+	}
+	if len(rest) > 0 {
+		return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest)
+	}
+
+	// Reassemble the fake ABI and construct the JSON
+	fakeArgs, err := assembleArgs(args)
+	if err != nil {
+		return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err)
+	}
+
+	return SelectorMarshaling{name, "function", fakeArgs}, nil
+}
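The new parser turns a raw selector string into a fake-ABI struct. A minimal usage sketch (not part of the diff; the selector string is only an example):

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/ethereum/go-ethereum/accounts/abi"
	)

	func main() {
		// "transfer(address,uint256)" is the canonical ERC-20 transfer signature.
		sel, err := abi.ParseSelector("transfer(address,uint256)")
		if err != nil {
			panic(err)
		}
		// The result JSON-encodes to a fake ABI entry with dummy input names
		// ("name0", "name1"), as produced by assembleArgs above.
		out, _ := json.Marshal(sel)
		fmt.Println(string(out))
	}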

View File

@@ -0,0 +1,54 @@
+package abi
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"testing"
+)
+
+func TestParseSelector(t *testing.T) {
+	mkType := func(types ...interface{}) []ArgumentMarshaling {
+		var result []ArgumentMarshaling
+		for i, typeOrComponents := range types {
+			name := fmt.Sprintf("name%d", i)
+			if typeName, ok := typeOrComponents.(string); ok {
+				result = append(result, ArgumentMarshaling{name, typeName, typeName, nil, false})
+			} else if components, ok := typeOrComponents.([]ArgumentMarshaling); ok {
+				result = append(result, ArgumentMarshaling{name, "tuple", "tuple", components, false})
+			} else {
+				log.Fatalf("unexpected type %T", typeOrComponents)
+			}
+		}
+		return result
+	}
+	tests := []struct {
+		input string
+		name  string
+		args  []ArgumentMarshaling
+	}{
+		{"noargs()", "noargs", []ArgumentMarshaling{}},
+		{"simple(uint256,uint256,uint256)", "simple", mkType("uint256", "uint256", "uint256")},
+		{"other(uint256,address)", "other", mkType("uint256", "address")},
+		{"withArray(uint256[],address[2],uint8[4][][5])", "withArray", mkType("uint256[]", "address[2]", "uint8[4][][5]")},
+		{"singleNest(bytes32,uint8,(uint256,uint256),address)", "singleNest", mkType("bytes32", "uint8", mkType("uint256", "uint256"), "address")},
+		{"multiNest(address,(uint256[],uint256),((address,bytes32),uint256))", "multiNest",
+			mkType("address", mkType("uint256[]", "uint256"), mkType(mkType("address", "bytes32"), "uint256"))},
+	}
+	for i, tt := range tests {
+		selector, err := ParseSelector(tt.input)
+		if err != nil {
+			t.Errorf("test %d: failed to parse selector '%v': %v", i, tt.input, err)
+		}
+		if selector.Name != tt.name {
+			t.Errorf("test %d: unexpected function name: '%s' != '%s'", i, selector.Name, tt.name)
+		}
+		if selector.Type != "function" {
+			t.Errorf("test %d: unexpected type: '%s' != '%s'", i, selector.Type, "function")
+		}
+		if !reflect.DeepEqual(selector.Inputs, tt.args) {
+			t.Errorf("test %d: unexpected args: '%v' != '%v'", i, selector.Inputs, tt.args)
+		}
+	}
+}

View File

@@ -13,7 +13,7 @@ environment:
   GETH_MINGW: 'C:\msys64\mingw32'
 
 install:
-  - git submodule update --init --depth 1
+  - git submodule update --init --depth 1 --recursive
  - go version
 
 for:

View File

@@ -1,37 +1,58 @@
 # This file contains sha256 checksums of optional build dependencies.
 
-3defb9a09bed042403195e872dcbc8c6fae1485963332279668ec52e80a95a2d go1.17.5.src.tar.gz
-2db6a5d25815b56072465a2cacc8ed426c18f1d5fc26c1fc8c4f5a7188658264 go1.17.5.darwin-amd64.tar.gz
-111f71166de0cb8089bb3e8f9f5b02d76e1bf1309256824d4062a47b0e5f98e0 go1.17.5.darwin-arm64.tar.gz
-443c1cd9768df02085014f1eb034ebc7dbe032ffc8a9bb9f2e6617d037eee23c go1.17.5.freebsd-386.tar.gz
-17180bdc4126acffd0ebf86d66ef5cbc3488b6734e93374fb00eb09494e006d3 go1.17.5.freebsd-amd64.tar.gz
-4f4914303bc18f24fd137a97e595735308f5ce81323c7224c12466fd763fc59f go1.17.5.linux-386.tar.gz
-bd78114b0d441b029c8fe0341f4910370925a4d270a6a590668840675b0c653e go1.17.5.linux-amd64.tar.gz
-6f95ce3da40d9ce1355e48f31f4eb6508382415ca4d7413b1e7a3314e6430e7e go1.17.5.linux-arm64.tar.gz
-aa1fb6c53b4fe72f159333362a10aca37ae938bde8adc9c6eaf2a8e87d1e47de go1.17.5.linux-armv6l.tar.gz
-3d4be616e568f0a02cb7f7769bcaafda4b0969ed0f9bb4277619930b96847e70 go1.17.5.linux-ppc64le.tar.gz
-8087d4fe991e82804e6485c26568c2e0ee0bfde00ceb9015dc86cb6bf84ef40b go1.17.5.linux-s390x.tar.gz
-6d7b9948ee14a906b14f5cbebdfab63cd6828b0b618160847ecd3cc3470a26fe go1.17.5.windows-386.zip
-671faf99cd5d81cd7e40936c0a94363c64d654faa0148d2af4bbc262555620b9 go1.17.5.windows-amd64.zip
-45e88676b68e9cf364be469b5a27965397f4e339aa622c2f52c10433c56e5030 go1.17.5.windows-arm64.zip
+38f423db4cc834883f2b52344282fa7a39fbb93650dc62a11fdf0be6409bdad6 go1.18.src.tar.gz
+70bb4a066997535e346c8bfa3e0dfe250d61100b17ccc5676274642447834969 go1.18.darwin-amd64.tar.gz
+9cab6123af9ffade905525d79fc9ee76651e716c85f1f215872b5f2976782480 go1.18.darwin-arm64.tar.gz
+e63492d4f38487331518eb4b50e670d853bb8d67e88596269af84bb9aca0b381 go1.18.freebsd-386.tar.gz
+01cd67bbc12e659ff236ecebde1806f76452f7ca145c172d5ecdbf4f4803daae go1.18.freebsd-amd64.tar.gz
+1c04cf4440b323a66328e0df95d409f955b9b475e58eae235fdd3d1f1cf02f4f go1.18.linux-386.tar.gz
+e85278e98f57cdb150fe8409e6e5df5343ecb13cebf03a5d5ff12bd55a80264f go1.18.linux-amd64.tar.gz
+7ac7b396a691e588c5fb57687759e6c4db84a2a3bbebb0765f4b38e5b1c5b00e go1.18.linux-arm64.tar.gz
+a80fa43d1f4575fb030adbfbaa94acd860c6847820764eecb06c63b7c103612b go1.18.linux-armv6l.tar.gz
+070351edac192483c074b38d08ec19251a83f8210765a532a84c3dcf8aec04d8 go1.18.linux-ppc64le.tar.gz
+ea265f5e62fcaf941d53f0cdb81222d9668e1672a0d39d992f16ff0e87c0ee6b go1.18.linux-s390x.tar.gz
+e23fd2a0509690fe7e63b2b1bcd4c39ed57b46ccde76f35dc0d16ca7fdbc5aaa go1.18.windows-386.zip
+65c5c0c709a7ca1b357091b10b795b439d8b50e579d3893edab4c7e9b384f435 go1.18.windows-amd64.zip
+1c454eb60c64d481965a165c623ff1ed6cf32d68c6b31f36069c8768d908f093 go1.18.windows-arm64.zip
 
-d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
-e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
-14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809 golangci-lint-1.42.0-freebsd-386.tar.gz
-337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80 golangci-lint-1.42.0-freebsd-amd64.tar.gz
-6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff golangci-lint-1.42.0-freebsd-armv6.tar.gz
-878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e golangci-lint-1.42.0-freebsd-armv7.tar.gz
-42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece golangci-lint-1.42.0-linux-386.tar.gz
-6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159 golangci-lint-1.42.0-linux-amd64.tar.gz
-2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246 golangci-lint-1.42.0-linux-arm64.tar.gz
-08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1 golangci-lint-1.42.0-linux-armv6.tar.gz
-c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf golangci-lint-1.42.0-linux-armv7.tar.gz
-3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40 golangci-lint-1.42.0-linux-mips64.tar.gz
-f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420 golangci-lint-1.42.0-linux-mips64le.tar.gz
-1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6 golangci-lint-1.42.0-linux-ppc64le.tar.gz
-8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d golangci-lint-1.42.0-linux-riscv64.tar.gz
-5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc golangci-lint-1.42.0-linux-s390x.tar.gz
-e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0 golangci-lint-1.42.0-windows-386.zip
-7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883 golangci-lint-1.42.0-windows-amd64.zip
-59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab golangci-lint-1.42.0-windows-armv6.zip
-65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5 golangci-lint-1.42.0-windows-armv7.zip
+03c181fc1bb29ea3e73cbb23399c43b081063833a7cf7554b94e5a98308df53e golangci-lint-1.45.2-linux-riscv64.deb
+08a50bbbf451ede6d5354179eb3e14a5634e156dfa92cb9a2606f855a637e35b golangci-lint-1.45.2-linux-ppc64le.rpm
+0d12f6ec1296b5a70e392aa88cd2295cceef266165eb7028e675f455515dd1c9 golangci-lint-1.45.2-linux-armv7.deb
+10f2846e2e50e4ea8ae426ee62dcd2227b23adddd8e991aa3c065927ac948735 golangci-lint-1.45.2-linux-ppc64le.deb
+1463049b744871168095e3e8f687247d6040eeb895955b869889ea151e0603ab golangci-lint-1.45.2-linux-arm64.tar.gz
+15720f9c4c6f9324af695f081dc189adc7751b255759e78d7b2df1d7e9192533 golangci-lint-1.45.2-linux-amd64.deb
+166d922e4d3cfe3d47786c590154a9c8ea689dff0aa92b73d2f5fc74fc570c29 golangci-lint-1.45.2-linux-arm64.rpm
+1a3754c69f7cc19ab89cbdcc2550da4cf9abb3120383c6b3bd440c1ec22da2e6 golangci-lint-1.45.2-freebsd-386.tar.gz
+1dec0aa46d4f0d241863b573f70129bdf1de9c595cf51172a840a588a4cd9fc5 golangci-lint-1.45.2-windows-amd64.zip
+3198453806517c1ad988229f5e758ef850e671203f46d6905509df5bdf4dc24b golangci-lint-1.45.2-freebsd-armv7.tar.gz
+46a3cd1749d7b98adc2dc01510ddbe21abe42689c8a53fb0e81662713629f215 golangci-lint-1.45.2-linux-386.deb
+4e28bfb593d464b9e160f2acd5b71993836a183270bf8299b78ad31f7a168c0d golangci-lint-1.45.2-linux-arm64.deb
+5157a58c8f9ab85c33af2e46f0d7c57a3b1e8953b81d61130e292e09f545cfab golangci-lint-1.45.2-linux-mips64le.tar.gz
+518cd027644129fbf8ec4f02bd6f9ad7278aae826f92b63c80d4d0819ddde49a golangci-lint-1.45.2-linux-armv6.rpm
+595ad6c6dade4c064351bc309f411703e457f8ffbb7a1806b3d8ee713333427f golangci-lint-1.45.2-linux-amd64.tar.gz
+6994d6c80f0730751090986184a3481b4be2e6b6e84416238a2b857910045a4f golangci-lint-1.45.2-windows-arm64.zip
+6c81652fc340118811b487f713c441fc6f527800bf5fd11b8929d08124efa015 golangci-lint-1.45.2-linux-armv7.tar.gz
+726cb045559b7518bafdd3459de70a0647c087eb1b4634627a4b2e95b1258580 golangci-lint-1.45.2-freebsd-amd64.tar.gz
+77df3774cdfda49b956d4a0e676da9a9b883f496ee37293c530770fef6b1d24e golangci-lint-1.45.2-linux-mips64.deb
+7a9840f279a7d5d405bb434e101c2290964b3729630ac2add29280b962b7b9a5 golangci-lint-1.45.2-windows-armv6.zip
+7d4bf9a5d80ec467aaaf66e78dbdcab567bbc6ba8151334c714eee58766aae32 golangci-lint-1.45.2-windows-armv7.zip
+7e5f8821d39bb11d273b0841b34355f56bd5a45a2d5179f0d09e614e0efc0482 golangci-lint-1.45.2-linux-s390x.rpm
+828de1bde796b23d8656b17a8885fbd879ef612795d62d1e4618126b419728b5 golangci-lint-1.45.2-linux-mips64.rpm
+879a52107a797678a03c175cc7cf441411a14a01f66dc87f70bdfa304a4129a6 golangci-lint-1.45.2-windows-386.zip
+87b6c7e3a3769f7d9abeb3bb82119b3c91e3c975300f6834fdeef8b2e37c98ff golangci-lint-1.45.2-linux-amd64.rpm
+8b605c6d686c8af53ecc4ef39544541eeb1644d34cc10f9ffc5087808210c4ff golangci-lint-1.45.2-linux-s390x.deb
+9427dbf51d0ac6f73a0f992838bd40c817470cc5bf6c8e2e2bea6fac46d7af6e golangci-lint-1.45.2-linux-ppc64le.tar.gz
+995e509e895ca6a64ffc7395ac884d5961bdec98423cb896b17f345a9b4a19cf golangci-lint-1.45.2-darwin-amd64.tar.gz
+a3f36278f2ea5516341e9071a2df6e65df272be80230b5406a12b72c6d425bee golangci-lint-1.45.2-linux-armv7.rpm
+a5e12c50c23e87ac1deffc872f92ae85427b1198604969399805ae47cfe43f08 golangci-lint-1.45.2-linux-riscv64.tar.gz
+aa8fa1be0729dbc2fbc4e01e82027097613eee74bd686ebef20f860b01fff8b3 golangci-lint-1.45.2-freebsd-armv6.tar.gz
+c2b9669decc1b638cf2ee9060571af4e255f6dfcbb225c293e3a7ee4bb2c7217 golangci-lint-1.45.2-darwin-arm64.tar.gz
+dfa8bdaf0387aec1cd5c1aa8857f67b2bbdfc2e42efce540c8fb9bbe3e8af302 golangci-lint-1.45.2-linux-armv6.tar.gz
+eb8b8539dd017eee5c131ea9b875893ab2cebeeca41e8c6624907fb02224d643 golangci-lint-1.45.2-linux-386.rpm
+ed6c7e17a857f30d715c5302fa250d95936936b277024bffea201187a257d7a7 golangci-lint-1.45.2-linux-armv6.deb
+ef4d0154ace4001f01b288baeb118176242efb4fd163e178763e3213b77ef30b golangci-lint-1.45.2-linux-mips64le.deb
+ef7002a2229f5ff5ba201a715fcf877664ea88decbe58e69d163293913024955 golangci-lint-1.45.2-linux-s390x.tar.gz
+f13ecbd09228632e6bbe91a8324bd675c406eed22eb6d2c1e8192eed9ec4f914 golangci-lint-1.45.2-linux-386.tar.gz
+f4cd9cfb09252f51699407277512263cae8409b665dd764f55a34738d0e89edc golangci-lint-1.45.2-linux-riscv64.rpm
+fb1945dc59d37c9d14bf0a4aea11ea8651fa0e1d582ea80c4c44d0a536c08893 golangci-lint-1.45.2-linux-mips64.tar.gz
+fe542c22738010f453c735a3c410decfd3784d1bd394b395c298ee298fc4c606 golangci-lint-1.45.2-linux-mips64le.rpm

View File

@@ -130,13 +130,14 @@ var (
 	// Distros for which packages are created.
 	// Note: vivid is unsupported because there is no golang-1.6 package for it.
 	// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
-	//   wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy
+	//   wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite
 	debDistroGoBoots = map[string]string{
-		"trusty":  "golang-1.11",
-		"xenial":  "golang-go",
-		"bionic":  "golang-go",
-		"focal":   "golang-go",
-		"hirsute": "golang-go",
+		"trusty": "golang-1.11", // EOL: 04/2024
+		"xenial": "golang-go",   // EOL: 04/2026
+		"bionic": "golang-go",   // EOL: 04/2028
+		"focal":  "golang-go",   // EOL: 04/2030
+		"impish": "golang-go",   // EOL: 07/2022
+		// "jammy": "golang-go", // EOL: 04/2027
 	}
 
 	debGoBootPaths = map[string]string{
@@ -147,7 +148,7 @@ var (
 	// This is the version of go that will be downloaded by
 	//
 	//     go run ci.go install -dlgo
-	dlgoVersion = "1.17.5"
+	dlgoVersion = "1.18"
 )
 
 var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -331,16 +332,21 @@ func doLint(cmdline []string) {
 
 // downloadLinter downloads and unpacks golangci-lint.
 func downloadLinter(cachedir string) string {
-	const version = "1.42.0"
+	const version = "1.45.2"
 
 	csdb := build.MustLoadChecksums("build/checksums.txt")
 	arch := runtime.GOARCH
+	ext := ".tar.gz"
+
+	if runtime.GOOS == "windows" {
+		ext = ".zip"
+	}
 	if arch == "arm" {
 		arch += "v" + os.Getenv("GOARM")
 	}
 	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
-	url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
-	archivePath := filepath.Join(cachedir, base+".tar.gz")
+	url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s%s", version, base, ext)
+	archivePath := filepath.Join(cachedir, base+ext)
 	if err := csdb.DownloadFile(url, archivePath); err != nil {
 		log.Fatal(err)
 	}
@@ -1237,21 +1243,21 @@ func doPurge(cmdline []string) {
 
 	// Iterate over the blobs, collect and sort all unstable builds
 	for i := 0; i < len(blobs); i++ {
-		if !strings.Contains(blobs[i].Name, "unstable") {
+		if !strings.Contains(*blobs[i].Name, "unstable") {
 			blobs = append(blobs[:i], blobs[i+1:]...)
 			i--
 		}
 	}
 	for i := 0; i < len(blobs); i++ {
 		for j := i + 1; j < len(blobs); j++ {
-			if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
+			if blobs[i].Properties.LastModified.After(*blobs[j].Properties.LastModified) {
 				blobs[i], blobs[j] = blobs[j], blobs[i]
 			}
 		}
 	}
 	// Filter out all archives more recent that the given threshold
 	for i, blob := range blobs {
-		if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
+		if time.Since(*blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
 			blobs = blobs[:i]
 			break
 		}

View File

@@ -661,7 +661,7 @@ func signer(c *cli.Context) error {
 		if err != nil {
 			utils.Fatalf("Could not register API: %w", err)
 		}
-		handler := node.NewHTTPHandlerStack(srv, cors, vhosts)
+		handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)
 
 		// set port
 		port := c.Int(rpcPortFlag.Name)

View File

@@ -21,19 +21,19 @@
 		"error": "transaction type not supported"
 	},
 	{
-		"error": "rlp: expected List"
+		"error": "typed transaction too short"
 	},
 	{
-		"error": "rlp: expected List"
+		"error": "typed transaction too short"
 	},
 	{
-		"error": "rlp: expected List"
+		"error": "typed transaction too short"
 	},
 	{
-		"error": "rlp: expected List"
+		"error": "typed transaction too short"
 	},
 	{
-		"error": "rlp: expected List"
+		"error": "typed transaction too short"
 	},
 	{
 		"error": "rlp: expected input list for types.AccessListTx"

View File

@@ -32,6 +32,7 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/scwallet"
 	"github.com/ethereum/go-ethereum/accounts/usbwallet"
 	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/log"
@@ -161,7 +162,23 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 	if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
 		cfg.Eth.OverrideTerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
 	}
-	backend, _ := utils.RegisterEthService(stack, &cfg.Eth)
+	backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
+
+	// Warn users to migrate if they have a legacy freezer format.
+	if eth != nil {
+		firstIdx := uint64(0)
+		// Hack to speed up check for mainnet because we know
+		// the first non-empty block.
+		ghash := rawdb.ReadCanonicalHash(eth.ChainDb(), 0)
+		if cfg.Eth.NetworkId == 1 && ghash == params.MainnetGenesisHash {
+			firstIdx = 46147
+		}
+		isLegacy, _, err := dbHasLegacyReceipts(eth.ChainDb(), firstIdx)
+		if err != nil {
+			log.Error("Failed to check db for legacy receipts", "err", err)
+		} else if isLegacy {
+			log.Warn("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.")
+		}
+	}
 
 	// Configure GraphQL if requested
 	if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {

View File

@@ -35,6 +35,7 @@ import (
 	"github.com/ethereum/go-ethereum/console/prompt"
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state/snapshot"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/trie"
@@ -72,6 +73,7 @@ Remove blockchain and state databases`,
 			dbImportCmd,
 			dbExportCmd,
 			dbMetadataCmd,
+			dbMigrateFreezerCmd,
 		},
 	}
 	dbInspectCmd = cli.Command{
@@ -251,6 +253,23 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		},
 		Description: "Shows metadata about the chain status.",
 	}
+	dbMigrateFreezerCmd = cli.Command{
+		Action:    utils.MigrateFlags(freezerMigrate),
+		Name:      "freezer-migrate",
+		Usage:     "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
+		ArgsUsage: "",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.SyncModeFlag,
+			utils.MainnetFlag,
+			utils.RopstenFlag,
+			utils.SepoliaFlag,
+			utils.RinkebyFlag,
+			utils.GoerliFlag,
+		},
+		Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
+WARNING: please back-up the receipt files in your ancients before running this command.`,
+	}
 )
 
 func removeDB(ctx *cli.Context) error {
@@ -728,6 +747,9 @@ func showMetaData(ctx *cli.Context) error {
 		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
 		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
 	}
+	if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
+		data = append(data, []string{"SkeletonSyncStatus", string(b)})
+	}
 	if h := rawdb.ReadHeadHeader(db); h != nil {
 		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
 		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
@@ -750,3 +772,88 @@ func showMetaData(ctx *cli.Context) error {
 	table.Render()
 	return nil
 }
+
+func freezerMigrate(ctx *cli.Context) error {
+	stack, _ := makeConfigNode(ctx)
+	defer stack.Close()
+
+	db := utils.MakeChainDatabase(ctx, stack, false)
+	defer db.Close()
+
+	// Check first block for legacy receipt format
+	numAncients, err := db.Ancients()
+	if err != nil {
+		return err
+	}
+	if numAncients < 1 {
+		log.Info("No receipts in freezer to migrate")
+		return nil
+	}
+
+	isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
+	if err != nil {
+		return err
+	}
+	if !isFirstLegacy {
+		log.Info("No legacy receipts to migrate")
+		return nil
+	}
+
+	log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
+	start := time.Now()
+	if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
+		return err
+	}
+	if err := db.Close(); err != nil {
+		return err
+	}
+	log.Info("Migration finished", "duration", time.Since(start))
+
+	return nil
+}
+
+// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
+// non-empty receipt and checks its format. The index of this first non-empty element is
+// the second return parameter.
+func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
+	// Check first block for legacy receipt format
+	numAncients, err := db.Ancients()
+	if err != nil {
+		return false, 0, err
+	}
+	if numAncients < 1 {
+		return false, 0, nil
+	}
+	if firstIdx >= numAncients {
+		return false, firstIdx, nil
+	}
+	var (
+		legacy       bool
+		blob         []byte
+		emptyRLPList = []byte{192}
+	)
+	// Find first block with non-empty receipt, only if
+	// the index is not already provided.
+	if firstIdx == 0 {
+		for i := uint64(0); i < numAncients; i++ {
+			blob, err = db.Ancient("receipts", i)
+			if err != nil {
+				return false, 0, err
+			}
+			if len(blob) == 0 {
+				continue
+			}
+			if !bytes.Equal(blob, emptyRLPList) {
+				firstIdx = i
+				break
+			}
+		}
+	}
+	// Is first non-empty receipt legacy?
+	first, err := db.Ancient("receipts", firstIdx)
+	if err != nil {
+		return false, 0, err
+	}
+	legacy, err = types.IsLegacyStoredReceipts(first)
+	return legacy, firstIdx, err
+}
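For orientation, a sketch of how dbHasLegacyReceipts is meant to be driven, mirroring the startup check added in cmd/geth/config.go above (the wrapper function is hypothetical and assumes the same package and imports):

	// warnIfLegacyReceipts mirrors the startup check in config.go: scan from
	// block 0 and point users at `geth db freezer-migrate` if needed.
	func warnIfLegacyReceipts(db ethdb.Database) {
		isLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
		if err != nil {
			log.Error("Failed to check db for legacy receipts", "err", err)
			return
		}
		if isLegacy {
			log.Warn("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.", "firstLegacy", firstIdx)
		}
	}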

View File

@@ -110,7 +110,8 @@ var (
 	utils.UltraLightFractionFlag,
 	utils.UltraLightOnlyAnnounceFlag,
 	utils.LightNoSyncServeFlag,
-	utils.WhitelistFlag,
+	utils.EthPeerRequiredBlocksFlag,
+	utils.LegacyWhitelistFlag,
 	utils.BloomFilterSizeFlag,
 	utils.CacheFlag,
 	utils.CacheDatabaseFlag,
@@ -121,6 +122,7 @@ var (
 	utils.CacheSnapshotFlag,
 	utils.CacheNoPrefetchFlag,
 	utils.CachePreimagesFlag,
+	utils.FDLimitFlag,
 	utils.ListenPortFlag,
 	utils.MaxPeersFlag,
 	utils.MaxPendingPeersFlag,
@@ -149,6 +151,7 @@ var (
 	utils.SepoliaFlag,
 	utils.RinkebyFlag,
 	utils.GoerliFlag,
+	utils.KilnFlag,
 	utils.VMEnableDebugFlag,
 	utils.NetworkIdFlag,
 	utils.EthStatsURLFlag,
@@ -167,6 +170,10 @@ var (
 	utils.HTTPListenAddrFlag,
 	utils.HTTPPortFlag,
 	utils.HTTPCORSDomainFlag,
+	utils.AuthListenFlag,
+	utils.AuthPortFlag,
+	utils.AuthVirtualHostsFlag,
+	utils.JWTSecretFlag,
 	utils.HTTPVirtualHostsFlag,
 	utils.GraphQLEnabledFlag,
 	utils.GraphQLCORSDomainFlag,

View File

@@ -314,8 +314,7 @@ func traverseState(ctx *cli.Context) error {
 			}
 		}
 		if !bytes.Equal(acc.CodeHash, emptyCode) {
-			code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
-			if len(code) == 0 {
+			if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
 				log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash))
 				return errors.New("missing code")
 			}
@@ -386,11 +385,10 @@ func traverseRawState(ctx *cli.Context) error {
 		nodes += 1
 		node := accIter.Hash()
 
+		// Check the present for non-empty hash node(embedded node doesn't
+		// have their own hash).
 		if node != (common.Hash{}) {
-			// Check the present for non-empty hash node(embedded node doesn't
-			// have their own hash).
-			blob := rawdb.ReadTrieNode(chaindb, node)
-			if len(blob) == 0 {
+			if !rawdb.HasTrieNode(chaindb, node) {
 				log.Error("Missing trie node(account)", "hash", node)
 				return errors.New("missing account")
 			}
@@ -434,8 +432,7 @@ func traverseRawState(ctx *cli.Context) error {
 			}
 		}
 		if !bytes.Equal(acc.CodeHash, emptyCode) {
-			code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
-			if len(code) == 0 {
+			if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
 				log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey()))
 				return errors.New("missing code")
 			}

View File

@@ -46,6 +46,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.RinkebyFlag,
 			utils.RopstenFlag,
 			utils.SepoliaFlag,
+			utils.KilnFlag,
 			utils.SyncModeFlag,
 			utils.ExitWhenSyncedFlag,
 			utils.GCModeFlag,
@@ -53,7 +54,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.EthStatsURLFlag,
 			utils.IdentityFlag,
 			utils.LightKDFFlag,
-			utils.WhitelistFlag,
+			utils.EthPeerRequiredBlocksFlag,
 		},
 	},
 	{
@@ -119,6 +120,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.CacheSnapshotFlag,
 			utils.CacheNoPrefetchFlag,
 			utils.CachePreimagesFlag,
+			utils.FDLimitFlag,
 		},
 	},
 	{
@@ -148,6 +150,10 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.WSApiFlag,
 			utils.WSPathPrefixFlag,
 			utils.WSAllowedOriginsFlag,
+			utils.JWTSecretFlag,
+			utils.AuthListenFlag,
+			utils.AuthPortFlag,
+			utils.AuthVirtualHostsFlag,
 			utils.GraphQLEnabledFlag,
 			utils.GraphQLCORSDomainFlag,
 			utils.GraphQLVirtualHostsFlag,
@@ -221,6 +227,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 		Name: "ALIASED (deprecated)",
 		Flags: []cli.Flag{
 			utils.NoUSBFlag,
+			utils.LegacyWhitelistFlag,
 		},
 	},
 	{

View File

@@ -161,6 +161,10 @@ var (
 		Name:  "sepolia",
 		Usage: "Sepolia network: pre-configured proof-of-work test network",
 	}
+	KilnFlag = cli.BoolFlag{
+		Name:  "kiln",
+		Usage: "Kiln network: pre-configured proof-of-work to proof-of-stake test network",
+	}
 	DeveloperFlag = cli.BoolFlag{
 		Name:  "dev",
 		Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@@ -237,9 +241,13 @@ var (
 		Name:  "lightkdf",
 		Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
 	}
-	WhitelistFlag = cli.StringFlag{
+	EthPeerRequiredBlocksFlag = cli.StringFlag{
+		Name:  "eth.requiredblocks",
+		Usage: "Comma separated block number-to-hash mappings to require for peering (<number>=<hash>)",
+	}
+	LegacyWhitelistFlag = cli.StringFlag{
 		Name:  "whitelist",
-		Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)",
+		Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --peer.requiredblocks)",
 	}
 	BloomFilterSizeFlag = cli.Uint64Flag{
 		Name: "bloomfilter.size",
@@ -433,6 +441,10 @@ var (
 		Name:  "cache.preimages",
 		Usage: "Enable recording the SHA3/keccak preimages of trie keys",
 	}
+	FDLimitFlag = cli.IntFlag{
+		Name:  "fdlimit",
+		Usage: "Raise the open file descriptor resource limit (default = system fd limit)",
+	}
 	// Miner settings
 	MiningEnabledFlag = cli.BoolFlag{
 		Name:  "mine",
@@ -518,6 +530,26 @@ var (
 		Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
 		Value: ethconfig.Defaults.RPCTxFeeCap,
 	}
+	// Authenticated RPC HTTP settings
+	AuthListenFlag = cli.StringFlag{
+		Name:  "authrpc.addr",
+		Usage: "Listening address for authenticated APIs",
+		Value: node.DefaultConfig.AuthAddr,
+	}
+	AuthPortFlag = cli.IntFlag{
+		Name:  "authrpc.port",
+		Usage: "Listening port for authenticated APIs",
+		Value: node.DefaultConfig.AuthPort,
+	}
+	AuthVirtualHostsFlag = cli.StringFlag{
+		Name:  "authrpc.vhosts",
+		Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
+		Value: strings.Join(node.DefaultConfig.AuthVirtualHosts, ","),
+	}
+	JWTSecretFlag = cli.StringFlag{
+		Name:  "authrpc.jwtsecret",
+		Usage: "Path to a JWT secret to use for authenticated RPC endpoints",
+	}
 	// Logging and debug settings
 	EthStatsURLFlag = cli.StringFlag{
 		Name:  "ethstats",
@@ -811,6 +843,9 @@ func MakeDataDir(ctx *cli.Context) string {
 		if ctx.GlobalBool(SepoliaFlag.Name) {
 			return filepath.Join(path, "sepolia")
 		}
+		if ctx.GlobalBool(KilnFlag.Name) {
+			return filepath.Join(path, "kiln")
+		}
 		return path
 	}
 	Fatalf("Cannot determine default data directory, please set manually (--datadir)")
@@ -865,6 +900,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
 		urls = params.RinkebyBootnodes
 	case ctx.GlobalBool(GoerliFlag.Name):
 		urls = params.GoerliBootnodes
+	case ctx.GlobalBool(KilnFlag.Name):
+		urls = params.KilnBootnodes
 	case cfg.BootstrapNodes != nil:
 		return // already set, don't apply defaults.
 	}
@@ -951,6 +988,18 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
 		cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
 	}
 
+	if ctx.GlobalIsSet(AuthListenFlag.Name) {
+		cfg.AuthAddr = ctx.GlobalString(AuthListenFlag.Name)
+	}
+
+	if ctx.GlobalIsSet(AuthPortFlag.Name) {
+		cfg.AuthPort = ctx.GlobalInt(AuthPortFlag.Name)
+	}
+
+	if ctx.GlobalIsSet(AuthVirtualHostsFlag.Name) {
+		cfg.AuthVirtualHosts = SplitAndTrim(ctx.GlobalString(AuthVirtualHostsFlag.Name))
+	}
+
 	if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
 		cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
 	}
@@ -1057,11 +1106,24 @@ func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
 
 // MakeDatabaseHandles raises out the number of allowed file handles per process
 // for Geth and returns half of the allowance to assign to the database.
-func MakeDatabaseHandles() int {
+func MakeDatabaseHandles(max int) int {
 	limit, err := fdlimit.Maximum()
 	if err != nil {
 		Fatalf("Failed to retrieve file descriptor allowance: %v", err)
 	}
+	switch {
+	case max == 0:
+		// User didn't specify a meaningful value, use system limits
+	case max < 128:
+		// User specified something unhealthy, just use system defaults
+		log.Error("File descriptor limit invalid (<128)", "had", max, "updated", limit)
+	case max > limit:
+		// User requested more than the OS allows, notify that we can't allocate it
+		log.Warn("Requested file descriptors denied by OS", "req", max, "limit", limit)
+	default:
+		// User limit is meaningful and within allowed range, use that
+		limit = max
+	}
 	raised, err := fdlimit.Raise(uint64(limit))
 	if err != nil {
 		Fatalf("Failed to raise file descriptor allowance: %v", err)
@@ -1218,6 +1280,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
 	setDataDir(ctx, cfg)
 	setSmartCard(ctx, cfg)
 
+	if ctx.GlobalIsSet(JWTSecretFlag.Name) {
+		cfg.JWTSecret = ctx.GlobalString(JWTSecretFlag.Name)
+	}
+
 	if ctx.GlobalIsSet(ExternalSignerFlag.Name) {
 		cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name)
 	}
@@ -1286,6 +1352,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
 		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
 	case ctx.GlobalBool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir():
 		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia")
+	case ctx.GlobalBool(KilnFlag.Name) && cfg.DataDir == node.DefaultDataDir():
+		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "kiln")
 	}
 }
@@ -1404,26 +1472,33 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
 	}
 }
 
-func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) {
-	whitelist := ctx.GlobalString(WhitelistFlag.Name)
-	if whitelist == "" {
-		return
+func setPeerRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
+	peerRequiredBlocks := ctx.GlobalString(EthPeerRequiredBlocksFlag.Name)
+
+	if peerRequiredBlocks == "" {
+		if ctx.GlobalIsSet(LegacyWhitelistFlag.Name) {
+			log.Warn("The flag --rpc is deprecated and will be removed, please use --peer.requiredblocks")
+			peerRequiredBlocks = ctx.GlobalString(LegacyWhitelistFlag.Name)
+		} else {
+			return
+		}
 	}
-	cfg.Whitelist = make(map[uint64]common.Hash)
-	for _, entry := range strings.Split(whitelist, ",") {
+
+	cfg.PeerRequiredBlocks = make(map[uint64]common.Hash)
+	for _, entry := range strings.Split(peerRequiredBlocks, ",") {
 		parts := strings.Split(entry, "=")
 		if len(parts) != 2 {
-			Fatalf("Invalid whitelist entry: %s", entry)
+			Fatalf("Invalid peer required block entry: %s", entry)
 		}
 		number, err := strconv.ParseUint(parts[0], 0, 64)
 		if err != nil {
-			Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
+			Fatalf("Invalid peer required block number %s: %v", parts[0], err)
 		}
 		var hash common.Hash
 		if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
-			Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
+			Fatalf("Invalid peer required block hash %s: %v", parts[1], err)
 		}
-		cfg.Whitelist[number] = hash
+		cfg.PeerRequiredBlocks[number] = hash
 	}
 }
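The accepted flag syntax is a comma-separated list of <number>=<hash> pairs. A self-contained sketch of the per-entry parsing, minus the geth-specific types (the block hash here is a dummy placeholder, not a real chain hash):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
	)

	func main() {
		// One entry as setPeerRequiredBlocks would receive it; dummy hash.
		entry := "46147=0x" + strings.Repeat("ab", 32)
		parts := strings.Split(entry, "=")
		if len(parts) != 2 {
			panic("invalid entry")
		}
		number, err := strconv.ParseUint(parts[0], 0, 64)
		if err != nil {
			panic(err)
		}
		fmt.Printf("peers must know block %d as %s\n", number, parts[1])
	}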
@@ -1471,7 +1546,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
 // SetEthConfig applies eth-related command line flags to the config.
 func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	// Avoid conflicting network flags
-	CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag)
+	CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag, KilnFlag)
 	CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
 	CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
 	if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 {
@@ -1490,7 +1565,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	setTxPool(ctx, &cfg.TxPool)
 	setEthash(ctx, cfg)
 	setMiner(ctx, &cfg.Miner)
-	setWhitelist(ctx, cfg)
+	setPeerRequiredBlocks(ctx, cfg)
 	setLes(ctx, cfg)
 
 	// Cap the cache allowance and tune the garbage collector
@@ -1522,7 +1597,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
 		cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
 	}
-	cfg.DatabaseHandles = MakeDatabaseHandles()
+	cfg.DatabaseHandles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
 	if ctx.GlobalIsSet(AncientFlag.Name) {
 		cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name)
 	}
@@ -1633,6 +1708,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		}
 		cfg.Genesis = core.DefaultGoerliGenesisBlock()
 		SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash)
+	case ctx.GlobalBool(KilnFlag.Name):
+		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
+			cfg.NetworkId = 1337802
+		}
+		cfg.Genesis = core.DefaultKilnGenesisBlock()
+		SetDNSDiscoveryDefaults(cfg, params.KilnGenesisHash)
 	case ctx.GlobalBool(DeveloperFlag.Name):
 		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
 			cfg.NetworkId = 1337
@@ -1840,7 +1921,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
 func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
 	var (
 		cache   = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
-		handles = MakeDatabaseHandles()
+		handles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
 
 		err     error
 		chainDb ethdb.Database
@@ -1871,6 +1952,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
 		genesis = core.DefaultRinkebyGenesisBlock()
 	case ctx.GlobalBool(GoerliFlag.Name):
 		genesis = core.DefaultGoerliGenesisBlock()
+	case ctx.GlobalBool(KilnFlag.Name):
+		genesis = core.DefaultKilnGenesisBlock()
 	case ctx.GlobalBool(DeveloperFlag.Name):
 		Fatalf("Developer chains are ephemeral")
 	}

View File

@@ -549,6 +549,11 @@ func NewShared() *Ethash {
 
 // Close closes the exit channel to notify all backend threads exiting.
 func (ethash *Ethash) Close() error {
+	return ethash.StopRemoteSealer()
+}
+
+// StopRemoteSealer stops the remote sealer
+func (ethash *Ethash) StopRemoteSealer() error {
 	ethash.closeOnce.Do(func() {
 		// Short circuit if the exit channel is not allocated.
 		if ethash.remote == nil {

View File

@@ -19,11 +19,32 @@ package beacon
 
 import "github.com/ethereum/go-ethereum/rpc"
 
 var (
-	VALID   = GenericStringResponse{"VALID"}
-	SUCCESS = GenericStringResponse{"SUCCESS"}
-	INVALID = ForkChoiceResponse{Status: "INVALID", PayloadID: nil}
-	SYNCING = ForkChoiceResponse{Status: "SYNCING", PayloadID: nil}
+	// VALID is returned by the engine API in the following calls:
+	//   - newPayloadV1:       if the payload was already known or was just validated and executed
+	//   - forkchoiceUpdateV1: if the chain accepted the reorg (might ignore if it's stale)
+	VALID = "VALID"
+
+	// INVALID is returned by the engine API in the following calls:
+	//   - newPayloadV1:       if the payload failed to execute on top of the local chain
+	//   - forkchoiceUpdateV1: if the new head is unknown, pre-merge, or reorg to it fails
+	INVALID = "INVALID"
+
+	// SYNCING is returned by the engine API in the following calls:
+	//   - newPayloadV1:       if the payload was accepted on top of an active sync
+	//   - forkchoiceUpdateV1: if the new head was seen before, but not part of the chain
+	SYNCING = "SYNCING"
+
+	// ACCEPTED is returned by the engine API in the following calls:
+	//   - newPayloadV1: if the payload was accepted, but not processed (side chain)
+	ACCEPTED = "ACCEPTED"
+
+	INVALIDBLOCKHASH     = "INVALID_BLOCK_HASH"
+	INVALIDTERMINALBLOCK = "INVALID_TERMINAL_BLOCK"
 
 	GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
 	UnknownPayload     = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
 	InvalidTB          = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
+
+	STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil}
+	STATUS_SYNCING = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: SYNCING}, PayloadID: nil}
 )
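A sketch of how the new constants compose into responses (the helper is hypothetical, assuming the beacon package as modified in this commit; PayloadStatusV1 is defined in core/beacon/types.go below):

	// payloadVerdict shows the intended mapping from validation outcomes
	// onto the new status strings.
	func payloadVerdict(executed, syncing bool) beacon.PayloadStatusV1 {
		switch {
		case executed:
			return beacon.PayloadStatusV1{Status: beacon.VALID}
		case syncing:
			return beacon.PayloadStatusV1{Status: beacon.SYNCING}
		default:
			return beacon.PayloadStatusV1{Status: beacon.INVALID}
		}
	}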

View File

@@ -16,7 +16,7 @@ var _ = (*payloadAttributesMarshaling)(nil)
 func (p PayloadAttributesV1) MarshalJSON() ([]byte, error) {
 	type PayloadAttributesV1 struct {
 		Timestamp             hexutil.Uint64 `json:"timestamp" gencodec:"required"`
-		Random                common.Hash    `json:"random" gencodec:"required"`
+		Random                common.Hash    `json:"prevRandao" gencodec:"required"`
 		SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
 	}
 	var enc PayloadAttributesV1
@@ -30,7 +30,7 @@ func (p PayloadAttributesV1) MarshalJSON() ([]byte, error) {
 func (p *PayloadAttributesV1) UnmarshalJSON(input []byte) error {
 	type PayloadAttributesV1 struct {
 		Timestamp             *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
-		Random                *common.Hash    `json:"random" gencodec:"required"`
+		Random                *common.Hash    `json:"prevRandao" gencodec:"required"`
 		SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
 	}
 	var dec PayloadAttributesV1
@@ -42,7 +42,7 @@ func (p *PayloadAttributesV1) UnmarshalJSON(input []byte) error {
 	}
 	p.Timestamp = uint64(*dec.Timestamp)
 	if dec.Random == nil {
-		return errors.New("missing required field 'random' for PayloadAttributesV1")
+		return errors.New("missing required field 'prevRandao' for PayloadAttributesV1")
 	}
 	p.Random = *dec.Random
 	if dec.SuggestedFeeRecipient == nil {
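Only the JSON key changes; the Go field is still named Random. A sketch of decoding the renamed wire format (all values are dummies):

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/ethereum/go-ethereum/core/beacon"
	)

	func main() {
		input := []byte(`{
			"timestamp": "0x5",
			"prevRandao": "0x0000000000000000000000000000000000000000000000000000000000000000",
			"suggestedFeeRecipient": "0x0000000000000000000000000000000000000000"
		}`)
		var attrs beacon.PayloadAttributesV1
		if err := json.Unmarshal(input, &attrs); err != nil {
			panic(err)
		}
		fmt.Println(attrs.Random) // populated from the "prevRandao" key
	}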

View File

@@ -19,9 +19,9 @@ func (e ExecutableDataV1) MarshalJSON() ([]byte, error) {
 		ParentHash    common.Hash     `json:"parentHash" gencodec:"required"`
 		FeeRecipient  common.Address  `json:"feeRecipient" gencodec:"required"`
 		StateRoot     common.Hash     `json:"stateRoot" gencodec:"required"`
 		ReceiptsRoot  common.Hash     `json:"receiptsRoot" gencodec:"required"`
 		LogsBloom     hexutil.Bytes   `json:"logsBloom" gencodec:"required"`
-		Random        common.Hash     `json:"random" gencodec:"required"`
+		Random        common.Hash     `json:"prevRandao" gencodec:"required"`
 		Number        hexutil.Uint64  `json:"blockNumber" gencodec:"required"`
 		GasLimit      hexutil.Uint64  `json:"gasLimit" gencodec:"required"`
 		GasUsed       hexutil.Uint64  `json:"gasUsed" gencodec:"required"`
@@ -60,9 +60,9 @@ func (e *ExecutableDataV1) UnmarshalJSON(input []byte) error {
 		ParentHash    *common.Hash    `json:"parentHash" gencodec:"required"`
 		FeeRecipient  *common.Address `json:"feeRecipient" gencodec:"required"`
 		StateRoot     *common.Hash    `json:"stateRoot" gencodec:"required"`
 		ReceiptsRoot  *common.Hash    `json:"receiptsRoot" gencodec:"required"`
 		LogsBloom     *hexutil.Bytes  `json:"logsBloom" gencodec:"required"`
-		Random        *common.Hash    `json:"random" gencodec:"required"`
+		Random        *common.Hash    `json:"prevRandao" gencodec:"required"`
 		Number        *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
 		GasLimit      *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
 		GasUsed       *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
@@ -97,7 +97,7 @@ func (e *ExecutableDataV1) UnmarshalJSON(input []byte) error {
 	}
 	e.LogsBloom = *dec.LogsBloom
 	if dec.Random == nil {
-		return errors.New("missing required field 'random' for ExecutableDataV1")
+		return errors.New("missing required field 'prevRandao' for ExecutableDataV1")
 	}
 	e.Random = *dec.Random
 	if dec.Number == nil {

View File

@ -31,7 +31,7 @@ import (
// PayloadAttributesV1 structure described at https://github.com/ethereum/execution-apis/pull/74 // PayloadAttributesV1 structure described at https://github.com/ethereum/execution-apis/pull/74
type PayloadAttributesV1 struct { type PayloadAttributesV1 struct {
Timestamp uint64 `json:"timestamp" gencodec:"required"` Timestamp uint64 `json:"timestamp" gencodec:"required"`
Random common.Hash `json:"random" gencodec:"required"` Random common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
} }
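For illustration only (not part of this diff), a minimal sketch of what the rename means on the wire: the Go field is still called Random, but it now marshals under the "prevRandao" key. The struct below is a dependency-free stand-in with the same JSON tags; common.Hash is replaced by a plain string.

package main

import (
    "encoding/json"
    "fmt"
)

// Stand-in mirroring the tags above, purely to keep the sketch self-contained.
type payloadAttributesSketch struct {
    Timestamp uint64 `json:"timestamp"`
    Random    string `json:"prevRandao"`
}

func main() {
    out, _ := json.Marshal(payloadAttributesSketch{Timestamp: 1, Random: "0x01"})
    fmt.Println(string(out)) // {"timestamp":1,"prevRandao":"0x01"}
}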
@ -47,9 +47,9 @@ type ExecutableDataV1 struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"` ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"` StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"` LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"random" gencodec:"required"` Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"` Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"` GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"` GasUsed uint64 `json:"gasUsed" gencodec:"required"`
@ -72,26 +72,16 @@ type executableDataMarshaling struct {
Transactions []hexutil.Bytes Transactions []hexutil.Bytes
} }
type NewBlockResponse struct { type PayloadStatusV1 struct {
Valid bool `json:"valid"` Status string `json:"status"`
LatestValidHash *common.Hash `json:"latestValidHash"`
ValidationError *string `json:"validationError"`
} }
type GenericResponse struct { type TransitionConfigurationV1 struct {
Success bool `json:"success"` TerminalTotalDifficulty *hexutil.Big `json:"terminalTotalDifficulty"`
} TerminalBlockHash common.Hash `json:"terminalBlockHash"`
TerminalBlockNumber hexutil.Uint64 `json:"terminalBlockNumber"`
type GenericStringResponse struct {
Status string `json:"status"`
}
type ExecutePayloadResponse struct {
Status string `json:"status"`
LatestValidHash common.Hash `json:"latestValidHash"`
}
type ConsensusValidatedParams struct {
BlockHash common.Hash `json:"blockHash"`
Status string `json:"status"`
} }
// PayloadID is an identifier of the payload build process // PayloadID is an identifier of the payload build process
@ -114,8 +104,8 @@ func (b *PayloadID) UnmarshalText(input []byte) error {
} }
type ForkChoiceResponse struct { type ForkChoiceResponse struct {
Status string `json:"status"` PayloadStatus PayloadStatusV1 `json:"payloadStatus"`
PayloadID *PayloadID `json:"payloadId"` PayloadID *PayloadID `json:"payloadId"`
} }
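A hedged sketch (values invented) of the new engine API response shape: a forkchoiceUpdated-style reply now nests a full status object instead of a bare status string. The stand-in types below mirror the JSON tags above while staying dependency-free.

package main

import (
    "encoding/json"
    "fmt"
)

type payloadStatusSketch struct {
    Status          string  `json:"status"`
    LatestValidHash *string `json:"latestValidHash"`
    ValidationError *string `json:"validationError"`
}

type forkChoiceResponseSketch struct {
    PayloadStatus payloadStatusSketch `json:"payloadStatus"`
    PayloadID     *string             `json:"payloadId"`
}

func main() {
    hash := "0xabc"
    out, _ := json.Marshal(forkChoiceResponseSketch{
        PayloadStatus: payloadStatusSketch{Status: "VALID", LatestValidHash: &hash},
    })
    fmt.Println(string(out))
    // {"payloadStatus":{"status":"VALID","latestValidHash":"0xabc","validationError":null},"payloadId":null}
}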
type ForkchoiceStateV1 struct { type ForkchoiceStateV1 struct {

View File

@ -542,6 +542,19 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
} }
} }
if beyondRoot || newHeadBlock.NumberU64() == 0 { if beyondRoot || newHeadBlock.NumberU64() == 0 {
if newHeadBlock.NumberU64() == 0 {
// Recommit the genesis state into disk in case the rewinding destination
// is genesis block and the relevant state is gone. In the future this
// rewinding destination can be the earliest block stored in the chain
// if the historical chain pruning is enabled. In that case the logic
// needs to be improved here.
if !bc.HasState(bc.genesisBlock.Root()) {
if err := CommitGenesisState(bc.db, bc.genesisBlock.Hash()); err != nil {
log.Crit("Failed to commit genesis state", "err", err)
}
log.Debug("Recommitted genesis state to disk")
}
}
log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
break break
} }
@ -592,7 +605,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if num+1 <= frozen { if num+1 <= frozen {
// Truncate all relative data(header, total difficulty, body, receipt // Truncate all relative data(header, total difficulty, body, receipt
// and canonical hash) from ancient store. // and canonical hash) from ancient store.
if err := bc.db.TruncateAncients(num); err != nil { if err := bc.db.TruncateHead(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err) log.Crit("Failed to truncate ancient data", "number", num, "err", err)
} }
// Remove the hash <-> number mapping from the active store. // Remove the hash <-> number mapping from the active store.
@ -991,7 +1004,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
size += int64(batch.ValueSize()) size += int64(batch.ValueSize())
if err = batch.Write(); err != nil { if err = batch.Write(); err != nil {
fastBlock := bc.CurrentFastBlock().NumberU64() fastBlock := bc.CurrentFastBlock().NumberU64()
if err := bc.db.TruncateAncients(fastBlock + 1); err != nil { if err := bc.db.TruncateHead(fastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err) log.Error("Can't truncate ancient store after failed insert", "err", err)
} }
return 0, err return 0, err
@ -1009,7 +1022,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if !updateHead(blockChain[len(blockChain)-1]) { if !updateHead(blockChain[len(blockChain)-1]) {
// We end up here if the header chain has reorg'ed, and the blocks/receipts // We end up here if the header chain has reorg'ed, and the blocks/receipts
// don't match the canonical chain. // don't match the canonical chain.
if err := bc.db.TruncateAncients(previousFastBlock + 1); err != nil { if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err) log.Error("Can't truncate ancient store after failed insert", "err", err)
} }
return 0, errSideChainReceipts return 0, errSideChainReceipts
@ -1655,12 +1668,16 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits) blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
blockInsertTimer.UpdateSince(start) blockInsertTimer.UpdateSince(start)
if !setHead { // Report the import stats before returning the various results
// We did not setHead, so we don't have any stats to update stats.processed++
log.Info("Inserted block", "number", block.Number(), "hash", block.Hash(), "txs", len(block.Transactions()), "elapsed", common.PrettyDuration(time.Since(start))) stats.usedGas += usedGas
return it.index, nil
}
dirty, _ := bc.stateCache.TrieDB().Size()
stats.report(chain, it.index, dirty, setHead)
if !setHead {
return it.index, nil // Direct block insertion of a single block
}
switch status { switch status {
case CanonStatTy: case CanonStatTy:
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
@ -1687,11 +1704,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root()) "root", block.Root())
} }
stats.processed++
stats.usedGas += usedGas
dirty, _ := bc.stateCache.TrieDB().Size()
stats.report(chain, it.index, dirty)
} }
// Any blocks remaining here? The only ones we care about are the future ones // Any blocks remaining here? The only ones we care about are the future ones
@ -2089,28 +2101,39 @@ func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
// block. It's possible that after the reorg the relevant state of head // block. It's possible that after the reorg the relevant state of head
// is missing. It can be fixed by inserting a new block which triggers // is missing. It can be fixed by inserting a new block which triggers
// the re-execution. // the re-execution.
func (bc *BlockChain) SetChainHead(newBlock *types.Block) error { func (bc *BlockChain) SetChainHead(head *types.Block) error {
if !bc.chainmu.TryLock() { if !bc.chainmu.TryLock() {
return errChainStopped return errChainStopped
} }
defer bc.chainmu.Unlock() defer bc.chainmu.Unlock()
// Run the reorg if necessary and set the given block as new head. // Run the reorg if necessary and set the given block as new head.
if newBlock.ParentHash() != bc.CurrentBlock().Hash() { start := time.Now()
if err := bc.reorg(bc.CurrentBlock(), newBlock); err != nil { if head.ParentHash() != bc.CurrentBlock().Hash() {
if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
return err return err
} }
} }
bc.writeHeadBlock(newBlock) bc.writeHeadBlock(head)
// Emit events // Emit events
logs := bc.collectLogs(newBlock.Hash(), false) logs := bc.collectLogs(head.Hash(), false)
bc.chainFeed.Send(ChainEvent{Block: newBlock, Hash: newBlock.Hash(), Logs: logs}) bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
if len(logs) > 0 { if len(logs) > 0 {
bc.logsFeed.Send(logs) bc.logsFeed.Send(logs)
} }
bc.chainHeadFeed.Send(ChainHeadEvent{Block: newBlock}) bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
log.Info("Set the chain head", "number", newBlock.Number(), "hash", newBlock.Hash())
context := []interface{}{
"number", head.Number(),
"hash", head.Hash(),
"root", head.Root(),
"elapsed", time.Since(start),
}
if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
log.Info("Chain head was updated", context...)
return nil return nil
} }
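The conditional context built above follows geth's key/value logging idiom; a standalone sketch of the pattern (values invented, fmt stands in for the log package, and duration rounding stands in for common.PrettyAge):

package main

import (
    "fmt"
    "time"
)

func main() {
    start := time.Now()
    headTime := time.Now().Add(-2 * time.Hour) // pretend block timestamp

    // Grow the key/value context conditionally, then pass it variadically.
    context := []interface{}{"number", 42, "elapsed", time.Since(start)}
    if time.Since(headTime) > time.Minute {
        context = append(context, []interface{}{"age", time.Since(headTime).Round(time.Second)}...)
    }
    args := append([]interface{}{"Chain head was updated:"}, context...)
    fmt.Println(args...)
}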
@ -2296,6 +2319,9 @@ Error: %v
// of the header retrieval mechanisms already need to verify nonces, as well as // of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each. // because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
if len(chain) == 0 {
return 0, nil
}
start := time.Now() start := time.Now()
if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
return i, err return i, err

View File

@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed // report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message. // or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize) { func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) {
// Fetch the timings for the batch // Fetch the timings for the batch
var ( var (
now = mclock.Now() now = mclock.Now()
@ -71,8 +71,11 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor
if st.ignored > 0 { if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...) context = append(context, []interface{}{"ignored", st.ignored}...)
} }
log.Info("Imported new chain segment", context...) if setHead {
log.Info("Imported new chain segment", context...)
} else {
log.Info("Imported new potential chain segment", context...)
}
// Bump the stats reported to the next section // Bump the stats reported to the next section
*st = insertStats{startTime: now, lastIndex: index + 1} *st = insertStats{startTime: now, lastIndex: index + 1}
} }

View File

@ -80,6 +80,81 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// flush adds the allocated genesis accounts into a fresh statedb and
// commits the state changes into the given database handler.
func (ga *GenesisAlloc) flush(db ethdb.Database) (common.Hash, error) {
statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
if err != nil {
return common.Hash{}, err
}
for addr, account := range *ga {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
}
root, err := statedb.Commit(false)
if err != nil {
return common.Hash{}, err
}
err = statedb.Database().TrieDB().Commit(root, true, nil)
if err != nil {
return common.Hash{}, err
}
return root, nil
}
// write writes the JSON-marshaled genesis state into the database
// with the given block hash as the unique identifier.
func (ga *GenesisAlloc) write(db ethdb.KeyValueWriter, hash common.Hash) error {
blob, err := json.Marshal(ga)
if err != nil {
return err
}
rawdb.WriteGenesisState(db, hash, blob)
return nil
}
// CommitGenesisState loads the stored genesis state with the given block
// hash and commits it into the given database handler.
func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
var alloc GenesisAlloc
blob := rawdb.ReadGenesisState(db, hash)
if len(blob) != 0 {
if err := alloc.UnmarshalJSON(blob); err != nil {
return err
}
} else {
// Genesis allocation is missing and there are several possibilities:
// the node is legacy which doesn't persist the genesis allocation or
// the persisted allocation is just lost.
// - supported networks (mainnet, testnets): recover with the defined allocations
// - private network: can't recover
var genesis *Genesis
switch hash {
case params.MainnetGenesisHash:
genesis = DefaultGenesisBlock()
case params.RopstenGenesisHash:
genesis = DefaultRopstenGenesisBlock()
case params.RinkebyGenesisHash:
genesis = DefaultRinkebyGenesisBlock()
case params.GoerliGenesisHash:
genesis = DefaultGoerliGenesisBlock()
case params.SepoliaGenesisHash:
genesis = DefaultSepoliaGenesisBlock()
}
if genesis != nil {
alloc = genesis.Alloc
} else {
return errors.New("not found")
}
}
_, err := alloc.flush(db)
return err
}
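A minimal usage sketch of the recovery path just defined, assuming geth's own packages at this revision and an in-memory database: a known network hash recovers from the compiled-in default allocation, while an unknown hash with nothing persisted fails with "not found".

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/params"
)

func main() {
    db := rawdb.NewMemoryDatabase()

    // Known network: the allocation is recovered from the built-in genesis.
    fmt.Println(core.CommitGenesisState(db, params.MainnetGenesisHash)) // <nil>

    // Unknown (e.g. private network) hash and nothing persisted: unrecoverable.
    fmt.Println(core.CommitGenesisState(db, common.HexToHash("0xdead"))) // not found
}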
// GenesisAccount is an account in the state of the genesis block. // GenesisAccount is an account in the state of the genesis block.
type GenesisAccount struct { type GenesisAccount struct {
Code []byte `json:"code,omitempty"` Code []byte `json:"code,omitempty"`
@ -219,11 +294,19 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
rawdb.WriteChainConfig(db, stored, newcfg) rawdb.WriteChainConfig(db, stored, newcfg)
return newcfg, stored, nil return newcfg, stored, nil
} }
// Special case: don't change the existing config of a non-mainnet chain if no new // Special case: if a private network is being used (no genesis and also no
// config is supplied. These chains would get AllProtocolChanges (and a compat error) // mainnet hash in the database), we must not apply the `configOrDefault`
// if we just continued here. // chain config as that would be AllProtocolChanges (applying any new fork
// on top of an existing private network genesis block). In that case, only
// apply the overrides.
if genesis == nil && stored != params.MainnetGenesisHash { if genesis == nil && stored != params.MainnetGenesisHash {
return storedcfg, stored, nil newcfg = storedcfg
if overrideArrowGlacier != nil {
newcfg.ArrowGlacierBlock = overrideArrowGlacier
}
if overrideTerminalTotalDifficulty != nil {
newcfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty
}
} }
// Check config compatibility and write the config. Compatibility errors // Check config compatibility and write the config. Compatibility errors
// are returned to the caller unless we're already at block zero. // are returned to the caller unless we're already at block zero.
@ -253,6 +336,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
return params.RinkebyChainConfig return params.RinkebyChainConfig
case ghash == params.GoerliGenesisHash: case ghash == params.GoerliGenesisHash:
return params.GoerliChainConfig return params.GoerliChainConfig
case ghash == params.KilnGenesisHash:
return DefaultKilnGenesisBlock().Config
default: default:
return params.AllEthashProtocolChanges return params.AllEthashProtocolChanges
} }
@ -264,19 +349,10 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if db == nil { if db == nil {
db = rawdb.NewMemoryDatabase() db = rawdb.NewMemoryDatabase()
} }
statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil) root, err := g.Alloc.flush(db)
if err != nil { if err != nil {
panic(err) panic(err)
} }
for addr, account := range g.Alloc {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
}
root := statedb.IntermediateRoot(false)
head := &types.Header{ head := &types.Header{
Number: new(big.Int).SetUint64(g.Number), Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce), Nonce: types.EncodeNonce(g.Nonce),
@ -304,9 +380,6 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee) head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee)
} }
} }
statedb.Commit(false)
statedb.Database().TrieDB().Commit(root, true, nil)
return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)) return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
} }
@ -327,6 +400,9 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if config.Clique != nil && len(block.Extra()) == 0 { if config.Clique != nil && len(block.Extra()) == 0 {
return nil, errors.New("can't start clique chain without signers") return nil, errors.New("can't start clique chain without signers")
} }
if err := g.Alloc.write(db, block.Hash()); err != nil {
return nil, err
}
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty()) rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
rawdb.WriteBlock(db, block) rawdb.WriteBlock(db, block)
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil) rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
@ -418,6 +494,15 @@ func DefaultSepoliaGenesisBlock() *Genesis {
} }
} }
func DefaultKilnGenesisBlock() *Genesis {
g := new(Genesis)
reader := strings.NewReader(KilnAllocData)
if err := json.NewDecoder(reader).Decode(g); err != nil {
panic(err)
}
return g
}
// DeveloperGenesisBlock returns the 'geth --dev' genesis block. // DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *Genesis { func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one // Override the default period to the user requested one

File diff suppressed because one or more lines are too long

View File

@ -213,3 +213,33 @@ func TestGenesis_Commit(t *testing.T) {
t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty()) t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
} }
} }
func TestReadWriteGenesisAlloc(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
alloc = &GenesisAlloc{
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
{2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
}
hash = common.HexToHash("0xdeadbeef")
)
alloc.write(db, hash)
var reload GenesisAlloc
err := reload.UnmarshalJSON(rawdb.ReadGenesisState(db, hash))
if err != nil {
t.Fatalf("Failed to load genesis state %v", err)
}
if len(reload) != len(*alloc) {
t.Fatal("Unexpected genesis allocation")
}
for addr, account := range reload {
want, ok := (*alloc)[addr]
if !ok {
t.Fatal("Account is not found")
}
if !reflect.DeepEqual(want, account) {
t.Fatal("Unexpected account")
}
}
}

View File

@ -83,8 +83,8 @@ type NumberHash struct {
Hash common.Hash Hash common.Hash
} }
// ReadAllHashes retrieves all the hashes assigned to blocks at a certain heights, // ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
// both canonical and reorged forks included. // heights, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_. // This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash { func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
var ( var (
@ -776,7 +776,7 @@ func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
WriteHeader(db, block.Header()) WriteHeader(db, block.Header())
} }
// WriteAncientBlock writes entire block data into ancient store and returns the total written size. // WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) { func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
var ( var (
tdSum = new(big.Int).Set(td) tdSum = new(big.Int).Set(td)

View File

@ -81,6 +81,19 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
} }
} }
// ReadGenesisState retrieves the genesis state based on the given genesis hash.
func ReadGenesisState(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(genesisKey(hash))
return data
}
// WriteGenesisState writes the genesis state to disk.
func WriteGenesisState(db ethdb.KeyValueWriter, hash common.Hash, data []byte) {
if err := db.Put(genesisKey(hash), data); err != nil {
log.Crit("Failed to store genesis state", "err", err)
}
}
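A round-trip sketch of the two accessors above, assuming an in-memory database (the blob content is invented; real callers store the JSON-marshaled allocation):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
    db := rawdb.NewMemoryDatabase()
    hash := common.HexToHash("0x01")

    rawdb.WriteGenesisState(db, hash, []byte(`{"alloc":{}}`)) // illustrative payload
    fmt.Printf("%s\n", rawdb.ReadGenesisState(db, hash))      // {"alloc":{}}
}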
// crashList is a list of unclean-shutdown-markers, for rlp-encoding to the // crashList is a list of unclean-shutdown-markers, for rlp-encoding to the
// database // database
type crashList struct { type crashList struct {

View File

@ -115,7 +115,7 @@ func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash com
// IterateStorageSnapshots returns an iterator for walking the entire storage // IterateStorageSnapshots returns an iterator for walking the entire storage
// space of a specific account. // space of a specific account.
func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator { func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator {
return db.NewIterator(storageSnapshotsKey(accountHash), nil) return NewKeyLengthIterator(db.NewIterator(storageSnapshotsKey(accountHash), nil), len(SnapshotStoragePrefix)+2*common.HashLength)
} }
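The motivation for the wrapper: a bare prefix iterator also yields longer keys that merely share the prefix, so the iterator is filtered to the exact storage-slot key length, one prefix byte plus two 32-byte hashes in this schema. A dependency-free sketch of the filtering idea:

package main

import "fmt"

// filterByLength keeps only keys of the expected exact length, mimicking
// what NewKeyLengthIterator does lazily during iteration.
func filterByLength(keys [][]byte, want int) [][]byte {
    var out [][]byte
    for _, k := range keys {
        if len(k) == want {
            out = append(out, k)
        }
    }
    return out
}

func main() {
    want := 1 + 32 + 32                                  // prefix byte + account hash + slot hash
    keys := [][]byte{make([]byte, 65), make([]byte, 70)} // the second key is dropped
    fmt.Println(len(filterByLength(keys, want)))         // 1
}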
// ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at // ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at

View File

@ -28,17 +28,6 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data return data
} }
// WritePreimages writes the provided set of preimages to the database.
func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
for hash, preimage := range preimages {
if err := db.Put(preimageKey(hash), preimage); err != nil {
log.Crit("Failed to store trie preimage", "err", err)
}
}
preimageCounter.Inc(int64(len(preimages)))
preimageHitCounter.Inc(int64(len(preimages)))
}
// ReadCode retrieves the contract code of the provided code hash. // ReadCode retrieves the contract code of the provided code hash.
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte { func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
// Try with the prefixed code scheme first, if not then try with legacy // Try with the prefixed code scheme first, if not then try with legacy
@ -47,7 +36,7 @@ func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
if len(data) != 0 { if len(data) != 0 {
return data return data
} }
data, _ = db.Get(hash[:]) data, _ = db.Get(hash.Bytes())
return data return data
} }
@ -59,6 +48,24 @@ func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data return data
} }
// ReadTrieNode retrieves the trie node of the provided hash.
func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(hash.Bytes())
return data
}
// HasCode checks if the contract code corresponding to the
// provided code hash is present in the db.
func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
// Try with the prefixed code scheme first, if not then try with legacy
// scheme.
if ok := HasCodeWithPrefix(db, hash); ok {
return true
}
ok, _ := db.Has(hash.Bytes())
return ok
}
// HasCodeWithPrefix checks if the contract code corresponding to the // HasCodeWithPrefix checks if the contract code corresponding to the
// provided code hash is present in the db. This function will only check // provided code hash is present in the db. This function will only check
// presence using the prefix-scheme. // presence using the prefix-scheme.
@ -67,6 +74,23 @@ func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
return ok return ok
} }
// HasTrieNode checks if the trie node with the provided hash is present in db.
func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
ok, _ := db.Has(hash.Bytes())
return ok
}
// WritePreimages writes the provided set of preimages to the database.
func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
for hash, preimage := range preimages {
if err := db.Put(preimageKey(hash), preimage); err != nil {
log.Crit("Failed to store trie preimage", "err", err)
}
}
preimageCounter.Inc(int64(len(preimages)))
preimageHitCounter.Inc(int64(len(preimages)))
}
// WriteCode writes the provided contract code into the database. // WriteCode writes the provided contract code into the database.
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) { func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
if err := db.Put(codeKey(hash), code); err != nil { if err := db.Put(codeKey(hash), code); err != nil {
@ -74,6 +98,13 @@ func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
} }
} }
// WriteTrieNode writes the provided trie node into the database.
func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
if err := db.Put(hash.Bytes(), node); err != nil {
log.Crit("Failed to store trie node", "err", err)
}
}
// DeleteCode deletes the specified contract code from the database. // DeleteCode deletes the specified contract code from the database.
func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) { func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(codeKey(hash)); err != nil { if err := db.Delete(codeKey(hash)); err != nil {
@ -81,25 +112,6 @@ func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
} }
} }
// ReadTrieNode retrieves the trie node of the provided hash.
func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(hash.Bytes())
return data
}
// HasTrieNode checks if the trie node with the provided hash is present in db.
func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
ok, _ := db.Has(hash.Bytes())
return ok
}
// WriteTrieNode writes the provided trie node database.
func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
if err := db.Put(hash.Bytes(), node); err != nil {
log.Crit("Failed to store trie node", "err", err)
}
}
// DeleteTrieNode deletes the specified trie node from the database. // DeleteTrieNode deletes the specified trie node from the database.
func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(hash.Bytes()); err != nil { if err := db.Delete(hash.Bytes()); err != nil {

View File

@ -0,0 +1,80 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"bytes"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
// ReadSkeletonSyncStatus retrieves the serialized sync status saved at shutdown.
func ReadSkeletonSyncStatus(db ethdb.KeyValueReader) []byte {
data, _ := db.Get(skeletonSyncStatusKey)
return data
}
// WriteSkeletonSyncStatus stores the serialized sync status to save at shutdown.
func WriteSkeletonSyncStatus(db ethdb.KeyValueWriter, status []byte) {
if err := db.Put(skeletonSyncStatusKey, status); err != nil {
log.Crit("Failed to store skeleton sync status", "err", err)
}
}
// DeleteSkeletonSyncStatus deletes the serialized sync status saved at the last
// shutdown.
func DeleteSkeletonSyncStatus(db ethdb.KeyValueWriter) {
if err := db.Delete(skeletonSyncStatusKey); err != nil {
log.Crit("Failed to remove skeleton sync status", "err", err)
}
}
// ReadSkeletonHeader retrieves a block header from the skeleton sync store.
func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header {
data, _ := db.Get(skeletonHeaderKey(number))
if len(data) == 0 {
return nil
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
log.Error("Invalid skeleton header RLP", "number", number, "err", err)
return nil
}
return header
}
// WriteSkeletonHeader stores a block header into the skeleton sync store.
func WriteSkeletonHeader(db ethdb.KeyValueWriter, header *types.Header) {
data, err := rlp.EncodeToBytes(header)
if err != nil {
log.Crit("Failed to RLP encode header", "err", err)
}
key := skeletonHeaderKey(header.Number.Uint64())
if err := db.Put(key, data); err != nil {
log.Crit("Failed to store skeleton header", "err", err)
}
}
// DeleteSkeletonHeader removes all block header data associated with a number.
func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) {
if err := db.Delete(skeletonHeaderKey(number)); err != nil {
log.Crit("Failed to delete skeleton header", "err", err)
}
}
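A minimal round-trip sketch of the skeleton header accessors, assuming an in-memory database and a header populated with just the fields the sketch needs:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/types"
)

func main() {
    db := rawdb.NewMemoryDatabase()
    header := &types.Header{Number: big.NewInt(100)} // minimal header, for illustration

    rawdb.WriteSkeletonHeader(db, header)
    fmt.Println(rawdb.ReadSkeletonHeader(db, 100).Number) // 100

    rawdb.DeleteSkeletonHeader(db, 100)
    fmt.Println(rawdb.ReadSkeletonHeader(db, 100) == nil) // true
}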

View File

@ -99,6 +99,11 @@ func (db *nofreezedb) Ancients() (uint64, error) {
return 0, errNotSupported return 0, errNotSupported
} }
// Tail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Tail() (uint64, error) {
return 0, errNotSupported
}
// AncientSize returns an error as we don't have a backing chain freezer. // AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) { func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
return 0, errNotSupported return 0, errNotSupported
@ -109,8 +114,13 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e
return 0, errNotSupported return 0, errNotSupported
} }
// TruncateAncients returns an error as we don't have a backing chain freezer. // TruncateHead returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateAncients(items uint64) error { func (db *nofreezedb) TruncateHead(items uint64) error {
return errNotSupported
}
// TruncateTail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateTail(items uint64) error {
return errNotSupported return errNotSupported
} }
@ -135,6 +145,12 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (e
return fn(db) return fn(db)
} }
// MigrateTable processes the entries in a given table in sequence,
// converting them to a new format if they're of an old format.
func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
return errNotSupported
}
// NewDatabase creates a high level database on top of a given key-value data // NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage. // store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@ -211,7 +227,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
// Block #1 is still in the database, we're allowed to init a new freezer // Block #1 is still in the database, we're allowed to init a new freezer
} }
// Otherwise, the head header is still the genesis, we're allowed to init a new // Otherwise, the head header is still the genesis, we're allowed to init a new
// feezer. // freezer.
} }
} }
// Freezer is consistent with the key-value database, permit combining the two // Freezer is consistent with the key-value database, permit combining the two
@ -321,6 +337,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
storageSnaps stat storageSnaps stat
preimages stat preimages stat
bloomBits stat bloomBits stat
beaconHeaders stat
cliqueSnaps stat cliqueSnaps stat
// Ancient store statistics // Ancient store statistics
@ -375,10 +392,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
preimages.Add(size) preimages.Add(size)
case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength): case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
metadata.Add(size) metadata.Add(size)
case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
metadata.Add(size)
case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength): case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
bloomBits.Add(size) bloomBits.Add(size)
case bytes.HasPrefix(key, BloomBitsIndexPrefix): case bytes.HasPrefix(key, BloomBitsIndexPrefix):
bloomBits.Add(size) bloomBits.Add(size)
case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
beaconHeaders.Add(size)
case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength: case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
cliqueSnaps.Add(size) cliqueSnaps.Add(size)
case bytes.HasPrefix(key, []byte("cht-")) || case bytes.HasPrefix(key, []byte("cht-")) ||
@ -395,7 +416,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey, databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, transitionStatusKey, uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
} { } {
if bytes.Equal(key, meta) { if bytes.Equal(key, meta) {
metadata.Add(size) metadata.Add(size)
@ -441,6 +462,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()}, {"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()}, {"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()}, {"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()}, {"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()}, {"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()}, {"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},

View File

@ -19,6 +19,7 @@ package rawdb
import ( import (
"errors" "errors"
"fmt" "fmt"
"io/ioutil"
"math" "math"
"os" "os"
"path/filepath" "path/filepath"
@ -66,7 +67,7 @@ const (
freezerTableSize = 2 * 1000 * 1000 * 1000 freezerTableSize = 2 * 1000 * 1000 * 1000
) )
// freezer is an memory mapped append-only database to store immutable chain data // freezer is a memory mapped append-only database to store immutable chain data
// into flat files: // into flat files:
// //
// - The append only nature ensures that disk writes are minimized. // - The append only nature ensures that disk writes are minimized.
@ -78,6 +79,7 @@ type freezer struct {
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned, // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG). // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
frozen uint64 // Number of blocks already frozen frozen uint64 // Number of blocks already frozen
tail uint64 // Number of the first stored item in the freezer
threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
// This lock synchronizes writers and the truncate operation, as well as // This lock synchronizes writers and the truncate operation, as well as
@ -226,6 +228,11 @@ func (f *freezer) Ancients() (uint64, error) {
return atomic.LoadUint64(&f.frozen), nil return atomic.LoadUint64(&f.frozen), nil
} }
// Tail returns the number of the first stored item in the freezer.
func (f *freezer) Tail() (uint64, error) {
return atomic.LoadUint64(&f.tail), nil
}
// AncientSize returns the ancient size of the specified category. // AncientSize returns the ancient size of the specified category.
func (f *freezer) AncientSize(kind string) (uint64, error) { func (f *freezer) AncientSize(kind string) (uint64, error) {
// This needs the write lock to avoid data races on table fields. // This needs the write lock to avoid data races on table fields.
@ -261,7 +268,7 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
if err != nil { if err != nil {
// The write operation has failed. Go back to the previous item position. // The write operation has failed. Go back to the previous item position.
for name, table := range f.tables { for name, table := range f.tables {
err := table.truncate(prevItem) err := table.truncateHead(prevItem)
if err != nil { if err != nil {
log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err) log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
} }
@ -282,8 +289,8 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
return writeSize, nil return writeSize, nil
} }
// TruncateAncients discards any recent data above the provided threshold number. // TruncateHead discards any recent data above the provided threshold number.
func (f *freezer) TruncateAncients(items uint64) error { func (f *freezer) TruncateHead(items uint64) error {
if f.readonly { if f.readonly {
return errReadOnly return errReadOnly
} }
@ -294,7 +301,7 @@ func (f *freezer) TruncateAncients(items uint64) error {
return nil return nil
} }
for _, table := range f.tables { for _, table := range f.tables {
if err := table.truncate(items); err != nil { if err := table.truncateHead(items); err != nil {
return err return err
} }
} }
@ -302,6 +309,26 @@ func (f *freezer) TruncateAncients(items uint64) error {
return nil return nil
} }
// TruncateTail discards any data below the provided threshold number.
func (f *freezer) TruncateTail(tail uint64) error {
if f.readonly {
return errReadOnly
}
f.writeLock.Lock()
defer f.writeLock.Unlock()
if atomic.LoadUint64(&f.tail) >= tail {
return nil
}
for _, table := range f.tables {
if err := table.truncateTail(tail); err != nil {
return err
}
}
atomic.StoreUint64(&f.tail, tail)
return nil
}
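Taken together, the freezer now exposes items in the half-open window [tail, head): TruncateHead lowers the upper bound, the new TruncateTail raises the lower one. A dependency-free sketch of the two directions:

package main

import "fmt"

// freezerSketch models just the window bookkeeping, not the file handling.
type freezerSketch struct{ tail, head uint64 }

func (f *freezerSketch) truncateHead(items uint64) {
    if items < f.head {
        f.head = items // drop the most recent entries
    }
}

func (f *freezerSketch) truncateTail(tail uint64) {
    if tail > f.tail {
        f.tail = tail // mark the oldest entries as deleted
    }
}

func main() {
    f := &freezerSketch{tail: 0, head: 100}
    f.truncateHead(90)          // items 90..99 gone
    f.truncateTail(10)          // items 0..9 gone
    fmt.Println(f.tail, f.head) // 10 90
}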
// Sync flushes all data tables to disk. // Sync flushes all data tables to disk.
func (f *freezer) Sync() error { func (f *freezer) Sync() error {
var errs []error var errs []error
@ -345,19 +372,30 @@ func (f *freezer) validate() error {
// repair truncates all data tables to the same length. // repair truncates all data tables to the same length.
func (f *freezer) repair() error { func (f *freezer) repair() error {
min := uint64(math.MaxUint64) var (
head = uint64(math.MaxUint64)
tail = uint64(0)
)
for _, table := range f.tables { for _, table := range f.tables {
items := atomic.LoadUint64(&table.items) items := atomic.LoadUint64(&table.items)
if min > items { if head > items {
min = items head = items
}
hidden := atomic.LoadUint64(&table.itemHidden)
if hidden > tail {
tail = hidden
} }
} }
for _, table := range f.tables { for _, table := range f.tables {
if err := table.truncate(min); err != nil { if err := table.truncateHead(head); err != nil {
return err
}
if err := table.truncateTail(tail); err != nil {
return err return err
} }
} }
atomic.StoreUint64(&f.frozen, min) atomic.StoreUint64(&f.frozen, head)
atomic.StoreUint64(&f.tail, tail)
return nil return nil
} }
@ -581,3 +619,116 @@ func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []
return hashes, err return hashes, err
} }
// convertLegacyFn takes a raw freezer entry in an older format and
// returns it in the new format.
type convertLegacyFn = func([]byte) ([]byte, error)
// MigrateTable processes the entries in a given table in sequence,
// converting them to a new format if they're of an old format.
func (f *freezer) MigrateTable(kind string, convert convertLegacyFn) error {
if f.readonly {
return errReadOnly
}
f.writeLock.Lock()
defer f.writeLock.Unlock()
table, ok := f.tables[kind]
if !ok {
return errUnknownTable
}
// forEach iterates every entry in the table serially and in order, calling `fn`
// with the item as argument. If `fn` returns an error, the iteration stops
// and that error will be returned.
forEach := func(t *freezerTable, offset uint64, fn func(uint64, []byte) error) error {
var (
items = atomic.LoadUint64(&t.items)
batchSize = uint64(1024)
maxBytes = uint64(1024 * 1024)
)
for i := offset; i < items; {
if i+batchSize > items {
batchSize = items - i
}
data, err := t.RetrieveItems(i, batchSize, maxBytes)
if err != nil {
return err
}
for j, item := range data {
if err := fn(i+uint64(j), item); err != nil {
return err
}
}
i += uint64(len(data))
}
return nil
}
// TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration
// process assumes no deletion at tail and needs to be modified to account for that.
if table.itemOffset > 0 || table.itemHidden > 0 {
return fmt.Errorf("migration not supported for tail-deleted freezers")
}
ancientsPath := filepath.Dir(table.index.Name())
// Set up a new dir for the migrated table, the content of which
// will be moved over to the ancients dir at the end.
migrationPath := filepath.Join(ancientsPath, "migration")
newTable, err := NewFreezerTable(migrationPath, kind, FreezerNoSnappy[kind], false)
if err != nil {
return err
}
var (
batch = newTable.newBatch()
out []byte
start = time.Now()
logged = time.Now()
offset = newTable.items
)
if offset > 0 {
log.Info("found previous migration attempt", "migrated", offset)
}
// Iterate through entries and transform them
if err := forEach(table, offset, func(i uint64, blob []byte) error {
if i%10000 == 0 && time.Since(logged) > 16*time.Second {
log.Info("Processing legacy elements", "count", i, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
out, err = convert(blob)
if err != nil {
return err
}
if err := batch.AppendRaw(i, out); err != nil {
return err
}
return nil
}); err != nil {
return err
}
if err := batch.commit(); err != nil {
return err
}
log.Info("Replacing old table files with migrated ones", "elapsed", common.PrettyDuration(time.Since(start)))
// Release and delete old table files. Note this won't
// delete the index file.
table.releaseFilesAfter(0, true)
if err := newTable.Close(); err != nil {
return err
}
files, err := ioutil.ReadDir(migrationPath)
if err != nil {
return err
}
// Move migrated files to ancients dir.
for _, f := range files {
// This will replace the old index file as a side-effect.
if err := os.Rename(filepath.Join(migrationPath, f.Name()), filepath.Join(ancientsPath, f.Name())); err != nil {
return err
}
}
// Delete the now-empty dir.
if err := os.Remove(migrationPath); err != nil {
return err
}
return nil
}
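A hedged sketch of the callback shape MigrateTable expects; the pass-through transformation and the in-memory iteration below are invented stand-ins, since a real conversion is format-specific and runs against freezer tables rather than slices:

package main

import "fmt"

// convertLegacyFn mirrors the signature above: take one raw legacy entry,
// return it re-encoded in the new format.
type convertLegacyFn = func([]byte) ([]byte, error)

// migrate shows only the iteration shape; the real code reads batches via
// RetrieveItems and appends the results through a freezer table batch.
func migrate(entries [][]byte, convert convertLegacyFn) ([][]byte, error) {
    out := make([][]byte, 0, len(entries))
    for _, e := range entries {
        converted, err := convert(e)
        if err != nil {
            return nil, err
        }
        out = append(out, converted)
    }
    return out, nil
}

func main() {
    tagV2 := func(b []byte) ([]byte, error) { return append([]byte("v2:"), b...), nil }
    got, _ := migrate([][]byte{[]byte("a"), []byte("b")}, tagV2)
    fmt.Printf("%s %s\n", got[0], got[1]) // v2:a v2:b
}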

View File

@ -193,7 +193,7 @@ func (batch *freezerTableBatch) commit() error {
dataSize := int64(len(batch.dataBuffer)) dataSize := int64(len(batch.dataBuffer))
batch.dataBuffer = batch.dataBuffer[:0] batch.dataBuffer = batch.dataBuffer[:0]
// Write index. // Write indices.
_, err = batch.t.index.Write(batch.indexBuffer) _, err = batch.t.index.Write(batch.indexBuffer)
if err != nil { if err != nil {
return err return err

109
core/rawdb/freezer_meta.go Normal file
View File

@ -0,0 +1,109 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
package rawdb
import (
"io"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
const freezerVersion = 1 // The initial version tag of freezer table metadata
// freezerTableMeta wraps all the metadata of the freezer table.
type freezerTableMeta struct {
// Version is the versioning descriptor of the freezer table.
Version uint16
// VirtualTail indicates how many items have been marked as deleted.
// Its value is equal to the number of items removed from the table
// plus the number of items hidden in the table, so it should never
// be lower than the "actual tail".
VirtualTail uint64
}
// newMetadata initializes the metadata object with the given virtual tail.
func newMetadata(tail uint64) *freezerTableMeta {
return &freezerTableMeta{
Version: freezerVersion,
VirtualTail: tail,
}
}
// readMetadata reads the metadata of the freezer table from the
// given metadata file.
func readMetadata(file *os.File) (*freezerTableMeta, error) {
_, err := file.Seek(0, io.SeekStart)
if err != nil {
return nil, err
}
var meta freezerTableMeta
if err := rlp.Decode(file, &meta); err != nil {
return nil, err
}
return &meta, nil
}
// writeMetadata writes the metadata of the freezer table into the
// given metadata file.
func writeMetadata(file *os.File, meta *freezerTableMeta) error {
_, err := file.Seek(0, io.SeekStart)
if err != nil {
return err
}
return rlp.Encode(file, meta)
}
// loadMetadata loads the metadata from the given metadata file.
// Initializes the metadata file with the given "actual tail" if
// it's empty.
func loadMetadata(file *os.File, tail uint64) (*freezerTableMeta, error) {
stat, err := file.Stat()
if err != nil {
return nil, err
}
// Write the metadata with the given actual tail into the metadata file
// if it's non-existent. There are two possible scenarios here:
// - the freezer table is empty
// - the freezer table is legacy
// In both cases, write the meta into the file with the actual tail
// as the virtual tail.
if stat.Size() == 0 {
m := newMetadata(tail)
if err := writeMetadata(file, m); err != nil {
return nil, err
}
return m, nil
}
m, err := readMetadata(file)
if err != nil {
return nil, err
}
// Update the virtual tail if it is lower than the given actual tail.
// Theoretically this shouldn't happen at all; print a warning here.
if m.VirtualTail < tail {
log.Warn("Updated virtual tail", "have", m.VirtualTail, "now", tail)
m.VirtualTail = tail
if err := writeMetadata(file, m); err != nil {
return nil, err
}
}
return m, nil
}
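A small sketch of the repair invariant loadMetadata enforces: the stored virtual tail may only move up toward the actual tail, never down (the file handling itself is elided):

package main

import "fmt"

// repairedTail restates loadMetadata's rule: bump the stored virtual tail up
// to the actual tail when it falls below it; otherwise leave it alone.
func repairedTail(stored, actual uint64) uint64 {
    if stored < actual {
        return actual // the real code also logs a warning here
    }
    return stored
}

func main() {
    fmt.Println(repairedTail(5, 9)) // 9 (bumped up)
    fmt.Println(repairedTail(9, 5)) // 9 (left untouched)
}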

View File

@ -0,0 +1,61 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
package rawdb
import (
"io/ioutil"
"os"
"testing"
)
func TestReadWriteFreezerTableMeta(t *testing.T) {
f, err := ioutil.TempFile(os.TempDir(), "*")
if err != nil {
t.Fatalf("Failed to create file %v", err)
}
err = writeMetadata(f, newMetadata(100))
if err != nil {
t.Fatalf("Failed to write metadata %v", err)
}
meta, err := readMetadata(f)
if err != nil {
t.Fatalf("Failed to read metadata %v", err)
}
if meta.Version != freezerVersion {
t.Fatalf("Unexpected version field")
}
if meta.VirtualTail != uint64(100) {
t.Fatalf("Unexpected virtual tail field")
}
}
func TestInitializeFreezerTableMeta(t *testing.T) {
f, err := ioutil.TempFile(os.TempDir(), "*")
if err != nil {
t.Fatalf("Failed to create file %v", err)
}
meta, err := loadMetadata(f, uint64(100))
if err != nil {
t.Fatalf("Failed to read metadata %v", err)
}
if meta.Version != freezerVersion {
t.Fatalf("Unexpected version field")
}
if meta.VirtualTail != uint64(100) {
t.Fatalf("Unexpected virtual tail field")
}
}

View File

@ -47,20 +47,19 @@ var (
) )
// indexEntry contains the number/id of the file that the data resides in, as well as the // indexEntry contains the number/id of the file that the data resides in, as well as the
// offset within the file to the end of the data // offset within the file to the end of the data.
// In serialized form, the filenum is stored as uint16. // In serialized form, the filenum is stored as uint16.
type indexEntry struct { type indexEntry struct {
filenum uint32 // stored as uint16 ( 2 bytes) filenum uint32 // stored as uint16 ( 2 bytes )
offset uint32 // stored as uint32 ( 4 bytes) offset uint32 // stored as uint32 ( 4 bytes )
} }
const indexEntrySize = 6 const indexEntrySize = 6
// unmarshalBinary deserializes binary b into the rawIndex entry. // unmarshalBinary deserializes binary b into the rawIndex entry.
func (i *indexEntry) unmarshalBinary(b []byte) error { func (i *indexEntry) unmarshalBinary(b []byte) {
i.filenum = uint32(binary.BigEndian.Uint16(b[:2])) i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
i.offset = binary.BigEndian.Uint32(b[2:6]) i.offset = binary.BigEndian.Uint32(b[2:6])
return nil
} }
// append adds the encoded entry to the end of b. // append adds the encoded entry to the end of b.
@ -75,14 +74,14 @@ func (i *indexEntry) append(b []byte) []byte {
// bounds returns the start- and end- offsets, and the file number of where to // bounds returns the start- and end- offsets, and the file number of where to
// read the data item marked by the two index entries. The two entries are // read the data item marked by the two index entries. The two entries are
// assumed to be sequential. // assumed to be sequential.
func (start *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) { func (i *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
if start.filenum != end.filenum { if i.filenum != end.filenum {
// If a piece of data 'crosses' a data-file, // If a piece of data 'crosses' a data-file,
// it's actually in one piece on the second data-file. // it's actually in one piece on the second data-file.
// We return a zero-indexEntry for the second file as start // We return a zero-indexEntry for the second file as start
return 0, end.offset, end.filenum return 0, end.offset, end.filenum
} }
return start.offset, end.offset, end.filenum return i.offset, end.offset, end.filenum
} }
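A worked sketch of the bounds logic above, using a dependency-free stand-in for indexEntry: entries in the same file yield (start.offset, end.offset), while a file crossing means the item sits wholly at the start of the second file.

package main

import "fmt"

type entry struct{ filenum, offset uint32 }

// bounds mirrors the method above for two sequential index entries.
func bounds(start, end entry) (uint32, uint32, uint32) {
    if start.filenum != end.filenum {
        return 0, end.offset, end.filenum
    }
    return start.offset, end.offset, end.filenum
}

func main() {
    s, e, id := bounds(entry{3, 120}, entry{3, 200})
    fmt.Println(s, e, id) // 120 200 3

    s, e, id = bounds(entry{3, 950}, entry{4, 80})
    fmt.Println(s, e, id) // 0 80 4 (item lives at the start of file 4)
}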
// freezerTable represents a single chained data table within the freezer (e.g. blocks). // freezerTable represents a single chained data table within the freezer (e.g. blocks).
@ -92,7 +91,15 @@ type freezerTable struct {
// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only // WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned, // 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG). // so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
items uint64 // Number of items stored in the table (including items removed from tail) items uint64 // Number of items stored in the table (including items removed from tail)
itemOffset uint64 // Number of items removed from the table
// itemHidden is the number of items marked as deleted. Tail deletion is
// only supported at file level which means the actual deletion will be
// delayed until the entire data file is marked as deleted. Before that
// these items will be hidden to prevent being visited again. The value
// should never be lower than itemOffset.
itemHidden uint64
noCompression bool // if true, disables snappy compression. Note: does not work retroactively noCompression bool // if true, disables snappy compression. Note: does not work retroactively
readonly bool readonly bool
@ -101,14 +108,11 @@ type freezerTable struct {
path string
head *os.File // File descriptor for the data head of the table
index *os.File // File descriptor for the indexEntry file of the table
meta *os.File // File descriptor for metadata of the table
files map[uint32]*os.File // open files
headId uint32 // number of the currently active head file
tailId uint32 // number of the earliest file
headBytes int64 // Number of bytes written to the head file
readMeter metrics.Meter // Meter for measuring the effective amount of data read
@ -124,46 +128,8 @@ func NewFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerT
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
}
// newTable opens a freezer table, creating the data and index files if they are
// non-existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
// Ensure the containing directory exists and open the indexEntry file
@ -172,28 +138,47 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
}
var idxName string
if noCompression {
idxName = fmt.Sprintf("%s.ridx", name) // raw index file
} else {
idxName = fmt.Sprintf("%s.cidx", name) // compressed index file
}
var (
err error
index *os.File
meta *os.File
)
if readonly {
// Will fail if table doesn't exist
index, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
if err != nil {
return nil, err
}
// TODO(rjl493456442) change it to read-only mode. Open the metadata file
// in rw mode. It's a temporary solution for now and should be changed
// whenever the tail deletion is actually used. The reason for this hack is
// the additional meta file for each freezer table is added in order to support
// tail deletion, but for most legacy nodes this file is missing. This check
// will suddenly break lots of database relevant commands. So the metadata file
// is always opened for mutation and nothing else will be written except
// the initialization.
meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
if err != nil {
return nil, err
}
} else {
index, err = openFreezerFileForAppend(filepath.Join(path, idxName))
if err != nil {
return nil, err
}
meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
if err != nil {
return nil, err
}
}
// Create the table and repair any past inconsistency
tab := &freezerTable{
index: index,
meta: meta,
files: make(map[uint32]*os.File),
readMeter: readMeter,
writeMeter: writeMeter,
@ -220,7 +205,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
return tab, nil
}
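A usage sketch of the constructor path, mirroring the tests further down; the path, table name and error handling are illustrative only:

	tbl, err := NewFreezerTable("/tmp/freezer", "headers", true, false) // snappy disabled, read-write
	if err != nil {
		panic(err) // illustrative only
	}
	batch := tbl.newBatch()
	_ = batch.AppendRaw(0, []byte{0xff}) // item 0
	_ = batch.commit()
	_ = tbl.Close()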
// repair cross-checks the head and the index file and truncates them to
// be in sync with each other after a potential crash / data loss.
func (t *freezerTable) repair() error {
// Create a temporary offset buffer to init files with and read indexEntry into
@ -258,11 +243,27 @@ func (t *freezerTable) repair() error {
t.index.ReadAt(buffer, 0)
firstIndex.unmarshalBinary(buffer)
// Assign the tail fields with the first stored index.
// The total removed items is represented with a uint32,
// which is not enough in theory but enough in practice.
// TODO: use uint64 to represent total removed items.
t.tailId = firstIndex.filenum
t.itemOffset = uint64(firstIndex.offset)
// Load metadata from the file
meta, err := loadMetadata(t.meta, t.itemOffset)
if err != nil {
return err
}
t.itemHidden = meta.VirtualTail
// Read the last index, use the default value in case the freezer is empty
if offsetsSize == indexEntrySize {
lastIndex = indexEntry{filenum: t.tailId, offset: 0}
} else {
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
lastIndex.unmarshalBinary(buffer)
}
if t.readonly {
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
} else {
@ -278,7 +279,6 @@ func (t *freezerTable) repair() error {
// Keep truncating both files until they come in sync
contentExp = int64(lastIndex.offset)
for contentExp != contentSize {
// Truncate the head file to the last offset pointer
if contentExp < contentSize {
@ -295,9 +295,16 @@ func (t *freezerTable) repair() error {
return err
}
offsetsSize -= indexEntrySize
// Read the new head index, use the default value in case
// the freezer is already empty.
var newLastIndex indexEntry
if offsetsSize == indexEntrySize {
newLastIndex = indexEntry{filenum: t.tailId, offset: 0}
} else {
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
newLastIndex.unmarshalBinary(buffer)
}
// We might have slipped back into an earlier head-file here
if newLastIndex.filenum != lastIndex.filenum {
// Release earlier opened file
@ -325,12 +332,21 @@ func (t *freezerTable) repair() error {
if err := t.head.Sync(); err != nil {
return err
}
if err := t.meta.Sync(); err != nil {
return err
}
}
// Update the item and byte counters and return
t.items = t.itemOffset + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
t.headBytes = contentSize
t.headId = lastIndex.filenum
// Delete the leftover files because of head deletion
t.releaseFilesAfter(t.headId, true)
// Delete the leftover files because of tail deletion
t.releaseFilesBefore(t.tailId, true)
// Close opened files and preopen all files
if err := t.preopen(); err != nil {
return err
@ -346,6 +362,7 @@ func (t *freezerTable) repair() error {
func (t *freezerTable) preopen() (err error) {
// The repair might have already opened (some) files
t.releaseFilesAfter(0, false)
// Open all except head in RDONLY
for i := t.tailId; i < t.headId; i++ {
if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
@ -361,16 +378,19 @@ func (t *freezerTable) preopen() (err error) {
return err
}
// truncateHead discards any recent data above the provided threshold number.
func (t *freezerTable) truncateHead(items uint64) error {
t.lock.Lock()
defer t.lock.Unlock()
// Ensure the given truncate target falls in the correct range
existing := atomic.LoadUint64(&t.items)
if existing <= items {
return nil
}
if items < atomic.LoadUint64(&t.itemHidden) {
return errors.New("truncation below tail")
}
// We need to truncate, save the old size for metrics tracking
oldSize, err := t.sizeNolock()
if err != nil {
@ -382,17 +402,24 @@ func (t *freezerTable) truncate(items uint64) error {
log = t.logger.Warn // Only loud warn if we delete multiple items
}
log("Truncating freezer table", "items", existing, "limit", items)
// Truncate the index file first, the tail position is also considered
// when calculating the new freezer table length.
length := items - atomic.LoadUint64(&t.itemOffset)
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
return err
}
// Calculate the new expected size of the data file and truncate it
var expected indexEntry
if length == 0 {
expected = indexEntry{filenum: t.tailId, offset: 0}
} else {
buffer := make([]byte, indexEntrySize)
if _, err := t.index.ReadAt(buffer, int64(length*indexEntrySize)); err != nil {
return err
}
expected.unmarshalBinary(buffer)
}
// We might need to truncate back to older files
if expected.filenum != t.headId {
// If already open for reading, force-reopen for writing
@ -421,7 +448,110 @@ func (t *freezerTable) truncate(items uint64) error {
return err
}
t.sizeGauge.Dec(int64(oldSize - newSize))
return nil
}
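A semantics sketch for the bounds checks above, for a table t with assumed counters items=7 and itemHidden=4:

	_ = t.truncateHead(5) // drops items 5 and 6; the head becomes 5
	_ = t.truncateHead(3) // returns "truncation below tail", since 3 < itemHidden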
// truncateTail discards all data below the provided threshold number, i.e.
// the oldest items in the table.
func (t *freezerTable) truncateTail(items uint64) error {
t.lock.Lock()
defer t.lock.Unlock()
// Ensure the given truncate target falls in the correct range
if atomic.LoadUint64(&t.itemHidden) >= items {
return nil
}
if atomic.LoadUint64(&t.items) < items {
return errors.New("truncation above head")
}
// Load the new tail index by the given new tail position
var (
newTailId uint32
buffer = make([]byte, indexEntrySize)
)
if atomic.LoadUint64(&t.items) == items {
newTailId = t.headId
} else {
offset := items - atomic.LoadUint64(&t.itemOffset)
if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
return err
}
var newTail indexEntry
newTail.unmarshalBinary(buffer)
newTailId = newTail.filenum
}
// Update the virtual tail marker and hide these entries in the table.
atomic.StoreUint64(&t.itemHidden, items)
if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
return err
}
// Hidden items still fall in the current tail file; no data file
// can be dropped.
if t.tailId == newTailId {
return nil
}
// Hidden items fall in an incorrect range; return the error.
if t.tailId > newTailId {
return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId)
}
// Hidden items exceed the current tail file, drop the relevant
// data files. We need to truncate, save the old size for metrics
// tracking.
oldSize, err := t.sizeNolock()
if err != nil {
return err
}
// Count how many items can be deleted from the file.
var (
newDeleted = items
deleted = atomic.LoadUint64(&t.itemOffset)
)
for current := items - 1; current >= deleted; current -= 1 {
if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
return err
}
var pre indexEntry
pre.unmarshalBinary(buffer)
if pre.filenum != newTailId {
break
}
newDeleted = current
}
// Commit the metadata changes first, before manipulating
// the index file.
if err := t.meta.Sync(); err != nil {
return err
}
// Truncate the deleted index entries from the index file.
err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
tailIndex := indexEntry{
filenum: newTailId,
offset: uint32(newDeleted),
}
_, err := f.Write(tailIndex.append(nil))
return err
})
if err != nil {
return err
}
// Reopen the modified index file to load the changes
if err := t.index.Close(); err != nil {
return err
}
t.index, err = openFreezerFileForAppend(t.index.Name())
if err != nil {
return err
}
// Release any files before the current tail
t.tailId = newTailId
atomic.StoreUint64(&t.itemOffset, newDeleted)
t.releaseFilesBefore(t.tailId, true)
// Retrieve the new size and update the total size counter
newSize, err := t.sizeNolock()
if err != nil {
return err
}
t.sizeGauge.Dec(int64(oldSize - newSize))
return nil return nil
}
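Because deletion only happens at file granularity, truncateTail first advances the virtual tail (itemHidden) and only drops a data file once every item in it is hidden. A sketch assuming two 20-byte items per data file, as in TestTruncateTail below:

	_ = t.truncateTail(1) // item 0 hidden; file 0 survives since it still holds item 1
	_ = t.truncateTail(2) // items 0-1 hidden; file 0 is now fully dead and removed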
@ -436,6 +566,11 @@ func (t *freezerTable) Close() error {
}
t.index = nil
if err := t.meta.Close(); err != nil {
errs = append(errs, err)
}
t.meta = nil
for _, f := range t.files {
if err := f.Close(); err != nil {
errs = append(errs, err)
@ -490,6 +625,19 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
}
}
// releaseFilesBefore closes all open files with a lower number, and optionally also deletes the files
func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
for fnum, f := range t.files {
if fnum < num {
delete(t.files, fnum)
f.Close()
if remove {
os.Remove(f.Name())
}
}
}
}
// getIndices returns the index entries for the given from-item, covering 'count' items.
// N.B: The actual number of returned indices for N items will always be N+1 (unless an
// error is returned).
@ -498,7 +646,7 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
// it will return error.
func (t *freezerTable) getIndices(from, count uint64) ([]*indexEntry, error) {
// Apply the table-offset
from = from - t.itemOffset
// For reading N items, we need N+1 indices.
buffer := make([]byte, (count+1)*indexEntrySize)
if _, err := t.index.ReadAt(buffer, int64(from*indexEntrySize)); err != nil {
@ -583,18 +731,21 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
t.lock.RLock()
defer t.lock.RUnlock()
// Ensure the table and the item are accessible
if t.index == nil || t.head == nil {
return nil, nil, errClosed
}
var (
items = atomic.LoadUint64(&t.items) // the total items (head + 1)
hidden = atomic.LoadUint64(&t.itemHidden) // the number of hidden items
)
// Ensure the start is written, not deleted from the tail, and that the
// caller actually wants something
if items <= start || hidden > start || count == 0 {
return nil, nil, errOutOfBounds
}
if start+count > items {
count = items - start
}
var (
output = make([]byte, maxBytes) // Buffer to read data into
@ -670,10 +821,10 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return output[:outputSize], sizes, nil
}
// has returns an indicator whether the specified number data is still accessible
// in the freezer table.
func (t *freezerTable) has(number uint64) bool {
return atomic.LoadUint64(&t.items) > number && atomic.LoadUint64(&t.itemHidden) <= number
}
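So an item is visible exactly when it lies in the half-open window [itemHidden, items); a sketch with assumed counters items=7 and itemHidden=2:

	_ = t.has(1) // false: hidden at the tail
	_ = t.has(2) // true: first visible item
	_ = t.has(7) // false: beyond the head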
// size returns the total data size in the freezer table.
@ -727,6 +878,9 @@ func (t *freezerTable) Sync() error {
if err := t.index.Sync(); err != nil {
return err
}
if err := t.meta.Sync(); err != nil {
return err
}
return t.head.Sync()
}
@ -744,13 +898,20 @@ func (t *freezerTable) dumpIndexString(start, stop int64) string {
}
func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
meta, err := readMetadata(t.meta)
if err != nil {
fmt.Fprintf(w, "Failed to decode freezer table %v\n", err)
return
}
fmt.Fprintf(w, "Version %d deleted %d, hidden %d\n", meta.Version, atomic.LoadUint64(&t.itemOffset), atomic.LoadUint64(&t.itemHidden))
buf := make([]byte, indexEntrySize)
fmt.Fprintf(w, "| number | fileno | offset |\n")
fmt.Fprintf(w, "|--------|--------|--------|\n")
for i := uint64(start); ; i++ {
if _, err := t.index.ReadAt(buf, int64((i+1)*indexEntrySize)); err != nil {
break
}
var entry indexEntry

core/rawdb/freezer_table_test.go

@ -18,13 +18,18 @@ package rawdb
import (
"bytes"
"encoding/binary"
"fmt"
"math/rand"
"os"
"path/filepath"
"reflect"
"sync/atomic"
"testing"
"testing/quick"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/metrics"
"github.com/stretchr/testify/require"
)
@ -204,7 +209,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
}
// Remove everything but the first item, and leave data unaligned
// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
idxFile.Close()
// Now open it again
@ -387,7 +392,7 @@ func TestFreezerTruncate(t *testing.T) {
t.Fatal(err)
}
defer f.Close()
f.truncateHead(10) // 150 bytes
if f.items != 10 {
t.Fatalf("expected %d items, got %d", 10, f.items)
}
@ -504,7 +509,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
}
// Now, truncate back to zero
f.truncateHead(0)
// Write the data again
batch := f.newBatch()
@ -565,18 +570,19 @@ func TestFreezerOffset(t *testing.T) {
// Update the index file, so that we store
// [ file = 2, offset = 4 ] at index zero
zeroIndex := indexEntry{
filenum: uint32(2), // First file is 2
offset: uint32(4), // We have removed four items
}
buf := zeroIndex.append(nil)
// Overwrite index zero
copy(indexBuf, buf)
// Remove the four next indices by overwriting
copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
indexFile.WriteAt(indexBuf, 0)
// Need to truncate the moved index items
indexFile.Truncate(indexEntrySize * (1 + 2))
indexFile.Close()
@ -623,13 +629,12 @@ func TestFreezerOffset(t *testing.T) {
// Update the index file, so that we store
// [ file = 2, offset = 1M ] at index zero
zeroIndex := indexEntry{
offset: uint32(1000000), // We have removed 1M items
filenum: uint32(2), // First file is 2
}
buf := zeroIndex.append(nil)
// Overwrite index zero
copy(indexBuf, buf)
indexFile.WriteAt(indexBuf, 0)
@ -659,6 +664,171 @@ func TestFreezerOffset(t *testing.T) {
}
}
func TestTruncateTail(t *testing.T) {
t.Parallel()
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())
// Fill table
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
// Write 7 x 20 bytes, splitting out into four files
batch := f.newBatch()
require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
require.NoError(t, batch.commit())
// nothing to do, all the items should still be there.
f.truncateTail(0)
fmt.Println(f.dumpIndexString(0, 1000))
checkRetrieve(t, f, map[uint64][]byte{
0: getChunk(20, 0xFF),
1: getChunk(20, 0xEE),
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// Truncate a single element (item 0); deletion is only supported at file level
f.truncateTail(1)
fmt.Println(f.dumpIndexString(0, 1000))
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
1: getChunk(20, 0xEE),
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// Reopen the table, the deletion information should be persisted as well
f.Close()
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
1: getChunk(20, 0xEE),
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// Truncate two elements (items 0 and 1); file 0 should be deleted
f.truncateTail(2)
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
1: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// Reopen the table, the above testing should still pass
f.Close()
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
defer f.Close()
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
1: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// truncate all, the entire freezer should be deleted
f.truncateTail(7)
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
1: errOutOfBounds,
2: errOutOfBounds,
3: errOutOfBounds,
4: errOutOfBounds,
5: errOutOfBounds,
6: errOutOfBounds,
})
}
func TestTruncateHead(t *testing.T) {
t.Parallel()
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())
// Fill table
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
// Write 7 x 20 bytes, splitting out into four files
batch := f.newBatch()
require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
require.NoError(t, batch.commit())
f.truncateTail(4) // Tail = 4
// NewHead is required to be 3, the entire table should be truncated
f.truncateHead(4)
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds, // Deleted by tail
1: errOutOfBounds, // Deleted by tail
2: errOutOfBounds, // Deleted by tail
3: errOutOfBounds, // Deleted by tail
4: errOutOfBounds, // Deleted by Head
5: errOutOfBounds, // Deleted by Head
6: errOutOfBounds, // Deleted by Head
})
// Append new items
batch = f.newBatch()
require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
require.NoError(t, batch.commit())
checkRetrieve(t, f, map[uint64][]byte{
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
}
func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
t.Helper()
@ -915,3 +1085,212 @@ func TestFreezerReadonly(t *testing.T) {
t.Fatalf("Writing to readonly table should fail") t.Fatalf("Writing to readonly table should fail")
} }
} }
// randTest performs random freezer table operations.
// Instances of this test are created by Generate.
type randTest []randTestStep
type randTestStep struct {
op int
items []uint64 // for append and retrieve
blobs [][]byte // for append
target uint64 // for truncate(head/tail)
err error // for debugging
}
const (
opReload = iota
opAppend
opRetrieve
opTruncateHead
opTruncateHeadAll
opTruncateTail
opTruncateTailAll
opCheckAll
opMax // boundary value, not an actual op
)
func getVals(first uint64, n int) [][]byte {
var ret [][]byte
for i := 0; i < n; i++ {
val := make([]byte, 8)
binary.BigEndian.PutUint64(val, first+uint64(i))
ret = append(ret, val)
}
return ret
}
func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
var (
deleted uint64 // The number of deleted items from tail
items []uint64 // The index of entries in table
// getItems retrieves the indexes for items in table.
getItems = func(n int) []uint64 {
length := len(items)
if length == 0 {
return nil
}
var ret []uint64
index := rand.Intn(length)
for i := index; len(ret) < n && i < length; i++ {
ret = append(ret, items[i])
}
return ret
}
// addItems appends the given length items into the table.
addItems = func(n int) []uint64 {
var first = deleted
if len(items) != 0 {
first = items[len(items)-1] + 1
}
var ret []uint64
for i := 0; i < n; i++ {
ret = append(ret, first+uint64(i))
}
items = append(items, ret...)
return ret
}
)
var steps randTest
for i := 0; i < size; i++ {
step := randTestStep{op: r.Intn(opMax)}
switch step.op {
case opReload, opCheckAll:
case opAppend:
num := r.Intn(3)
step.items = addItems(num)
if len(step.items) == 0 {
step.blobs = nil
} else {
step.blobs = getVals(step.items[0], num)
}
case opRetrieve:
step.items = getItems(r.Intn(3))
case opTruncateHead:
if len(items) == 0 {
step.target = deleted
} else {
index := r.Intn(len(items))
items = items[:index]
step.target = deleted + uint64(index)
}
case opTruncateHeadAll:
step.target = deleted
items = items[:0]
case opTruncateTail:
if len(items) == 0 {
step.target = deleted
} else {
index := r.Intn(len(items))
items = items[index:]
deleted += uint64(index)
step.target = deleted
}
case opTruncateTailAll:
step.target = deleted + uint64(len(items))
items = items[:0]
deleted = step.target
}
steps = append(steps, step)
}
return reflect.ValueOf(steps)
}
func runRandTest(rt randTest) bool {
fname := fmt.Sprintf("randtest-%d", rand.Uint64())
f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
if err != nil {
panic("failed to initialize table")
}
var values [][]byte
for i, step := range rt {
switch step.op {
case opReload:
f.Close()
f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
if err != nil {
rt[i].err = fmt.Errorf("failed to reload table %v", err)
}
case opCheckAll:
tail := atomic.LoadUint64(&f.itemHidden)
head := atomic.LoadUint64(&f.items)
if tail == head {
continue
}
got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
if err != nil {
rt[i].err = err
} else {
if !reflect.DeepEqual(got, values) {
rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
}
}
case opAppend:
batch := f.newBatch()
for i := 0; i < len(step.items); i++ {
batch.AppendRaw(step.items[i], step.blobs[i])
}
batch.commit()
values = append(values, step.blobs...)
case opRetrieve:
var blobs [][]byte
if len(step.items) == 0 {
continue
}
tail := atomic.LoadUint64(&f.itemHidden)
for i := 0; i < len(step.items); i++ {
blobs = append(blobs, values[step.items[i]-tail])
}
got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
if err != nil {
rt[i].err = err
} else {
if !reflect.DeepEqual(got, blobs) {
rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
}
}
case opTruncateHead:
f.truncateHead(step.target)
length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
values = values[:length]
case opTruncateHeadAll:
f.truncateHead(step.target)
values = nil
case opTruncateTail:
prev := atomic.LoadUint64(&f.itemHidden)
f.truncateTail(step.target)
truncated := atomic.LoadUint64(&f.itemHidden) - prev
values = values[truncated:]
case opTruncateTailAll:
f.truncateTail(step.target)
values = nil
}
// Abort the test on error.
if rt[i].err != nil {
return false
}
}
f.Close()
return true
}
func TestRandom(t *testing.T) {
if err := quick.Check(runRandTest, nil); err != nil {
if cerr, ok := err.(*quick.CheckError); ok {
t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
}
t.Fatal(err)
}
}

core/rawdb/freezer_test.go

@ -24,6 +24,7 @@ import (
"math/big" "math/big"
"math/rand" "math/rand"
"os" "os"
"path"
"sync" "sync"
"testing" "testing"
@ -186,7 +187,7 @@ func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
wg.Wait()
}
// This test runs ModifyAncients and TruncateHead concurrently with each other.
func TestFreezerConcurrentModifyTruncate(t *testing.T) {
f, dir := newFreezerForTesting(t, freezerTestTableDef)
defer os.RemoveAll(dir)
@ -196,7 +197,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
for i := 0; i < 1000; i++ { for i := 0; i < 1000; i++ {
// First reset and write 100 items. // First reset and write 100 items.
if err := f.TruncateAncients(0); err != nil { if err := f.TruncateHead(0); err != nil {
t.Fatal("truncate failed:", err) t.Fatal("truncate failed:", err)
} }
_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error { _, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
@ -231,7 +232,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
wg.Done()
}()
go func() {
truncateErr = f.TruncateHead(10)
wg.Done()
}()
go func() {
@ -337,3 +338,92 @@ func checkAncientCount(t *testing.T, f *freezer, kind string, n uint64) {
t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err) t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
} }
} }
func TestRenameWindows(t *testing.T) {
var (
fname = "file.bin"
fname2 = "file2.bin"
data = []byte{1, 2, 3, 4}
data2 = []byte{2, 3, 4, 5}
data3 = []byte{3, 5, 6, 7}
dataLen = 4
)
// Create 2 temp dirs
dir1, err := os.MkdirTemp("", "rename-test")
if err != nil {
t.Fatal(err)
}
defer os.Remove(dir1)
dir2, err := os.MkdirTemp("", "rename-test")
if err != nil {
t.Fatal(err)
}
defer os.Remove(dir2)
// Create file in dir1 and fill with data
f, err := os.Create(path.Join(dir1, fname))
if err != nil {
t.Fatal(err)
}
f2, err := os.Create(path.Join(dir1, fname2))
if err != nil {
t.Fatal(err)
}
f3, err := os.Create(path.Join(dir2, fname2))
if err != nil {
t.Fatal(err)
}
if _, err := f.Write(data); err != nil {
t.Fatal(err)
}
if _, err := f2.Write(data2); err != nil {
t.Fatal(err)
}
if _, err := f3.Write(data3); err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
if err := f2.Close(); err != nil {
t.Fatal(err)
}
if err := f3.Close(); err != nil {
t.Fatal(err)
}
if err := os.Rename(f.Name(), path.Join(dir2, fname)); err != nil {
t.Fatal(err)
}
if err := os.Rename(f2.Name(), path.Join(dir2, fname2)); err != nil {
t.Fatal(err)
}
// Check file contents
f, err = os.Open(path.Join(dir2, fname))
if err != nil {
t.Fatal(err)
}
defer f.Close()
defer os.Remove(f.Name())
buf := make([]byte, dataLen)
if _, err := f.Read(buf); err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, data) {
t.Errorf("unexpected file contents. Got %v\n", buf)
}
f, err = os.Open(path.Join(dir2, fname2))
if err != nil {
t.Fatal(err)
}
defer f.Close()
defer os.Remove(f.Name())
if _, err := f.Read(buf); err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, data2) {
t.Errorf("unexpected file contents. Got %v\n", buf)
}
}

core/rawdb/freezer_utils.go (new file, 120 lines)

@ -0,0 +1,120 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"io"
"io/ioutil"
"os"
"path/filepath"
)
// copyFrom copies data from 'srcPath' at offset 'offset' into 'destPath'.
// The 'destPath' is created if it doesn't exist, otherwise it is overwritten.
// Before the copy is executed, a callback can be registered to
// manipulate the dest file.
// It is perfectly valid to have destPath == srcPath.
func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) error) error {
// Create a temp file in the same dir where we want it to wind up
f, err := ioutil.TempFile(filepath.Dir(destPath), "*")
if err != nil {
return err
}
fname := f.Name()
// Clean up the leftover file
defer func() {
if f != nil {
f.Close()
}
os.Remove(fname)
}()
// Apply the given function if it's not nil before we copy
// the content from the src.
if before != nil {
if err := before(f); err != nil {
return err
}
}
// Open the source file
src, err := os.Open(srcPath)
if err != nil {
return err
}
if _, err = src.Seek(int64(offset), 0); err != nil {
src.Close()
return err
}
// io.Copy uses 32K buffer internally.
_, err = io.Copy(f, src)
if err != nil {
src.Close()
return err
}
// Rename the temporary file to the specified dest name.
// src may be same as dest, so needs to be closed before
// we do the final move.
src.Close()
if err := f.Close(); err != nil {
return err
}
f = nil
if err := os.Rename(fname, destPath); err != nil {
return err
}
return nil
}
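truncateTail above uses exactly this helper to rewrite the index file in place: the callback seeds the destination with a synthetic tail entry before the surviving entries are copied over. A sketch with made-up file name and offsets:

	err := copyFrom("headers.ridx", "headers.ridx", indexEntrySize*3, func(f *os.File) error {
		tail := indexEntry{filenum: 2, offset: 4} // synthetic tail entry
		_, werr := f.Write(tail.append(nil))
		return werr
	})
	// err == nil on success; the first three index entries are gone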
// openFreezerFileForAppend opens a freezer table file and seeks to the end
func openFreezerFileForAppend(filename string) (*os.File, error) {
// Open the file without the O_APPEND flag
// because it has differing behaviour during Truncate operations
// on different OS's
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return nil, err
}
// Seek to end for append
if _, err = file.Seek(0, io.SeekEnd); err != nil {
return nil, err
}
return file, nil
}
// openFreezerFileForReadOnly opens a freezer table file for read only access
func openFreezerFileForReadOnly(filename string) (*os.File, error) {
return os.OpenFile(filename, os.O_RDONLY, 0644)
}
// openFreezerFileTruncated opens a freezer table making sure it is truncated
func openFreezerFileTruncated(filename string) (*os.File, error) {
return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
}
// truncateFreezerFile resizes a freezer table file and seeks to the end
func truncateFreezerFile(file *os.File, size int64) error {
if err := file.Truncate(size); err != nil {
return err
}
// Seek to end for append
if _, err := file.Seek(0, io.SeekEnd); err != nil {
return err
}
return nil
}

core/rawdb/freezer_utils_test.go (new file)

@ -0,0 +1,76 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"bytes"
"io/ioutil"
"os"
"testing"
)
func TestCopyFrom(t *testing.T) {
var (
content = []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
prefix = []byte{0x9, 0xa, 0xb, 0xc, 0xd, 0xf}
)
var cases = []struct {
src, dest string
offset uint64
writePrefix bool
}{
{"foo", "bar", 0, false},
{"foo", "bar", 1, false},
{"foo", "bar", 8, false},
{"foo", "foo", 0, false},
{"foo", "foo", 1, false},
{"foo", "foo", 8, false},
{"foo", "bar", 0, true},
{"foo", "bar", 1, true},
{"foo", "bar", 8, true},
}
for _, c := range cases {
ioutil.WriteFile(c.src, content, 0644)
if err := copyFrom(c.src, c.dest, c.offset, func(f *os.File) error {
if !c.writePrefix {
return nil
}
f.Write(prefix)
return nil
}); err != nil {
os.Remove(c.src)
t.Fatalf("Failed to copy %v", err)
}
blob, err := ioutil.ReadFile(c.dest)
if err != nil {
os.Remove(c.src)
os.Remove(c.dest)
t.Fatalf("Failed to read %v", err)
}
want := content[c.offset:]
if c.writePrefix {
want = append(prefix, want...)
}
if !bytes.Equal(blob, want) {
t.Fatal("Unexpected value")
}
os.Remove(c.src)
os.Remove(c.dest)
}
}

core/rawdb/key_length_iterator.go (new file)

@ -0,0 +1,47 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import "github.com/ethereum/go-ethereum/ethdb"
// KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs
// with a specific key length will be returned.
type KeyLengthIterator struct {
requiredKeyLength int
ethdb.Iterator
}
// NewKeyLengthIterator returns a wrapped version of the iterator that will only
// return key-value pairs whose keys have the required key length.
func NewKeyLengthIterator(it ethdb.Iterator, keyLen int) ethdb.Iterator {
return &KeyLengthIterator{
Iterator: it,
requiredKeyLength: keyLen,
}
}
func (it *KeyLengthIterator) Next() bool {
// Return true as soon as a key with the required key length is discovered
for it.Iterator.Next() {
if len(it.Iterator.Key()) == it.requiredKeyLength {
return true
}
}
// Return false when we exhaust the keys in the underlying iterator.
return false
}
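A usage sketch, assuming an existing ethdb database handle db and a hypothetical process helper; only keys of the requested length reach the loop body (compare the test below):

	it := NewKeyLengthIterator(db.NewIterator(nil, nil), 8)
	for it.Next() {
		process(it.Key(), it.Value()) // hypothetical consumer
	}
	it.Release()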

core/rawdb/key_length_iterator_test.go (new file)

@ -0,0 +1,60 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"encoding/binary"
"testing"
)
func TestKeyLengthIterator(t *testing.T) {
db := NewMemoryDatabase()
keyLen := 8
expectedKeys := make(map[string]struct{})
for i := 0; i < 100; i++ {
key := make([]byte, keyLen)
binary.BigEndian.PutUint64(key, uint64(i))
if err := db.Put(key, []byte{0x1}); err != nil {
t.Fatal(err)
}
expectedKeys[string(key)] = struct{}{}
longerKey := make([]byte, keyLen*2)
binary.BigEndian.PutUint64(longerKey, uint64(i))
if err := db.Put(longerKey, []byte{0x1}); err != nil {
t.Fatal(err)
}
}
it := NewKeyLengthIterator(db.NewIterator(nil, nil), keyLen)
for it.Next() {
key := it.Key()
_, exists := expectedKeys[string(key)]
if !exists {
t.Fatalf("Found unexpected key %d", binary.BigEndian.Uint64(key))
}
delete(expectedKeys, string(key))
if len(key) != keyLen {
t.Fatalf("Found unexpected key in key length iterator with length %d", len(key))
}
}
if len(expectedKeys) != 0 {
t.Fatalf("Expected all keys of length %d to be removed from expected keys during iteration", keyLen)
}
}

core/rawdb/schema.go

@ -63,6 +63,9 @@ var (
// snapshotSyncStatusKey tracks the snapshot sync status across restarts.
snapshotSyncStatusKey = []byte("SnapshotSyncStatus")
// skeletonSyncStatusKey tracks the skeleton sync status across restarts.
skeletonSyncStatusKey = []byte("SkeletonSyncStatus")
// txIndexTailKey tracks the oldest block whose transactions have been indexed.
txIndexTailKey = []byte("TransactionIndexTail")
@ -92,9 +95,11 @@ var (
SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
CodePrefix = []byte("c") // CodePrefix + code hash -> account code
skeletonHeaderPrefix = []byte("S") // skeletonHeaderPrefix + num (uint64 big endian) -> header
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
genesisPrefix = []byte("ethereum-genesis-") // genesis state prefix for the db
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
@ -210,6 +215,11 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
return key
}
// skeletonHeaderKey = skeletonHeaderPrefix + num (uint64 big endian)
func skeletonHeaderKey(number uint64) []byte {
return append(skeletonHeaderPrefix, encodeBlockNumber(number)...)
}
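The resulting key is the one-byte prefix followed by the 8-byte big-endian block number (encodeBlockNumber is the existing helper for that); for example:

	key := skeletonHeaderKey(1024)
	// key == []byte{'S', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00}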
// preimageKey = PreimagePrefix + hash
func preimageKey(hash common.Hash) []byte {
return append(PreimagePrefix, hash.Bytes()...)
@ -233,3 +243,8 @@ func IsCodeKey(key []byte) (bool, []byte) {
func configKey(hash common.Hash) []byte {
return append(configPrefix, hash.Bytes()...)
}
// genesisKey = genesisPrefix + hash
func genesisKey(hash common.Hash) []byte {
return append(genesisPrefix, hash.Bytes()...)
}

core/rawdb/table.go

@ -74,6 +74,12 @@ func (t *table) Ancients() (uint64, error) {
return t.db.Ancients()
}
// Tail is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Tail() (uint64, error) {
return t.db.Tail()
}
// AncientSize is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AncientSize(kind string) (uint64, error) {
@ -89,10 +95,16 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err err
return t.db.ReadAncients(fn)
}
// TruncateHead is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) TruncateHead(items uint64) error {
return t.db.TruncateHead(items)
}
// TruncateTail is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) TruncateTail(items uint64) error {
return t.db.TruncateTail(items)
}
// Sync is a noop passthrough that just forwards the request to the underlying
@ -101,6 +113,12 @@ func (t *table) Sync() error {
return t.db.Sync()
}
// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
return t.db.MigrateTable(kind, convert)
}
// Put inserts the given value into the database at a prefixed version of the
// provided key.
func (t *table) Put(key []byte, value []byte) error {
@ -172,6 +190,18 @@ func (t *table) NewBatch() ethdb.Batch {
return &tableBatch{t.db.NewBatch(), t.prefix} return &tableBatch{t.db.NewBatch(), t.prefix}
} }
// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
func (t *table) NewBatchWithSize(size int) ethdb.Batch {
return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
}
// NewSnapshot creates a database snapshot based on the current state.
// The created snapshot will not be affected by all following mutations
// happened on the database.
func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
return t.db.NewSnapshot()
}
// tableBatch is a wrapper around a database batch that prefixes each key access
// with a pre-configured string.
type tableBatch struct {

core/state/pruner/pruner.go

@ -265,7 +265,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// Ensure the root is really present. The weak assumption
// is the presence of root can indicate the presence of the
// entire trie.
if !rawdb.HasTrieNode(p.db, root) {
// The special case is for clique based networks(rinkeby, goerli
// and some other private networks), it's possible that two
// consecutive blocks will have same root. In this case snapshot
@ -279,7 +279,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// as the pruning target.
var found bool
for i := len(layers) - 2; i >= 2; i-- {
if rawdb.HasTrieNode(p.db, layers[i].Root()) {
root = layers[i].Root()
found = true
log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)

core/state/snapshot/snapshot.go

@ -546,20 +546,19 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
for it.Next() {
key := it.Key()
batch.Delete(key)
base.cache.Del(key[1:])
snapshotFlushStorageItemMeter.Mark(1)
// Ensure we don't delete too much data blindly (contract can be
// huge). It's ok to flush, the root will go missing in case of a
// crash and we'll detect and regenerate the snapshot.
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
log.Crit("Failed to write storage deletions", "err", err)
}
batch.Reset()
}
}
it.Release()

core/state/statedb.go

@ -891,7 +891,6 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
func (s *StateDB) Prepare(thash common.Hash, ti int) {
s.thash = thash
s.txIndex = ti
}
func (s *StateDB) clearJournalAndRefund() {
@ -1006,6 +1005,9 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
//
// This method should only be called if Berlin/2929+2930 is applicable at the current number.
func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) {
// Clear out any leftover from previous executions
s.accessList = newAccessList()
s.AddAddressToAccessList(sender)
if dst != nil {
s.AddAddressToAccessList(*dst)

core/types/access_list_tx.go

@ -22,7 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
//go:generate gencodec -type AccessTuple -out gen_access_tuple.go //go:generate go run github.com/fjl/gencodec@latest -type AccessTuple -out gen_access_tuple.go
// AccessList is an EIP-2930 access list. // AccessList is an EIP-2930 access list.
type AccessList []AccessTuple type AccessList []AccessTuple

core/types/block.go

@ -63,7 +63,8 @@ func (n *BlockNonce) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("BlockNonce", input, n[:]) return hexutil.UnmarshalFixedText("BlockNonce", input, n[:])
} }
//go:generate gencodec -type Header -field-override headerMarshaling -out gen_header_json.go //go:generate go run github.com/fjl/gencodec@latest -type Header -field-override headerMarshaling -out gen_header_json.go
//go:generate go run ../../rlp/rlpgen -type Header -out gen_header_rlp.go
// Header represents a block header in the Ethereum blockchain. // Header represents a block header in the Ethereum blockchain.
type Header struct { type Header struct {


@ -285,7 +285,7 @@ func makeBenchBlock() *Block {
func TestRlpDecodeParentHash(t *testing.T) { func TestRlpDecodeParentHash(t *testing.T) {
// A minimum one // A minimum one
want := common.HexToHash("0x112233445566778899001122334455667788990011223344556677889900aabb") want := common.HexToHash("0x112233445566778899001122334455667788990011223344556677889900aabb")
if rlpData, err := rlp.EncodeToBytes(Header{ParentHash: want}); err != nil { if rlpData, err := rlp.EncodeToBytes(&Header{ParentHash: want}); err != nil {
t.Fatal(err) t.Fatal(err)
} else { } else {
if have := HeaderParentHashFromRLP(rlpData); have != want { if have := HeaderParentHashFromRLP(rlpData); have != want {
@ -299,7 +299,7 @@ func TestRlpDecodeParentHash(t *testing.T) {
// | BaseFee | dynamic| *big.Int | 64 bits | // | BaseFee | dynamic| *big.Int | 64 bits |
mainnetTd := new(big.Int) mainnetTd := new(big.Int)
mainnetTd.SetString("5ad3c2c71bbff854908", 16) mainnetTd.SetString("5ad3c2c71bbff854908", 16)
if rlpData, err := rlp.EncodeToBytes(Header{ if rlpData, err := rlp.EncodeToBytes(&Header{
ParentHash: want, ParentHash: want,
Difficulty: mainnetTd, Difficulty: mainnetTd,
Number: new(big.Int).SetUint64(math.MaxUint64), Number: new(big.Int).SetUint64(math.MaxUint64),
@ -316,7 +316,7 @@ func TestRlpDecodeParentHash(t *testing.T) {
{ {
// The rlp-encoding of the header below causes a _total_ length of 65540, // The rlp-encoding of the header below causes a _total_ length of 65540,
// which is the first to blow the fast-path. // which is the first to blow the fast-path.
h := Header{ h := &Header{
ParentHash: want, ParentHash: want,
Extra: make([]byte, 65041), Extra: make([]byte, 65041),
} }
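The Header → &Header switches are not cosmetic: rlpgen (added in this release, see the generated files below) emits EncodeRLP with a pointer receiver, so only *Header satisfies rlp.Encoder and exercises the generated fast path:

```go
// Compile-time view of the receiver asymmetry (sketch).
var _ rlp.Encoder = (*Header)(nil) // ok: generated EncodeRLP is declared on *Header
// var _ rlp.Encoder = Header{}   // would not compile: the value type lacks EncodeRLP
```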


@ -0,0 +1,27 @@
// Code generated by rlpgen. DO NOT EDIT.
//go:build !norlpgen
// +build !norlpgen
package types
import "github.com/ethereum/go-ethereum/rlp"
import "io"
func (obj *StateAccount) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
_tmp0 := w.List()
w.WriteUint64(obj.Nonce)
if obj.Balance == nil {
w.Write(rlp.EmptyString)
} else {
if obj.Balance.Sign() == -1 {
return rlp.ErrNegativeBigInt
}
w.WriteBigInt(obj.Balance)
}
w.WriteBytes(obj.Root[:])
w.WriteBytes(obj.CodeHash)
w.ListEnd(_tmp0)
return w.Flush()
}


@ -0,0 +1,56 @@
// Code generated by rlpgen. DO NOT EDIT.
//go:build !norlpgen
// +build !norlpgen
package types
import "github.com/ethereum/go-ethereum/rlp"
import "io"
func (obj *Header) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
_tmp0 := w.List()
w.WriteBytes(obj.ParentHash[:])
w.WriteBytes(obj.UncleHash[:])
w.WriteBytes(obj.Coinbase[:])
w.WriteBytes(obj.Root[:])
w.WriteBytes(obj.TxHash[:])
w.WriteBytes(obj.ReceiptHash[:])
w.WriteBytes(obj.Bloom[:])
if obj.Difficulty == nil {
w.Write(rlp.EmptyString)
} else {
if obj.Difficulty.Sign() == -1 {
return rlp.ErrNegativeBigInt
}
w.WriteBigInt(obj.Difficulty)
}
if obj.Number == nil {
w.Write(rlp.EmptyString)
} else {
if obj.Number.Sign() == -1 {
return rlp.ErrNegativeBigInt
}
w.WriteBigInt(obj.Number)
}
w.WriteUint64(obj.GasLimit)
w.WriteUint64(obj.GasUsed)
w.WriteUint64(obj.Time)
w.WriteBytes(obj.Extra)
w.WriteBytes(obj.MixDigest[:])
w.WriteBytes(obj.Nonce[:])
_tmp1 := obj.BaseFee != nil
if _tmp1 {
if obj.BaseFee == nil {
w.Write(rlp.EmptyString)
} else {
if obj.BaseFee.Sign() == -1 {
return rlp.ErrNegativeBigInt
}
w.WriteBigInt(obj.BaseFee)
}
}
w.ListEnd(_tmp0)
return w.Flush()
}
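The `_tmp1 := obj.BaseFee != nil` guard in this generated header encoder reflects an `rlp:"optional"` tag on the field: the element is only emitted when present, so pre-London headers stay byte-identical. A toy illustration of the tag the generator reads (an assumption about the Header declaration — verify against core/types/block.go):

```go
import "math/big"

// Illustrative only: an optional trailing field is skipped when nil.
type header struct {
	Number  *big.Int
	BaseFee *big.Int `rlp:"optional"`
}
```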

core/types/gen_log_rlp.go

@ -0,0 +1,23 @@
// Code generated by rlpgen. DO NOT EDIT.
//go:build !norlpgen
// +build !norlpgen
package types
import "github.com/ethereum/go-ethereum/rlp"
import "io"
func (obj *rlpLog) EncodeRLP(_w io.Writer) error {
w := rlp.NewEncoderBuffer(_w)
_tmp0 := w.List()
w.WriteBytes(obj.Address[:])
_tmp1 := w.List()
for _, _tmp2 := range obj.Topics {
w.WriteBytes(_tmp2[:])
}
w.ListEnd(_tmp1)
w.WriteBytes(obj.Data)
w.ListEnd(_tmp0)
return w.Flush()
}

core/types/legacy.go

@ -0,0 +1,53 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"errors"
"github.com/ethereum/go-ethereum/rlp"
)
// IsLegacyStoredReceipts tries to parse the RLP-encoded blob
// first as an array of v3 stored receipts, then v4 stored receipts, and
// returns true if successful.
func IsLegacyStoredReceipts(raw []byte) (bool, error) {
var v3 []v3StoredReceiptRLP
if err := rlp.DecodeBytes(raw, &v3); err == nil {
return true, nil
}
var v4 []v4StoredReceiptRLP
if err := rlp.DecodeBytes(raw, &v4); err == nil {
return true, nil
}
var v5 []storedReceiptRLP
// Check whether it decodes as a valid fresh stored receipt
if err := rlp.DecodeBytes(raw, &v5); err == nil {
return false, nil
}
return false, errors.New("value is not a valid receipt encoding")
}
// ConvertLegacyStoredReceipts takes the RLP encoding of an array of legacy
// stored receipts and returns a fresh RLP-encoded stored receipt.
func ConvertLegacyStoredReceipts(raw []byte) ([]byte, error) {
var receipts []ReceiptForStorage
if err := rlp.DecodeBytes(raw, &receipts); err != nil {
return nil, err
}
return rlp.EncodeToBytes(&receipts)
}
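The two helpers are the building blocks for a one-off receipt-format migration. A hypothetical driver loop — the map stands in for whatever key-value source holds the stored receipt blobs, it is not a real API:

```go
// Hypothetical migration pass over stored receipt blobs.
func migrateStoredReceipts(blobs map[string][]byte) error {
	for key, raw := range blobs {
		legacy, err := IsLegacyStoredReceipts(raw)
		if err != nil {
			return err // not a recognizable receipt encoding
		}
		if !legacy {
			continue // already in the fresh format
		}
		fresh, err := ConvertLegacyStoredReceipts(raw)
		if err != nil {
			return err
		}
		blobs[key] = fresh
	}
	return nil
}
```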


@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
//go:generate gencodec -type Log -field-override logMarshaling -out gen_log_json.go //go:generate go run github.com/fjl/gencodec@latest -type Log -field-override logMarshaling -out gen_log_json.go
// Log represents a contract log event. These events are generated by the LOG opcode and // Log represents a contract log event. These events are generated by the LOG opcode and
// stored/indexed by the node. // stored/indexed by the node.
@ -62,15 +62,14 @@ type logMarshaling struct {
Index hexutil.Uint Index hexutil.Uint
} }
//go:generate go run ../../rlp/rlpgen -type rlpLog -out gen_log_rlp.go
type rlpLog struct { type rlpLog struct {
Address common.Address Address common.Address
Topics []common.Hash Topics []common.Hash
Data []byte Data []byte
} }
// rlpStorageLog is the storage encoding of a log.
type rlpStorageLog rlpLog
// legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields. // legacyRlpStorageLog is the previous storage encoding of a log including some redundant fields.
type legacyRlpStorageLog struct { type legacyRlpStorageLog struct {
Address common.Address Address common.Address
@ -85,7 +84,8 @@ type legacyRlpStorageLog struct {
// EncodeRLP implements rlp.Encoder. // EncodeRLP implements rlp.Encoder.
func (l *Log) EncodeRLP(w io.Writer) error { func (l *Log) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}) rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}
return rlp.Encode(w, &rl)
} }
// DecodeRLP implements rlp.Decoder. // DecodeRLP implements rlp.Decoder.
@ -104,11 +104,8 @@ type LogForStorage Log
// EncodeRLP implements rlp.Encoder. // EncodeRLP implements rlp.Encoder.
func (l *LogForStorage) EncodeRLP(w io.Writer) error { func (l *LogForStorage) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, rlpStorageLog{ rl := rlpLog{Address: l.Address, Topics: l.Topics, Data: l.Data}
Address: l.Address, return rlp.Encode(w, &rl)
Topics: l.Topics,
Data: l.Data,
})
} }
// DecodeRLP implements rlp.Decoder. // DecodeRLP implements rlp.Decoder.
@ -119,7 +116,7 @@ func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error {
if err != nil { if err != nil {
return err return err
} }
var dec rlpStorageLog var dec rlpLog
err = rlp.DecodeBytes(blob, &dec) err = rlp.DecodeBytes(blob, &dec)
if err == nil { if err == nil {
*l = LogForStorage{ *l = LogForStorage{
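With rlpStorageLog folded into rlpLog, the consensus and storage encodings of a log are now the same bytes; a quick round-trip sketch:

```go
// Sketch: Log and LogForStorage now share one wire format.
l := &Log{
	Address: common.HexToAddress("0x0000000000000000000000000000000000000001"),
	Topics:  []common.Hash{common.HexToHash("0x01")},
	Data:    []byte{0xca, 0xfe},
}
enc1, _ := rlp.EncodeToBytes(l)
enc2, _ := rlp.EncodeToBytes((*LogForStorage)(l))
// bytes.Equal(enc1, enc2) == true
```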


@ -31,15 +31,14 @@ import (
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
//go:generate gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go //go:generate go run github.com/fjl/gencodec@latest -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go
var ( var (
receiptStatusFailedRLP = []byte{} receiptStatusFailedRLP = []byte{}
receiptStatusSuccessfulRLP = []byte{0x01} receiptStatusSuccessfulRLP = []byte{0x01}
) )
// This error is returned when a typed receipt is decoded, but the string is empty. var errShortTypedReceipt = errors.New("typed receipt too short")
var errEmptyTypedReceipt = errors.New("empty typed receipt bytes")
const ( const (
// ReceiptStatusFailed is the status code of a transaction if execution failed. // ReceiptStatusFailed is the status code of a transaction if execution failed.
@ -182,26 +181,13 @@ func (r *Receipt) DecodeRLP(s *rlp.Stream) error {
} }
r.Type = LegacyTxType r.Type = LegacyTxType
return r.setFromRLP(dec) return r.setFromRLP(dec)
case kind == rlp.String: default:
// It's an EIP-2718 typed tx receipt. // It's an EIP-2718 typed tx receipt.
b, err := s.Bytes() b, err := s.Bytes()
if err != nil { if err != nil {
return err return err
} }
if len(b) == 0 { return r.decodeTyped(b)
return errEmptyTypedReceipt
}
r.Type = b[0]
if r.Type == AccessListTxType || r.Type == DynamicFeeTxType {
var dec receiptRLP
if err := rlp.DecodeBytes(b[1:], &dec); err != nil {
return err
}
return r.setFromRLP(dec)
}
return ErrTxTypeNotSupported
default:
return rlp.ErrExpectedList
} }
} }
@ -224,8 +210,8 @@ func (r *Receipt) UnmarshalBinary(b []byte) error {
// decodeTyped decodes a typed receipt from the canonical format. // decodeTyped decodes a typed receipt from the canonical format.
func (r *Receipt) decodeTyped(b []byte) error { func (r *Receipt) decodeTyped(b []byte) error {
if len(b) == 0 { if len(b) <= 1 {
return errEmptyTypedReceipt return errShortTypedReceipt
} }
switch b[0] { switch b[0] {
case DynamicFeeTxType, AccessListTxType: case DynamicFeeTxType, AccessListTxType:
@ -287,16 +273,20 @@ type ReceiptForStorage Receipt
// EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt // EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt
// into an RLP stream. // into an RLP stream.
func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error { func (r *ReceiptForStorage) EncodeRLP(_w io.Writer) error {
enc := &storedReceiptRLP{ w := rlp.NewEncoderBuffer(_w)
PostStateOrStatus: (*Receipt)(r).statusEncoding(), outerList := w.List()
CumulativeGasUsed: r.CumulativeGasUsed, w.WriteBytes((*Receipt)(r).statusEncoding())
Logs: make([]*LogForStorage, len(r.Logs)), w.WriteUint64(r.CumulativeGasUsed)
logList := w.List()
for _, log := range r.Logs {
if err := rlp.Encode(w, log); err != nil {
return err
}
} }
for i, log := range r.Logs { w.ListEnd(logList)
enc.Logs[i] = (*LogForStorage)(log) w.ListEnd(outerList)
} return w.Flush()
return rlp.Encode(w, enc)
} }
// DecodeRLP implements rlp.Decoder, and loads both consensus and implementation // DecodeRLP implements rlp.Decoder, and loads both consensus and implementation
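Two independent fixes land here. On the decode side, every non-list RLP kind — including single-byte rlp.Byte values, which the old `kind == rlp.String` arm missed — now routes into decodeTyped, whose `len(b) <= 1` check rejects envelopes carrying a type byte but no payload. On the encode side, the intermediate storedReceiptRLP struct is replaced with direct rlp.EncoderBuffer writes. The dispatch, condensed (names as in the method above; a sketch, not the full body):

```go
// Condensed sketch of the post-change Receipt.DecodeRLP dispatch.
kind, _, err := s.Kind()
switch {
case err != nil:
	return err
case kind == rlp.List:
	// Legacy receipt: decode the flat field list.
default:
	// EIP-2718 typed envelope: String and Byte kinds both land here.
	b, err := s.Bytes()
	if err != nil {
		return err
	}
	return r.decodeTyped(b) // errShortTypedReceipt if len(b) <= 1
}
```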


@ -86,7 +86,7 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) {
input := []byte{0x80} input := []byte{0x80}
var r Receipt var r Receipt
err := rlp.DecodeBytes(input, &r) err := rlp.DecodeBytes(input, &r)
if err != errEmptyTypedReceipt { if err != errShortTypedReceipt {
t.Fatal("wrong error:", err) t.Fatal("wrong error:", err)
} }
} }


@ -22,6 +22,8 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
//go:generate go run ../../rlp/rlpgen -type StateAccount -out gen_account_rlp.go
// StateAccount is the Ethereum consensus representation of accounts. // StateAccount is the Ethereum consensus representation of accounts.
// These objects are stored in the main account trie. // These objects are stored in the main account trie.
type StateAccount struct { type StateAccount struct {


@ -37,7 +37,7 @@ var (
ErrInvalidTxType = errors.New("transaction type not valid in this context") ErrInvalidTxType = errors.New("transaction type not valid in this context")
ErrTxTypeNotSupported = errors.New("transaction type not supported") ErrTxTypeNotSupported = errors.New("transaction type not supported")
ErrGasFeeCapTooLow = errors.New("fee cap less than base fee") ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
errEmptyTypedTx = errors.New("empty typed transaction bytes") errShortTypedTx = errors.New("typed transaction too short")
) )
// Transaction types. // Transaction types.
@ -134,7 +134,7 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
tx.setDecoded(&inner, int(rlp.ListSize(size))) tx.setDecoded(&inner, int(rlp.ListSize(size)))
} }
return err return err
case kind == rlp.String: default:
// It's an EIP-2718 typed TX envelope. // It's an EIP-2718 typed TX envelope.
var b []byte var b []byte
if b, err = s.Bytes(); err != nil { if b, err = s.Bytes(); err != nil {
@ -145,8 +145,6 @@ func (tx *Transaction) DecodeRLP(s *rlp.Stream) error {
tx.setDecoded(inner, len(b)) tx.setDecoded(inner, len(b))
} }
return err return err
default:
return rlp.ErrExpectedList
} }
} }
@ -174,8 +172,8 @@ func (tx *Transaction) UnmarshalBinary(b []byte) error {
// decodeTyped decodes a typed transaction from the canonical format. // decodeTyped decodes a typed transaction from the canonical format.
func (tx *Transaction) decodeTyped(b []byte) (TxData, error) { func (tx *Transaction) decodeTyped(b []byte) (TxData, error) {
if len(b) == 0 { if len(b) <= 1 {
return nil, errEmptyTypedTx return nil, errShortTypedTx
} }
switch b[0] { switch b[0] {
case AccessListTxType: case AccessListTxType:
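The transaction decoder gets the mirror-image change, and the tightened guard is directly observable (compare the updated test below):

```go
// A bare type byte is no longer a decodable transaction (sketch).
var tx Transaction
err := rlp.DecodeBytes([]byte{0x01}, &tx)
// err == errShortTypedTx: an AccessList envelope needs a payload after the type byte.
```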


@ -76,7 +76,7 @@ func TestDecodeEmptyTypedTx(t *testing.T) {
input := []byte{0x80} input := []byte{0x80}
var tx Transaction var tx Transaction
err := rlp.DecodeBytes(input, &tx) err := rlp.DecodeBytes(input, &tx)
if err != errEmptyTypedTx { if err != errShortTypedTx {
t.Fatal("wrong error:", err) t.Fatal("wrong error:", err)
} }
} }


@ -214,7 +214,7 @@ var (
// see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified // see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified
gasSStoreEIP2929 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP2200) gasSStoreEIP2929 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP2200)
// gasSStoreEIP2539 implements gas cost for SSTORE according to EPI-2539 // gasSStoreEIP2539 implements gas cost for SSTORE according to EIP-2539
// Replace `SSTORE_CLEARS_SCHEDULE` with `SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST` (4,800) // Replace `SSTORE_CLEARS_SCHEDULE` with `SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST` (4,800)
gasSStoreEIP3529 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP3529) gasSStoreEIP3529 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP3529)
) )


@ -119,105 +119,105 @@ var g2One = PointG2{
*/ */
var frobeniusCoeffs61 = [6]fe2{ var frobeniusCoeffs61 = [6]fe2{
fe2{ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493}, fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741}, fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741},
}, },
fe2{ {
fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160}, fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493}, fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
}, },
fe2{ {
fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741}, fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160}, fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160},
}, },
} }
var frobeniusCoeffs62 = [6]fe2{ var frobeniusCoeffs62 = [6]fe2{
fe2{ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493}, fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x890dc9e4867545c3, 0x2af322533285a5d5, 0x50880866309b7e2c, 0xa20d1b8c7e881024, 0x14e4f04fe2db9068, 0x14e56d3f1564853a}, fe{0x890dc9e4867545c3, 0x2af322533285a5d5, 0x50880866309b7e2c, 0xa20d1b8c7e881024, 0x14e4f04fe2db9068, 0x14e56d3f1564853a},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741}, fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x43f5fffffffcaaae, 0x32b7fff2ed47fffd, 0x07e83a49a2e99d69, 0xeca8f3318332bb7a, 0xef148d1ea0f4c069, 0x040ab3263eff0206}, fe{0x43f5fffffffcaaae, 0x32b7fff2ed47fffd, 0x07e83a49a2e99d69, 0xeca8f3318332bb7a, 0xef148d1ea0f4c069, 0x040ab3263eff0206},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160}, fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0xecfb361b798dba3a, 0xc100ddb891865a2c, 0x0ec08ff1232bda8e, 0xd5c13cc6f1ca4721, 0x47222a47bf7b5c04, 0x0110f184e51c5f59}, fe{0xecfb361b798dba3a, 0xc100ddb891865a2c, 0x0ec08ff1232bda8e, 0xd5c13cc6f1ca4721, 0x47222a47bf7b5c04, 0x0110f184e51c5f59},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
} }
var frobeniusCoeffs12 = [12]fe2{ var frobeniusCoeffs12 = [12]fe2{
fe2{ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493}, fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x07089552b319d465, 0xc6695f92b50a8313, 0x97e83cccd117228f, 0xa35baecab2dc29ee, 0x1ce393ea5daace4d, 0x08f2220fb0fb66eb}, fe{0x07089552b319d465, 0xc6695f92b50a8313, 0x97e83cccd117228f, 0xa35baecab2dc29ee, 0x1ce393ea5daace4d, 0x08f2220fb0fb66eb},
fe{0xb2f66aad4ce5d646, 0x5842a06bfc497cec, 0xcf4895d42599d394, 0xc11b9cba40a8e8d0, 0x2e3813cbe5a0de89, 0x110eefda88847faf}, fe{0xb2f66aad4ce5d646, 0x5842a06bfc497cec, 0xcf4895d42599d394, 0xc11b9cba40a8e8d0, 0x2e3813cbe5a0de89, 0x110eefda88847faf},
}, },
fe2{ {
fe{0xecfb361b798dba3a, 0xc100ddb891865a2c, 0x0ec08ff1232bda8e, 0xd5c13cc6f1ca4721, 0x47222a47bf7b5c04, 0x0110f184e51c5f59}, fe{0xecfb361b798dba3a, 0xc100ddb891865a2c, 0x0ec08ff1232bda8e, 0xd5c13cc6f1ca4721, 0x47222a47bf7b5c04, 0x0110f184e51c5f59},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x3e2f585da55c9ad1, 0x4294213d86c18183, 0x382844c88b623732, 0x92ad2afd19103e18, 0x1d794e4fac7cf0b9, 0x0bd592fc7d825ec8}, fe{0x3e2f585da55c9ad1, 0x4294213d86c18183, 0x382844c88b623732, 0x92ad2afd19103e18, 0x1d794e4fac7cf0b9, 0x0bd592fc7d825ec8},
fe{0x7bcfa7a25aa30fda, 0xdc17dec12a927e7c, 0x2f088dd86b4ebef1, 0xd1ca2087da74d4a7, 0x2da2596696cebc1d, 0x0e2b7eedbbfd87d2}, fe{0x7bcfa7a25aa30fda, 0xdc17dec12a927e7c, 0x2f088dd86b4ebef1, 0xd1ca2087da74d4a7, 0x2da2596696cebc1d, 0x0e2b7eedbbfd87d2},
}, },
fe2{ {
fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160}, fe{0x30f1361b798a64e8, 0xf3b8ddab7ece5a2a, 0x16a8ca3ac61577f7, 0xc26a2ff874fd029b, 0x3636b76660701c6e, 0x051ba4ab241b6160},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x3726c30af242c66c, 0x7c2ac1aad1b6fe70, 0xa04007fbba4b14a2, 0xef517c3266341429, 0x0095ba654ed2226b, 0x02e370eccc86f7dd}, fe{0x3726c30af242c66c, 0x7c2ac1aad1b6fe70, 0xa04007fbba4b14a2, 0xef517c3266341429, 0x0095ba654ed2226b, 0x02e370eccc86f7dd},
fe{0x82d83cf50dbce43f, 0xa2813e53df9d018f, 0xc6f0caa53c65e181, 0x7525cf528d50fe95, 0x4a85ed50f4798a6b, 0x171da0fd6cf8eebd}, fe{0x82d83cf50dbce43f, 0xa2813e53df9d018f, 0xc6f0caa53c65e181, 0x7525cf528d50fe95, 0x4a85ed50f4798a6b, 0x171da0fd6cf8eebd},
}, },
fe2{ {
fe{0x43f5fffffffcaaae, 0x32b7fff2ed47fffd, 0x07e83a49a2e99d69, 0xeca8f3318332bb7a, 0xef148d1ea0f4c069, 0x040ab3263eff0206}, fe{0x43f5fffffffcaaae, 0x32b7fff2ed47fffd, 0x07e83a49a2e99d69, 0xeca8f3318332bb7a, 0xef148d1ea0f4c069, 0x040ab3263eff0206},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0xb2f66aad4ce5d646, 0x5842a06bfc497cec, 0xcf4895d42599d394, 0xc11b9cba40a8e8d0, 0x2e3813cbe5a0de89, 0x110eefda88847faf}, fe{0xb2f66aad4ce5d646, 0x5842a06bfc497cec, 0xcf4895d42599d394, 0xc11b9cba40a8e8d0, 0x2e3813cbe5a0de89, 0x110eefda88847faf},
fe{0x07089552b319d465, 0xc6695f92b50a8313, 0x97e83cccd117228f, 0xa35baecab2dc29ee, 0x1ce393ea5daace4d, 0x08f2220fb0fb66eb}, fe{0x07089552b319d465, 0xc6695f92b50a8313, 0x97e83cccd117228f, 0xa35baecab2dc29ee, 0x1ce393ea5daace4d, 0x08f2220fb0fb66eb},
}, },
fe2{ {
fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741}, fe{0xcd03c9e48671f071, 0x5dab22461fcda5d2, 0x587042afd3851b95, 0x8eb60ebe01bacb9e, 0x03f97d6e83d050d2, 0x18f0206554638741},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x7bcfa7a25aa30fda, 0xdc17dec12a927e7c, 0x2f088dd86b4ebef1, 0xd1ca2087da74d4a7, 0x2da2596696cebc1d, 0x0e2b7eedbbfd87d2}, fe{0x7bcfa7a25aa30fda, 0xdc17dec12a927e7c, 0x2f088dd86b4ebef1, 0xd1ca2087da74d4a7, 0x2da2596696cebc1d, 0x0e2b7eedbbfd87d2},
fe{0x3e2f585da55c9ad1, 0x4294213d86c18183, 0x382844c88b623732, 0x92ad2afd19103e18, 0x1d794e4fac7cf0b9, 0x0bd592fc7d825ec8}, fe{0x3e2f585da55c9ad1, 0x4294213d86c18183, 0x382844c88b623732, 0x92ad2afd19103e18, 0x1d794e4fac7cf0b9, 0x0bd592fc7d825ec8},
}, },
fe2{ {
fe{0x890dc9e4867545c3, 0x2af322533285a5d5, 0x50880866309b7e2c, 0xa20d1b8c7e881024, 0x14e4f04fe2db9068, 0x14e56d3f1564853a}, fe{0x890dc9e4867545c3, 0x2af322533285a5d5, 0x50880866309b7e2c, 0xa20d1b8c7e881024, 0x14e4f04fe2db9068, 0x14e56d3f1564853a},
fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000}, fe{0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000},
}, },
fe2{ {
fe{0x82d83cf50dbce43f, 0xa2813e53df9d018f, 0xc6f0caa53c65e181, 0x7525cf528d50fe95, 0x4a85ed50f4798a6b, 0x171da0fd6cf8eebd}, fe{0x82d83cf50dbce43f, 0xa2813e53df9d018f, 0xc6f0caa53c65e181, 0x7525cf528d50fe95, 0x4a85ed50f4798a6b, 0x171da0fd6cf8eebd},
fe{0x3726c30af242c66c, 0x7c2ac1aad1b6fe70, 0xa04007fbba4b14a2, 0xef517c3266341429, 0x0095ba654ed2226b, 0x02e370eccc86f7dd}, fe{0x3726c30af242c66c, 0x7c2ac1aad1b6fe70, 0xa04007fbba4b14a2, 0xef517c3266341429, 0x0095ba654ed2226b, 0x02e370eccc86f7dd},
}, },

View File

@ -77,149 +77,149 @@ func isogenyMapG2(e *fp2, x, y *fe2) {
} }
var isogenyConstansG1 = [4][16]*fe{ var isogenyConstansG1 = [4][16]*fe{
[16]*fe{ {
&fe{0x4d18b6f3af00131c, 0x19fa219793fee28c, 0x3f2885f1467f19ae, 0x23dcea34f2ffb304, 0xd15b58d2ffc00054, 0x0913be200a20bef4}, {0x4d18b6f3af00131c, 0x19fa219793fee28c, 0x3f2885f1467f19ae, 0x23dcea34f2ffb304, 0xd15b58d2ffc00054, 0x0913be200a20bef4},
&fe{0x898985385cdbbd8b, 0x3c79e43cc7d966aa, 0x1597e193f4cd233a, 0x8637ef1e4d6623ad, 0x11b22deed20d827b, 0x07097bc5998784ad}, {0x898985385cdbbd8b, 0x3c79e43cc7d966aa, 0x1597e193f4cd233a, 0x8637ef1e4d6623ad, 0x11b22deed20d827b, 0x07097bc5998784ad},
&fe{0xa542583a480b664b, 0xfc7169c026e568c6, 0x5ba2ef314ed8b5a6, 0x5b5491c05102f0e7, 0xdf6e99707d2a0079, 0x0784151ed7605524}, {0xa542583a480b664b, 0xfc7169c026e568c6, 0x5ba2ef314ed8b5a6, 0x5b5491c05102f0e7, 0xdf6e99707d2a0079, 0x0784151ed7605524},
&fe{0x494e212870f72741, 0xab9be52fbda43021, 0x26f5577994e34c3d, 0x049dfee82aefbd60, 0x65dadd7828505289, 0x0e93d431ea011aeb}, {0x494e212870f72741, 0xab9be52fbda43021, 0x26f5577994e34c3d, 0x049dfee82aefbd60, 0x65dadd7828505289, 0x0e93d431ea011aeb},
&fe{0x90ee774bd6a74d45, 0x7ada1c8a41bfb185, 0x0f1a8953b325f464, 0x104c24211be4805c, 0x169139d319ea7a8f, 0x09f20ead8e532bf6}, {0x90ee774bd6a74d45, 0x7ada1c8a41bfb185, 0x0f1a8953b325f464, 0x104c24211be4805c, 0x169139d319ea7a8f, 0x09f20ead8e532bf6},
&fe{0x6ddd93e2f43626b7, 0xa5482c9aa1ccd7bd, 0x143245631883f4bd, 0x2e0a94ccf77ec0db, 0xb0282d480e56489f, 0x18f4bfcbb4368929}, {0x6ddd93e2f43626b7, 0xa5482c9aa1ccd7bd, 0x143245631883f4bd, 0x2e0a94ccf77ec0db, 0xb0282d480e56489f, 0x18f4bfcbb4368929},
&fe{0x23c5f0c953402dfd, 0x7a43ff6958ce4fe9, 0x2c390d3d2da5df63, 0xd0df5c98e1f9d70f, 0xffd89869a572b297, 0x1277ffc72f25e8fe}, {0x23c5f0c953402dfd, 0x7a43ff6958ce4fe9, 0x2c390d3d2da5df63, 0xd0df5c98e1f9d70f, 0xffd89869a572b297, 0x1277ffc72f25e8fe},
&fe{0x79f4f0490f06a8a6, 0x85f894a88030fd81, 0x12da3054b18b6410, 0xe2a57f6505880d65, 0xbba074f260e400f1, 0x08b76279f621d028}, {0x79f4f0490f06a8a6, 0x85f894a88030fd81, 0x12da3054b18b6410, 0xe2a57f6505880d65, 0xbba074f260e400f1, 0x08b76279f621d028},
&fe{0xe67245ba78d5b00b, 0x8456ba9a1f186475, 0x7888bff6e6b33bb4, 0xe21585b9a30f86cb, 0x05a69cdcef55feee, 0x09e699dd9adfa5ac}, {0xe67245ba78d5b00b, 0x8456ba9a1f186475, 0x7888bff6e6b33bb4, 0xe21585b9a30f86cb, 0x05a69cdcef55feee, 0x09e699dd9adfa5ac},
&fe{0x0de5c357bff57107, 0x0a0db4ae6b1a10b2, 0xe256bb67b3b3cd8d, 0x8ad456574e9db24f, 0x0443915f50fd4179, 0x098c4bf7de8b6375}, {0x0de5c357bff57107, 0x0a0db4ae6b1a10b2, 0xe256bb67b3b3cd8d, 0x8ad456574e9db24f, 0x0443915f50fd4179, 0x098c4bf7de8b6375},
&fe{0xe6b0617e7dd929c7, 0xfe6e37d442537375, 0x1dafdeda137a489e, 0xe4efd1ad3f767ceb, 0x4a51d8667f0fe1cf, 0x054fdf4bbf1d821c}, {0xe6b0617e7dd929c7, 0xfe6e37d442537375, 0x1dafdeda137a489e, 0xe4efd1ad3f767ceb, 0x4a51d8667f0fe1cf, 0x054fdf4bbf1d821c},
&fe{0x72db2a50658d767b, 0x8abf91faa257b3d5, 0xe969d6833764ab47, 0x464170142a1009eb, 0xb14f01aadb30be2f, 0x18ae6a856f40715d}, {0x72db2a50658d767b, 0x8abf91faa257b3d5, 0xe969d6833764ab47, 0x464170142a1009eb, 0xb14f01aadb30be2f, 0x18ae6a856f40715d},
&fe{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0},
&fe{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0},
&fe{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0},
&fe{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0},
}, },
[16]*fe{ {
&fe{0xb962a077fdb0f945, 0xa6a9740fefda13a0, 0xc14d568c3ed6c544, 0xb43fc37b908b133e, 0x9c0b3ac929599016, 0x0165aa6c93ad115f}, {0xb962a077fdb0f945, 0xa6a9740fefda13a0, 0xc14d568c3ed6c544, 0xb43fc37b908b133e, 0x9c0b3ac929599016, 0x0165aa6c93ad115f},
&fe{0x23279a3ba506c1d9, 0x92cfca0a9465176a, 0x3b294ab13755f0ff, 0x116dda1c5070ae93, 0xed4530924cec2045, 0x083383d6ed81f1ce}, {0x23279a3ba506c1d9, 0x92cfca0a9465176a, 0x3b294ab13755f0ff, 0x116dda1c5070ae93, 0xed4530924cec2045, 0x083383d6ed81f1ce},
&fe{0x9885c2a6449fecfc, 0x4a2b54ccd37733f0, 0x17da9ffd8738c142, 0xa0fba72732b3fafd, 0xff364f36e54b6812, 0x0f29c13c660523e2}, {0x9885c2a6449fecfc, 0x4a2b54ccd37733f0, 0x17da9ffd8738c142, 0xa0fba72732b3fafd, 0xff364f36e54b6812, 0x0f29c13c660523e2},
&fe{0xe349cc118278f041, 0xd487228f2f3204fb, 0xc9d325849ade5150, 0x43a92bd69c15c2df, 0x1c2c7844bc417be4, 0x12025184f407440c}, {0xe349cc118278f041, 0xd487228f2f3204fb, 0xc9d325849ade5150, 0x43a92bd69c15c2df, 0x1c2c7844bc417be4, 0x12025184f407440c},
&fe{0x587f65ae6acb057b, 0x1444ef325140201f, 0xfbf995e71270da49, 0xccda066072436a42, 0x7408904f0f186bb2, 0x13b93c63edf6c015}, {0x587f65ae6acb057b, 0x1444ef325140201f, 0xfbf995e71270da49, 0xccda066072436a42, 0x7408904f0f186bb2, 0x13b93c63edf6c015},
&fe{0xfb918622cd141920, 0x4a4c64423ecaddb4, 0x0beb232927f7fb26, 0x30f94df6f83a3dc2, 0xaeedd424d780f388, 0x06cc402dd594bbeb}, {0xfb918622cd141920, 0x4a4c64423ecaddb4, 0x0beb232927f7fb26, 0x30f94df6f83a3dc2, 0xaeedd424d780f388, 0x06cc402dd594bbeb},
&fe{0xd41f761151b23f8f, 0x32a92465435719b3, 0x64f436e888c62cb9, 0xdf70a9a1f757c6e4, 0x6933a38d5b594c81, 0x0c6f7f7237b46606}, {0xd41f761151b23f8f, 0x32a92465435719b3, 0x64f436e888c62cb9, 0xdf70a9a1f757c6e4, 0x6933a38d5b594c81, 0x0c6f7f7237b46606},
&fe{0x693c08747876c8f7, 0x22c9850bf9cf80f0, 0x8e9071dab950c124, 0x89bc62d61c7baf23, 0xbc6be2d8dad57c23, 0x17916987aa14a122}, {0x693c08747876c8f7, 0x22c9850bf9cf80f0, 0x8e9071dab950c124, 0x89bc62d61c7baf23, 0xbc6be2d8dad57c23, 0x17916987aa14a122},
&fe{0x1be3ff439c1316fd, 0x9965243a7571dfa7, 0xc7f7f62962f5cd81, 0x32c6aa9af394361c, 0xbbc2ee18e1c227f4, 0x0c102cbac531bb34}, {0x1be3ff439c1316fd, 0x9965243a7571dfa7, 0xc7f7f62962f5cd81, 0x32c6aa9af394361c, 0xbbc2ee18e1c227f4, 0x0c102cbac531bb34},
&fe{0x997614c97bacbf07, 0x61f86372b99192c0, 0x5b8c95fc14353fc3, 0xca2b066c2a87492f, 0x16178f5bbf698711, 0x12a6dcd7f0f4e0e8}, {0x997614c97bacbf07, 0x61f86372b99192c0, 0x5b8c95fc14353fc3, 0xca2b066c2a87492f, 0x16178f5bbf698711, 0x12a6dcd7f0f4e0e8},
&fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493}, {0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
&fe{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0},
&fe{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0},
&fe{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0},
&fe{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0},
&fe{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0},
}, },
[16]*fe{ {
&fe{0x2b567ff3e2837267, 0x1d4d9e57b958a767, 0xce028fea04bd7373, 0xcc31a30a0b6cd3df, 0x7d7b18a682692693, 0x0d300744d42a0310}, {0x2b567ff3e2837267, 0x1d4d9e57b958a767, 0xce028fea04bd7373, 0xcc31a30a0b6cd3df, 0x7d7b18a682692693, 0x0d300744d42a0310},
&fe{0x99c2555fa542493f, 0xfe7f53cc4874f878, 0x5df0608b8f97608a, 0x14e03832052b49c8, 0x706326a6957dd5a4, 0x0a8dadd9c2414555}, {0x99c2555fa542493f, 0xfe7f53cc4874f878, 0x5df0608b8f97608a, 0x14e03832052b49c8, 0x706326a6957dd5a4, 0x0a8dadd9c2414555},
&fe{0x13d942922a5cf63a, 0x357e33e36e261e7d, 0xcf05a27c8456088d, 0x0000bd1de7ba50f0, 0x83d0c7532f8c1fde, 0x13f70bf38bbf2905}, {0x13d942922a5cf63a, 0x357e33e36e261e7d, 0xcf05a27c8456088d, 0x0000bd1de7ba50f0, 0x83d0c7532f8c1fde, 0x13f70bf38bbf2905},
&fe{0x5c57fd95bfafbdbb, 0x28a359a65e541707, 0x3983ceb4f6360b6d, 0xafe19ff6f97e6d53, 0xb3468f4550192bf7, 0x0bb6cde49d8ba257}, {0x5c57fd95bfafbdbb, 0x28a359a65e541707, 0x3983ceb4f6360b6d, 0xafe19ff6f97e6d53, 0xb3468f4550192bf7, 0x0bb6cde49d8ba257},
&fe{0x590b62c7ff8a513f, 0x314b4ce372cacefd, 0x6bef32ce94b8a800, 0x6ddf84a095713d5f, 0x64eace4cb0982191, 0x0386213c651b888d}, {0x590b62c7ff8a513f, 0x314b4ce372cacefd, 0x6bef32ce94b8a800, 0x6ddf84a095713d5f, 0x64eace4cb0982191, 0x0386213c651b888d},
&fe{0xa5310a31111bbcdd, 0xa14ac0f5da148982, 0xf9ad9cc95423d2e9, 0xaa6ec095283ee4a7, 0xcf5b1f022e1c9107, 0x01fddf5aed881793}, {0xa5310a31111bbcdd, 0xa14ac0f5da148982, 0xf9ad9cc95423d2e9, 0xaa6ec095283ee4a7, 0xcf5b1f022e1c9107, 0x01fddf5aed881793},
&fe{0x65a572b0d7a7d950, 0xe25c2d8183473a19, 0xc2fcebe7cb877dbd, 0x05b2d36c769a89b0, 0xba12961be86e9efb, 0x07eb1b29c1dfde1f}, {0x65a572b0d7a7d950, 0xe25c2d8183473a19, 0xc2fcebe7cb877dbd, 0x05b2d36c769a89b0, 0xba12961be86e9efb, 0x07eb1b29c1dfde1f},
&fe{0x93e09572f7c4cd24, 0x364e929076795091, 0x8569467e68af51b5, 0xa47da89439f5340f, 0xf4fa918082e44d64, 0x0ad52ba3e6695a79}, {0x93e09572f7c4cd24, 0x364e929076795091, 0x8569467e68af51b5, 0xa47da89439f5340f, 0xf4fa918082e44d64, 0x0ad52ba3e6695a79},
&fe{0x911429844e0d5f54, 0xd03f51a3516bb233, 0x3d587e5640536e66, 0xfa86d2a3a9a73482, 0xa90ed5adf1ed5537, 0x149c9c326a5e7393}, {0x911429844e0d5f54, 0xd03f51a3516bb233, 0x3d587e5640536e66, 0xfa86d2a3a9a73482, 0xa90ed5adf1ed5537, 0x149c9c326a5e7393},
&fe{0x462bbeb03c12921a, 0xdc9af5fa0a274a17, 0x9a558ebde836ebed, 0x649ef8f11a4fae46, 0x8100e1652b3cdc62, 0x1862bd62c291dacb}, {0x462bbeb03c12921a, 0xdc9af5fa0a274a17, 0x9a558ebde836ebed, 0x649ef8f11a4fae46, 0x8100e1652b3cdc62, 0x1862bd62c291dacb},
&fe{0x05c9b8ca89f12c26, 0x0194160fa9b9ac4f, 0x6a643d5a6879fa2c, 0x14665bdd8846e19d, 0xbb1d0d53af3ff6bf, 0x12c7e1c3b28962e5}, {0x05c9b8ca89f12c26, 0x0194160fa9b9ac4f, 0x6a643d5a6879fa2c, 0x14665bdd8846e19d, 0xbb1d0d53af3ff6bf, 0x12c7e1c3b28962e5},
&fe{0xb55ebf900b8a3e17, 0xfedc77ec1a9201c4, 0x1f07db10ea1a4df4, 0x0dfbd15dc41a594d, 0x389547f2334a5391, 0x02419f98165871a4}, {0xb55ebf900b8a3e17, 0xfedc77ec1a9201c4, 0x1f07db10ea1a4df4, 0x0dfbd15dc41a594d, 0x389547f2334a5391, 0x02419f98165871a4},
&fe{0xb416af000745fc20, 0x8e563e9d1ea6d0f5, 0x7c763e17763a0652, 0x01458ef0159ebbef, 0x8346fe421f96bb13, 0x0d2d7b829ce324d2}, {0xb416af000745fc20, 0x8e563e9d1ea6d0f5, 0x7c763e17763a0652, 0x01458ef0159ebbef, 0x8346fe421f96bb13, 0x0d2d7b829ce324d2},
&fe{0x93096bb538d64615, 0x6f2a2619951d823a, 0x8f66b3ea59514fa4, 0xf563e63704f7092f, 0x724b136c4cf2d9fa, 0x046959cfcfd0bf49}, {0x93096bb538d64615, 0x6f2a2619951d823a, 0x8f66b3ea59514fa4, 0xf563e63704f7092f, 0x724b136c4cf2d9fa, 0x046959cfcfd0bf49},
&fe{0xea748d4b6e405346, 0x91e9079c2c02d58f, 0x41064965946d9b59, 0xa06731f1d2bbe1ee, 0x07f897e267a33f1b, 0x1017290919210e5f}, {0xea748d4b6e405346, 0x91e9079c2c02d58f, 0x41064965946d9b59, 0xa06731f1d2bbe1ee, 0x07f897e267a33f1b, 0x1017290919210e5f},
&fe{0x872aa6c17d985097, 0xeecc53161264562a, 0x07afe37afff55002, 0x54759078e5be6838, 0xc4b92d15db8acca8, 0x106d87d1b51d13b9}, {0x872aa6c17d985097, 0xeecc53161264562a, 0x07afe37afff55002, 0x54759078e5be6838, 0xc4b92d15db8acca8, 0x106d87d1b51d13b9},
}, },
[16]*fe{ {
&fe{0xeb6c359d47e52b1c, 0x18ef5f8a10634d60, 0xddfa71a0889d5b7e, 0x723e71dcc5fc1323, 0x52f45700b70d5c69, 0x0a8b981ee47691f1}, {0xeb6c359d47e52b1c, 0x18ef5f8a10634d60, 0xddfa71a0889d5b7e, 0x723e71dcc5fc1323, 0x52f45700b70d5c69, 0x0a8b981ee47691f1},
&fe{0x616a3c4f5535b9fb, 0x6f5f037395dbd911, 0xf25f4cc5e35c65da, 0x3e50dffea3c62658, 0x6a33dca523560776, 0x0fadeff77b6bfe3e}, {0x616a3c4f5535b9fb, 0x6f5f037395dbd911, 0xf25f4cc5e35c65da, 0x3e50dffea3c62658, 0x6a33dca523560776, 0x0fadeff77b6bfe3e},
&fe{0x2be9b66df470059c, 0x24a2c159a3d36742, 0x115dbe7ad10c2a37, 0xb6634a652ee5884d, 0x04fe8bb2b8d81af4, 0x01c2a7a256fe9c41}, {0x2be9b66df470059c, 0x24a2c159a3d36742, 0x115dbe7ad10c2a37, 0xb6634a652ee5884d, 0x04fe8bb2b8d81af4, 0x01c2a7a256fe9c41},
&fe{0xf27bf8ef3b75a386, 0x898b367476c9073f, 0x24482e6b8c2f4e5f, 0xc8e0bbd6fe110806, 0x59b0c17f7631448a, 0x11037cd58b3dbfbd}, {0xf27bf8ef3b75a386, 0x898b367476c9073f, 0x24482e6b8c2f4e5f, 0xc8e0bbd6fe110806, 0x59b0c17f7631448a, 0x11037cd58b3dbfbd},
&fe{0x31c7912ea267eec6, 0x1dbf6f1c5fcdb700, 0xd30d4fe3ba86fdb1, 0x3cae528fbee9a2a4, 0xb1cce69b6aa9ad9a, 0x044393bb632d94fb}, {0x31c7912ea267eec6, 0x1dbf6f1c5fcdb700, 0xd30d4fe3ba86fdb1, 0x3cae528fbee9a2a4, 0xb1cce69b6aa9ad9a, 0x044393bb632d94fb},
&fe{0xc66ef6efeeb5c7e8, 0x9824c289dd72bb55, 0x71b1a4d2f119981d, 0x104fc1aafb0919cc, 0x0e49df01d942a628, 0x096c3a09773272d4}, {0xc66ef6efeeb5c7e8, 0x9824c289dd72bb55, 0x71b1a4d2f119981d, 0x104fc1aafb0919cc, 0x0e49df01d942a628, 0x096c3a09773272d4},
&fe{0x9abc11eb5fadeff4, 0x32dca50a885728f0, 0xfb1fa3721569734c, 0xc4b76271ea6506b3, 0xd466a75599ce728e, 0x0c81d4645f4cb6ed}, {0x9abc11eb5fadeff4, 0x32dca50a885728f0, 0xfb1fa3721569734c, 0xc4b76271ea6506b3, 0xd466a75599ce728e, 0x0c81d4645f4cb6ed},
&fe{0x4199f10e5b8be45b, 0xda64e495b1e87930, 0xcb353efe9b33e4ff, 0x9e9efb24aa6424c6, 0xf08d33680a237465, 0x0d3378023e4c7406}, {0x4199f10e5b8be45b, 0xda64e495b1e87930, 0xcb353efe9b33e4ff, 0x9e9efb24aa6424c6, 0xf08d33680a237465, 0x0d3378023e4c7406},
&fe{0x7eb4ae92ec74d3a5, 0xc341b4aa9fac3497, 0x5be603899e907687, 0x03bfd9cca75cbdeb, 0x564c2935a96bfa93, 0x0ef3c33371e2fdb5}, {0x7eb4ae92ec74d3a5, 0xc341b4aa9fac3497, 0x5be603899e907687, 0x03bfd9cca75cbdeb, 0x564c2935a96bfa93, 0x0ef3c33371e2fdb5},
&fe{0x7ee91fd449f6ac2e, 0xe5d5bd5cb9357a30, 0x773a8ca5196b1380, 0xd0fda172174ed023, 0x6cb95e0fa776aead, 0x0d22d5a40cec7cff}, {0x7ee91fd449f6ac2e, 0xe5d5bd5cb9357a30, 0x773a8ca5196b1380, 0xd0fda172174ed023, 0x6cb95e0fa776aead, 0x0d22d5a40cec7cff},
&fe{0xf727e09285fd8519, 0xdc9d55a83017897b, 0x7549d8bd057894ae, 0x178419613d90d8f8, 0xfce95ebdeb5b490a, 0x0467ffaef23fc49e}, {0xf727e09285fd8519, 0xdc9d55a83017897b, 0x7549d8bd057894ae, 0x178419613d90d8f8, 0xfce95ebdeb5b490a, 0x0467ffaef23fc49e},
&fe{0xc1769e6a7c385f1b, 0x79bc930deac01c03, 0x5461c75a23ede3b5, 0x6e20829e5c230c45, 0x828e0f1e772a53cd, 0x116aefa749127bff}, {0xc1769e6a7c385f1b, 0x79bc930deac01c03, 0x5461c75a23ede3b5, 0x6e20829e5c230c45, 0x828e0f1e772a53cd, 0x116aefa749127bff},
&fe{0x101c10bf2744c10a, 0xbbf18d053a6a3154, 0xa0ecf39ef026f602, 0xfc009d4996dc5153, 0xb9000209d5bd08d3, 0x189e5fe4470cd73c}, {0x101c10bf2744c10a, 0xbbf18d053a6a3154, 0xa0ecf39ef026f602, 0xfc009d4996dc5153, 0xb9000209d5bd08d3, 0x189e5fe4470cd73c},
&fe{0x7ebd546ca1575ed2, 0xe47d5a981d081b55, 0x57b2b625b6d4ca21, 0xb0a1ba04228520cc, 0x98738983c2107ff3, 0x13dddbc4799d81d6}, {0x7ebd546ca1575ed2, 0xe47d5a981d081b55, 0x57b2b625b6d4ca21, 0xb0a1ba04228520cc, 0x98738983c2107ff3, 0x13dddbc4799d81d6},
&fe{0x09319f2e39834935, 0x039e952cbdb05c21, 0x55ba77a9a2f76493, 0xfd04e3dfc6086467, 0xfb95832e7d78742e, 0x0ef9c24eccaf5e0e}, {0x09319f2e39834935, 0x039e952cbdb05c21, 0x55ba77a9a2f76493, 0xfd04e3dfc6086467, 0xfb95832e7d78742e, 0x0ef9c24eccaf5e0e},
&fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493}, {0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
}, },
} }
var isogenyConstantsG2 = [4][4]*fe2{ var isogenyConstantsG2 = [4][4]*fe2{
[4]*fe2{ {
&fe2{ {
fe{0x47f671c71ce05e62, 0x06dd57071206393e, 0x7c80cd2af3fd71a2, 0x048103ea9e6cd062, 0xc54516acc8d037f6, 0x13808f550920ea41}, fe{0x47f671c71ce05e62, 0x06dd57071206393e, 0x7c80cd2af3fd71a2, 0x048103ea9e6cd062, 0xc54516acc8d037f6, 0x13808f550920ea41},
fe{0x47f671c71ce05e62, 0x06dd57071206393e, 0x7c80cd2af3fd71a2, 0x048103ea9e6cd062, 0xc54516acc8d037f6, 0x13808f550920ea41}, fe{0x47f671c71ce05e62, 0x06dd57071206393e, 0x7c80cd2af3fd71a2, 0x048103ea9e6cd062, 0xc54516acc8d037f6, 0x13808f550920ea41},
}, },
&fe2{ {
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
fe{0x5fe55555554c71d0, 0x873fffdd236aaaa3, 0x6a6b4619b26ef918, 0x21c2888408874945, 0x2836cda7028cabc5, 0x0ac73310a7fd5abd}, fe{0x5fe55555554c71d0, 0x873fffdd236aaaa3, 0x6a6b4619b26ef918, 0x21c2888408874945, 0x2836cda7028cabc5, 0x0ac73310a7fd5abd},
}, },
&fe2{ {
fe{0x0a0c5555555971c3, 0xdb0c00101f9eaaae, 0xb1fb2f941d797997, 0xd3960742ef416e1c, 0xb70040e2c20556f4, 0x149d7861e581393b}, fe{0x0a0c5555555971c3, 0xdb0c00101f9eaaae, 0xb1fb2f941d797997, 0xd3960742ef416e1c, 0xb70040e2c20556f4, 0x149d7861e581393b},
fe{0xaff2aaaaaaa638e8, 0x439fffee91b55551, 0xb535a30cd9377c8c, 0x90e144420443a4a2, 0x941b66d3814655e2, 0x0563998853fead5e}, fe{0xaff2aaaaaaa638e8, 0x439fffee91b55551, 0xb535a30cd9377c8c, 0x90e144420443a4a2, 0x941b66d3814655e2, 0x0563998853fead5e},
}, },
&fe2{ {
fe{0x40aac71c71c725ed, 0x190955557a84e38e, 0xd817050a8f41abc3, 0xd86485d4c87f6fb1, 0x696eb479f885d059, 0x198e1a74328002d2}, fe{0x40aac71c71c725ed, 0x190955557a84e38e, 0xd817050a8f41abc3, 0xd86485d4c87f6fb1, 0x696eb479f885d059, 0x198e1a74328002d2},
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
}, },
}, },
[4]*fe2{ {
&fe2{ {
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
fe{0x1f3affffff13ab97, 0xf25bfc611da3ff3e, 0xca3757cb3819b208, 0x3e6427366f8cec18, 0x03977bc86095b089, 0x04f69db13f39a952}, fe{0x1f3affffff13ab97, 0xf25bfc611da3ff3e, 0xca3757cb3819b208, 0x3e6427366f8cec18, 0x03977bc86095b089, 0x04f69db13f39a952},
}, },
&fe2{ {
fe{0x447600000027552e, 0xdcb8009a43480020, 0x6f7ee9ce4a6e8b59, 0xb10330b7c0a95bc6, 0x6140b1fcfb1e54b7, 0x0381be097f0bb4e1}, fe{0x447600000027552e, 0xdcb8009a43480020, 0x6f7ee9ce4a6e8b59, 0xb10330b7c0a95bc6, 0x6140b1fcfb1e54b7, 0x0381be097f0bb4e1},
fe{0x7588ffffffd8557d, 0x41f3ff646e0bffdf, 0xf7b1e8d2ac426aca, 0xb3741acd32dbb6f8, 0xe9daf5b9482d581f, 0x167f53e0ba7431b8}, fe{0x7588ffffffd8557d, 0x41f3ff646e0bffdf, 0xf7b1e8d2ac426aca, 0xb3741acd32dbb6f8, 0xe9daf5b9482d581f, 0x167f53e0ba7431b8},
}, },
&fe2{ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493}, fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
}, },
&fe2{ {
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
}, },
}, },
[4]*fe2{ {
&fe2{ {
fe{0x96d8f684bdfc77be, 0xb530e4f43b66d0e2, 0x184a88ff379652fd, 0x57cb23ecfae804e1, 0x0fd2e39eada3eba9, 0x08c8055e31c5d5c3}, fe{0x96d8f684bdfc77be, 0xb530e4f43b66d0e2, 0x184a88ff379652fd, 0x57cb23ecfae804e1, 0x0fd2e39eada3eba9, 0x08c8055e31c5d5c3},
fe{0x96d8f684bdfc77be, 0xb530e4f43b66d0e2, 0x184a88ff379652fd, 0x57cb23ecfae804e1, 0x0fd2e39eada3eba9, 0x08c8055e31c5d5c3}, fe{0x96d8f684bdfc77be, 0xb530e4f43b66d0e2, 0x184a88ff379652fd, 0x57cb23ecfae804e1, 0x0fd2e39eada3eba9, 0x08c8055e31c5d5c3},
}, },
&fe2{ {
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
fe{0xbf0a71c71c91b406, 0x4d6d55d28b7638fd, 0x9d82f98e5f205aee, 0xa27aa27b1d1a18d5, 0x02c3b2b2d2938e86, 0x0c7d13420b09807f}, fe{0xbf0a71c71c91b406, 0x4d6d55d28b7638fd, 0x9d82f98e5f205aee, 0xa27aa27b1d1a18d5, 0x02c3b2b2d2938e86, 0x0c7d13420b09807f},
}, },
&fe2{ {
fe{0xd7f9555555531c74, 0x21cffff748daaaa8, 0x5a9ad1866c9bbe46, 0x4870a2210221d251, 0x4a0db369c0a32af1, 0x02b1ccc429ff56af}, fe{0xd7f9555555531c74, 0x21cffff748daaaa8, 0x5a9ad1866c9bbe46, 0x4870a2210221d251, 0x4a0db369c0a32af1, 0x02b1ccc429ff56af},
fe{0xe205aaaaaaac8e37, 0xfcdc000768795556, 0x0c96011a8a1537dd, 0x1c06a963f163406e, 0x010df44c82a881e6, 0x174f45260f808feb}, fe{0xe205aaaaaaac8e37, 0xfcdc000768795556, 0x0c96011a8a1537dd, 0x1c06a963f163406e, 0x010df44c82a881e6, 0x174f45260f808feb},
}, },
&fe2{ {
fe{0xa470bda12f67f35c, 0xc0fe38e23327b425, 0xc9d3d0f2c6f0678d, 0x1c55c9935b5a982e, 0x27f6c0e2f0746764, 0x117c5e6e28aa9054}, fe{0xa470bda12f67f35c, 0xc0fe38e23327b425, 0xc9d3d0f2c6f0678d, 0x1c55c9935b5a982e, 0x27f6c0e2f0746764, 0x117c5e6e28aa9054},
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
}, },
}, },
[4]*fe2{ {
&fe2{ {
fe{0x0162fffffa765adf, 0x8f7bea480083fb75, 0x561b3c2259e93611, 0x11e19fc1a9c875d5, 0xca713efc00367660, 0x03c6a03d41da1151}, fe{0x0162fffffa765adf, 0x8f7bea480083fb75, 0x561b3c2259e93611, 0x11e19fc1a9c875d5, 0xca713efc00367660, 0x03c6a03d41da1151},
fe{0x0162fffffa765adf, 0x8f7bea480083fb75, 0x561b3c2259e93611, 0x11e19fc1a9c875d5, 0xca713efc00367660, 0x03c6a03d41da1151}, fe{0x0162fffffa765adf, 0x8f7bea480083fb75, 0x561b3c2259e93611, 0x11e19fc1a9c875d5, 0xca713efc00367660, 0x03c6a03d41da1151},
}, },
&fe2{ {
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
fe{0x5db0fffffd3b02c5, 0xd713f52358ebfdba, 0x5ea60761a84d161a, 0xbb2c75a34ea6c44a, 0x0ac6735921c1119b, 0x0ee3d913bdacfbf6}, fe{0x5db0fffffd3b02c5, 0xd713f52358ebfdba, 0x5ea60761a84d161a, 0xbb2c75a34ea6c44a, 0x0ac6735921c1119b, 0x0ee3d913bdacfbf6},
}, },
&fe2{ {
fe{0x66b10000003affc5, 0xcb1400e764ec0030, 0xa73e5eb56fa5d106, 0x8984c913a0fe09a9, 0x11e10afb78ad7f13, 0x05429d0e3e918f52}, fe{0x66b10000003affc5, 0xcb1400e764ec0030, 0xa73e5eb56fa5d106, 0x8984c913a0fe09a9, 0x11e10afb78ad7f13, 0x05429d0e3e918f52},
fe{0x534dffffffc4aae6, 0x5397ff174c67ffcf, 0xbff273eb870b251d, 0xdaf2827152870915, 0x393a9cbaca9e2dc3, 0x14be74dbfaee5748}, fe{0x534dffffffc4aae6, 0x5397ff174c67ffcf, 0xbff273eb870b251d, 0xdaf2827152870915, 0x393a9cbaca9e2dc3, 0x14be74dbfaee5748},
}, },
&fe2{ {
fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493}, fe{0x760900000002fffd, 0xebf4000bc40c0002, 0x5f48985753c758ba, 0x77ce585370525745, 0x5c071a97a256ec6d, 0x15f65ec3fa80e493},
fe{0, 0, 0, 0, 0, 0}, fe{0, 0, 0, 0, 0, 0},
}, },
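Every hunk in this file is the same mechanical cleanup: gofmt -s elides the redundant element type — and the & for pointer elements — inside composite literals. A self-contained illustration on toy types:

```go
// gofmt -s composite-literal simplification, illustrated.
type pair [2]int

var verbose = [2]pair{pair{1, 2}, pair{3, 4}} // flagged by gofmt -s
var simple = [2]pair{{1, 2}, {3, 4}}          // simplified form

// For pointer elements the address operator is elided as well:
var verbosePtr = []*pair{&pair{5, 6}}
var simplePtr = []*pair{{5, 6}}
```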


@ -17,7 +17,7 @@
package bls12381 package bls12381
// swuMapG1 is implementation of Simplified Shallue-van de Woestijne-Ulas Method // swuMapG1 is implementation of Simplified Shallue-van de Woestijne-Ulas Method
// follows the implmentation at draft-irtf-cfrg-hash-to-curve-06. // follows the implementation at draft-irtf-cfrg-hash-to-curve-06.
func swuMapG1(u *fe) (*fe, *fe) { func swuMapG1(u *fe) (*fe, *fe) {
var params = swuParamsForG1 var params = swuParamsForG1
var tv [4]*fe var tv [4]*fe


@ -49,7 +49,7 @@ TEXT ·gfpNeg(SB),0,$0-16
SBBQ 24(DI), R11 SBBQ 24(DI), R11
MOVQ $0, AX MOVQ $0, AX
gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,R15,BX) gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,CX,BX)
MOVQ c+0(FP), DI MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI)) storeBlock(R8,R9,R10,R11, 0(DI))
@ -68,7 +68,7 @@ TEXT ·gfpAdd(SB),0,$0-24
ADCQ 24(SI), R11 ADCQ 24(SI), R11
ADCQ $0, R12 ADCQ $0, R12
gfpCarry(R8,R9,R10,R11,R12, R13,R14,R15,AX,BX) gfpCarry(R8,R9,R10,R11,R12, R13,R14,CX,AX,BX)
MOVQ c+0(FP), DI MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI)) storeBlock(R8,R9,R10,R11, 0(DI))
@ -83,7 +83,7 @@ TEXT ·gfpSub(SB),0,$0-24
MOVQ ·p2+0(SB), R12 MOVQ ·p2+0(SB), R12
MOVQ ·p2+8(SB), R13 MOVQ ·p2+8(SB), R13
MOVQ ·p2+16(SB), R14 MOVQ ·p2+16(SB), R14
MOVQ ·p2+24(SB), R15 MOVQ ·p2+24(SB), CX
MOVQ $0, AX MOVQ $0, AX
SUBQ 0(SI), R8 SUBQ 0(SI), R8
@ -94,12 +94,12 @@ TEXT ·gfpSub(SB),0,$0-24
CMOVQCC AX, R12 CMOVQCC AX, R12
CMOVQCC AX, R13 CMOVQCC AX, R13
CMOVQCC AX, R14 CMOVQCC AX, R14
CMOVQCC AX, R15 CMOVQCC AX, CX
ADDQ R12, R8 ADDQ R12, R8
ADCQ R13, R9 ADCQ R13, R9
ADCQ R14, R10 ADCQ R14, R10
ADCQ R15, R11 ADCQ CX, R11
MOVQ c+0(FP), DI MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI)) storeBlock(R8,R9,R10,R11, 0(DI))
@ -115,7 +115,7 @@ TEXT ·gfpMul(SB),0,$160-24
mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI)) mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI))
storeBlock( R8, R9,R10,R11, 0(SP)) storeBlock( R8, R9,R10,R11, 0(SP))
storeBlock(R12,R13,R14,R15, 32(SP)) storeBlock(R12,R13,R14,CX, 32(SP))
gfpReduceBMI2() gfpReduceBMI2()
JMP end JMP end
@ -125,5 +125,5 @@ nobmi2Mul:
end: end:
MOVQ c+0(FP), DI MOVQ c+0(FP), DI
storeBlock(R12,R13,R14,R15, 0(DI)) storeBlock(R12,R13,R14,CX, 0(DI))
RET RET


@ -165,7 +165,7 @@
\ \
\ // Add the 512-bit intermediate to m*N \ // Add the 512-bit intermediate to m*N
loadBlock(96+stack, R8,R9,R10,R11) \ loadBlock(96+stack, R8,R9,R10,R11) \
loadBlock(128+stack, R12,R13,R14,R15) \ loadBlock(128+stack, R12,R13,R14,CX) \
\ \
MOVQ $0, AX \ MOVQ $0, AX \
ADDQ 0+stack, R8 \ ADDQ 0+stack, R8 \
@ -175,7 +175,7 @@
ADCQ 32+stack, R12 \ ADCQ 32+stack, R12 \
ADCQ 40+stack, R13 \ ADCQ 40+stack, R13 \
ADCQ 48+stack, R14 \ ADCQ 48+stack, R14 \
ADCQ 56+stack, R15 \ ADCQ 56+stack, CX \
ADCQ $0, AX \ ADCQ $0, AX \
\ \
gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX) gfpCarry(R12,R13,R14,CX,AX, R8,R9,R10,R11,BX)


@ -29,7 +29,7 @@
ADCQ $0, R14 \ ADCQ $0, R14 \
\ \
MOVQ a2, DX \ MOVQ a2, DX \
MOVQ $0, R15 \ MOVQ $0, CX \
MULXQ 0+rb, AX, BX \ MULXQ 0+rb, AX, BX \
ADDQ AX, R10 \ ADDQ AX, R10 \
ADCQ BX, R11 \ ADCQ BX, R11 \
@ -43,7 +43,7 @@
MULXQ 24+rb, AX, BX \ MULXQ 24+rb, AX, BX \
ADCQ AX, R13 \ ADCQ AX, R13 \
ADCQ BX, R14 \ ADCQ BX, R14 \
ADCQ $0, R15 \ ADCQ $0, CX \
\ \
MOVQ a3, DX \ MOVQ a3, DX \
MULXQ 0+rb, AX, BX \ MULXQ 0+rb, AX, BX \
@ -52,13 +52,13 @@
MULXQ 16+rb, AX, BX \ MULXQ 16+rb, AX, BX \
ADCQ AX, R13 \ ADCQ AX, R13 \
ADCQ BX, R14 \ ADCQ BX, R14 \
ADCQ $0, R15 \ ADCQ $0, CX \
MULXQ 8+rb, AX, BX \ MULXQ 8+rb, AX, BX \
ADDQ AX, R12 \ ADDQ AX, R12 \
ADCQ BX, R13 \ ADCQ BX, R13 \
MULXQ 24+rb, AX, BX \ MULXQ 24+rb, AX, BX \
ADCQ AX, R14 \ ADCQ AX, R14 \
ADCQ BX, R15 ADCQ BX, CX
#define gfpReduceBMI2() \ #define gfpReduceBMI2() \
\ // m = (T * N') mod R, store m in R8:R9:R10:R11 \ // m = (T * N') mod R, store m in R8:R9:R10:R11
@ -106,7 +106,7 @@
ADCQ 32(SP), R12 \ ADCQ 32(SP), R12 \
ADCQ 40(SP), R13 \ ADCQ 40(SP), R13 \
ADCQ 48(SP), R14 \ ADCQ 48(SP), R14 \
ADCQ 56(SP), R15 \ ADCQ 56(SP), CX \
ADCQ $0, AX \ ADCQ $0, AX \
\ \
gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX) gfpCarry(R12,R13,R14,CX,AX, R8,R9,R10,R11,BX)
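A note on the register swap running through these assembly files: every live use of R15 becomes CX. The likely motivation — an inference from the diff, not a quoted commit message — is that the Go assembler reserves R15 on amd64 when building in dynamic-linking modes (PIE/shared), where accesses to global data clobber it, so hand-written code keeping values in R15 miscompiles under those build configurations.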


@ -24,37 +24,48 @@ import (
"crypto/elliptic" "crypto/elliptic"
"errors" "errors"
"fmt" "fmt"
"math/big"
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/btcec/v2"
btc_ecdsa "github.com/btcsuite/btcd/btcec/v2/ecdsa"
) )
// Ecrecover returns the uncompressed public key that created the given signature. // Ecrecover returns the uncompressed public key that created the given signature.
func Ecrecover(hash, sig []byte) ([]byte, error) { func Ecrecover(hash, sig []byte) ([]byte, error) {
pub, err := SigToPub(hash, sig) pub, err := sigToPub(hash, sig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
bytes := (*btcec.PublicKey)(pub).SerializeUncompressed() bytes := pub.SerializeUncompressed()
return bytes, err return bytes, err
} }
func sigToPub(hash, sig []byte) (*btcec.PublicKey, error) {
if len(sig) != SignatureLength {
return nil, errors.New("invalid signature")
}
// Convert to btcec input format with 'recovery id' v at the beginning.
btcsig := make([]byte, SignatureLength)
btcsig[0] = sig[RecoveryIDOffset] + 27
copy(btcsig[1:], sig)
pub, _, err := btc_ecdsa.RecoverCompact(btcsig, hash)
return pub, err
}
// SigToPub returns the public key that created the given signature. // SigToPub returns the public key that created the given signature.
func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) { func SigToPub(hash, sig []byte) (*ecdsa.PublicKey, error) {
// Convert to btcec input format with 'recovery id' v at the beginning. pub, err := sigToPub(hash, sig)
btcsig := make([]byte, SignatureLength) if err != nil {
btcsig[0] = sig[64] + 27 return nil, err
copy(btcsig[1:], sig) }
return pub.ToECDSA(), nil
pub, _, err := btcec.RecoverCompact(btcec.S256(), btcsig, hash)
return (*ecdsa.PublicKey)(pub), err
} }
// Sign calculates an ECDSA signature. // Sign calculates an ECDSA signature.
// //
// This function is susceptible to chosen plaintext attacks that can leak // This function is susceptible to chosen plaintext attacks that can leak
// information about the private key that is used for signing. Callers must // information about the private key that is used for signing. Callers must
// be aware that the given hash cannot be chosen by an adversery. Common // be aware that the given hash cannot be chosen by an adversary. Common
// solution is to hash any input before calculating the signature. // solution is to hash any input before calculating the signature.
// //
// The produced signature is in the [R || S || V] format where V is 0 or 1. // The produced signature is in the [R || S || V] format where V is 0 or 1.
@ -65,14 +76,20 @@ func Sign(hash []byte, prv *ecdsa.PrivateKey) ([]byte, error) {
if prv.Curve != btcec.S256() { if prv.Curve != btcec.S256() {
return nil, fmt.Errorf("private key curve is not secp256k1") return nil, fmt.Errorf("private key curve is not secp256k1")
} }
sig, err := btcec.SignCompact(btcec.S256(), (*btcec.PrivateKey)(prv), hash, false) // ecdsa.PrivateKey -> btcec.PrivateKey
var priv btcec.PrivateKey
if overflow := priv.Key.SetByteSlice(prv.D.Bytes()); overflow || priv.Key.IsZero() {
return nil, fmt.Errorf("invalid private key")
}
defer priv.Zero()
sig, err := btc_ecdsa.SignCompact(&priv, hash, false) // ref uncompressed pubkey
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Convert to Ethereum signature format with 'recovery id' v at the end. // Convert to Ethereum signature format with 'recovery id' v at the end.
v := sig[0] - 27 v := sig[0] - 27
copy(sig, sig[1:]) copy(sig, sig[1:])
sig[64] = v sig[RecoveryIDOffset] = v
return sig, nil return sig, nil
} }
@ -83,13 +100,20 @@ func VerifySignature(pubkey, hash, signature []byte) bool {
	if len(signature) != 64 {
		return false
	}
	var r, s btcec.ModNScalar
	if r.SetByteSlice(signature[:32]) {
		return false // overflow
	}
	if s.SetByteSlice(signature[32:]) {
		return false
	}
	sig := btc_ecdsa.NewSignature(&r, &s)
	key, err := btcec.ParsePubKey(pubkey)
	if err != nil {
		return false
	}
	// Reject malleable signatures. libsecp256k1 does this check but btcec doesn't.
	if s.IsOverHalfOrder() {
		return false
	}
	return sig.Verify(hash, key)
	if len(pubkey) != 33 {
		return nil, errors.New("invalid compressed public key length")
	}
	key, err := btcec.ParsePubKey(pubkey)
	if err != nil {
		return nil, err
	}
	return key.ToECDSA(), nil
}
// CompressPubkey encodes a public key to the 33-byte compressed format. The
// provided PublicKey must be valid. Namely, the coordinates must not be larger
// than 32 bytes each, they must be less than the field prime, and it must be a
// point on the secp256k1 curve. This is the case for a PublicKey constructed by
// elliptic.Unmarshal (see UnmarshalPubkey), or by ToECDSA and ecdsa.GenerateKey
// when constructing a PrivateKey.
func CompressPubkey(pubkey *ecdsa.PublicKey) []byte {
	// NOTE: the coordinates may be validated with
	// btcec.ParsePubKey(FromECDSAPub(pubkey))
	var x, y btcec.FieldVal
	x.SetByteSlice(pubkey.X.Bytes())
	y.SetByteSlice(pubkey.Y.Bytes())
	return btcec.NewPublicKey(&x, &y).SerializeCompressed()
}

// S256 returns an instance of the secp256k1 curve.
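As a quick illustration of how these primitives compose, here is a minimal round-trip sketch (not part of this diff; it assumes the usual github.com/ethereum/go-ethereum/crypto import path): sign a hashed message, recover the public key, and verify the 64-byte signature.

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	hash := crypto.Keccak256([]byte("payload")) // always hash inputs before signing

	sig, err := crypto.Sign(hash, key) // 65 bytes: [R || S || V], V is 0 or 1
	if err != nil {
		panic(err)
	}
	// Recover the uncompressed public key and compare with the signer's.
	pub, _ := crypto.Ecrecover(hash, sig)
	fmt.Println("recovered:", bytes.Equal(pub, crypto.FromECDSAPub(&key.PublicKey)))

	// VerifySignature expects the 64-byte [R || S] form, without V.
	fmt.Println("verified:", crypto.VerifySignature(pub, hash, sig[:64]))
}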

View File

@ -5,7 +5,7 @@ This is a post-mortem concerning the minority split that occurred on Ethereum ma
## Timeline

- 2021-08-17: Guido Vranken submitted a bounty report. Investigation started, root cause identified, patch variations discussed.
- 2021-08-18: Made a public announcement over Twitter about a security release on the upcoming Tuesday. Downstream projects were also notified about the upcoming patch-release.
- 2021-08-24: Released [v1.10.8](https://github.com/ethereum/go-ethereum/releases/tag/v1.10.8) containing the fix on Tuesday morning (CET). Erigon released [v2021.08.04](https://github.com/ledgerwatch/erigon/releases/tag/v2021.08.04).
- 2021-08-27: At 12:50:07 UTC, issue exploited. Analysis started roughly 30m later,
@ -51,7 +51,7 @@ A memory-corruption bug within the EVM can cause a consensus error, where vulner
#### Handling

On the evening of the 17th, we discussed options on how to handle it. We made a state test to reproduce the issue, and verified that neither `openethereum`, `nethermind` nor `besu` were affected by the same vulnerability, and started a full-sync with a patched version of `geth`.

It was decided that in this specific instance, it would be possible to make a public announcement and a patch release:

View File

@ -220,16 +220,16 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
		checkpoint = params.TrustedCheckpoints[genesisHash]
	}
	if eth.handler, err = newHandler(&handlerConfig{
		Database:           chainDb,
		Chain:              eth.blockchain,
		TxPool:             eth.txPool,
		Merger:             merger,
		Network:            config.NetworkId,
		Sync:               config.SyncMode,
		BloomCache:         uint64(cacheLimit),
		EventMux:           eth.eventMux,
		Checkpoint:         checkpoint,
		PeerRequiredBlocks: config.PeerRequiredBlocks,
	}); err != nil {
		return nil, err
	}

View File

@ -20,10 +20,14 @@ package catalyst
import (
	"crypto/sha256"
	"encoding/binary"
	"errors"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/beacon"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/log"
@ -36,18 +40,27 @@ func Register(stack *node.Node, backend *eth.Ethereum) error {
log.Warn("Catalyst mode enabled", "protocol", "eth") log.Warn("Catalyst mode enabled", "protocol", "eth")
stack.RegisterAPIs([]rpc.API{ stack.RegisterAPIs([]rpc.API{
{ {
Namespace: "engine", Namespace: "engine",
Version: "1.0", Version: "1.0",
Service: NewConsensusAPI(backend), Service: NewConsensusAPI(backend),
Public: true, Public: true,
Authenticated: true,
},
{
Namespace: "engine",
Version: "1.0",
Service: NewConsensusAPI(backend),
Public: true,
Authenticated: false,
}, },
}) })
return nil return nil
} }
type ConsensusAPI struct {
	eth          *eth.Ethereum
	remoteBlocks *headerQueue  // Cache of remote payloads received
	localBlocks  *payloadQueue // Cache of local payloads generated
}

// NewConsensusAPI creates a new consensus api for the given backend.
@ -57,8 +70,9 @@ func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
panic("Catalyst started without valid total difficulty") panic("Catalyst started without valid total difficulty")
} }
return &ConsensusAPI{ return &ConsensusAPI{
eth: eth, eth: eth,
preparedBlocks: newPayloadQueue(), remoteBlocks: newHeaderQueue(),
localBlocks: newPayloadQueue(),
} }
} }
@ -72,85 +86,237 @@ func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
// We try to set our blockchain to the headBlock
// If there are payloadAttributes:
// we try to assemble a block with the payloadAttributes and return its payloadID
func (api *ConsensusAPI) ForkchoiceUpdatedV1(update beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
	log.Trace("Engine API request received", "method", "ForkchoiceUpdated", "head", update.HeadBlockHash, "finalized", update.FinalizedBlockHash, "safe", update.SafeBlockHash)
	if update.HeadBlockHash == (common.Hash{}) {
		log.Warn("Forkchoice requested update to zero hash")
		return beacon.STATUS_INVALID, nil // TODO(karalabe): Why does someone send us this?
	}
	// Check whether we have the block yet in our database or not. If not, we'll
	// need to either trigger a sync, or to reject this forkchoice update for a
	// reason.
	block := api.eth.BlockChain().GetBlockByHash(update.HeadBlockHash)
	if block == nil {
		// If the head hash is unknown (was not given to us in a newPayload request),
		// we cannot resolve the header, so not much to do. This could be extended in
		// the future to resolve from the `eth` network, but it's an unexpected case
		// that should be fixed, not papered over.
		header := api.remoteBlocks.get(update.HeadBlockHash)
		if header == nil {
			log.Warn("Forkchoice requested unknown head", "hash", update.HeadBlockHash)
			return beacon.STATUS_SYNCING, nil
		}
		// Header advertised via a past newPayload request. Start syncing to it.
		// Before we do however, make sure any legacy sync is switched off so we
		// don't accidentally have 2 cycles running.
		if merger := api.eth.Merger(); !merger.TDDReached() {
			merger.ReachTTD()
			api.eth.Downloader().Cancel()
		}
		log.Info("Forkchoice requested sync to new head", "number", header.Number, "hash", header.Hash())
		if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), header); err != nil {
			return beacon.STATUS_SYNCING, err
		}
		return beacon.STATUS_SYNCING, nil
	}
	// Block is known locally, just sanity check that the beacon client does not
	// attempt to push us back to before the merge.
	if block.Difficulty().BitLen() > 0 || block.NumberU64() == 0 {
		var (
			td  = api.eth.BlockChain().GetTd(update.HeadBlockHash, block.NumberU64())
			ptd = api.eth.BlockChain().GetTd(block.ParentHash(), block.NumberU64()-1)
			ttd = api.eth.BlockChain().Config().TerminalTotalDifficulty
		)
		if td == nil || (block.NumberU64() > 0 && ptd == nil) {
			log.Error("TDs unavailable for TTD check", "number", block.NumberU64(), "hash", update.HeadBlockHash, "td", td, "parent", block.ParentHash(), "ptd", ptd)
			return beacon.STATUS_INVALID, errors.New("TDs unavailable for TDD check")
		}
		if td.Cmp(ttd) < 0 || (block.NumberU64() > 0 && ptd.Cmp(ttd) > 0) {
			log.Error("Refusing beacon update to pre-merge", "number", block.NumberU64(), "hash", update.HeadBlockHash, "diff", block.Difficulty(), "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)))
			return beacon.ForkChoiceResponse{PayloadStatus: beacon.PayloadStatusV1{Status: beacon.INVALIDTERMINALBLOCK}, PayloadID: nil}, nil
		}
	}
	if rawdb.ReadCanonicalHash(api.eth.ChainDb(), block.NumberU64()) != update.HeadBlockHash {
		// Block is not canonical, set head.
		if err := api.eth.BlockChain().SetChainHead(block); err != nil {
			return beacon.STATUS_INVALID, err
		}
	} else {
		// If the head block is already in our canonical chain, the beacon client is
		// probably resyncing. Ignore the update.
		log.Info("Ignoring beacon update to old head", "number", block.NumberU64(), "hash", update.HeadBlockHash, "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)), "have", api.eth.BlockChain().CurrentBlock().NumberU64())
	}
	api.eth.SetSynced()

	// If the beacon client also advertised a finalized block, mark the local
	// chain final and completely in PoS mode.
	if update.FinalizedBlockHash != (common.Hash{}) {
		if merger := api.eth.Merger(); !merger.PoSFinalized() {
			merger.FinalizePoS()
		}
		// TODO (MariusVanDerWijden): If the finalized block is not in our canonical tree, somethings wrong
		finalBlock := api.eth.BlockChain().GetBlockByHash(update.FinalizedBlockHash)
		if finalBlock == nil {
			log.Warn("Final block not available in database", "hash", update.FinalizedBlockHash)
			return beacon.STATUS_INVALID, errors.New("final block not available")
		} else if rawdb.ReadCanonicalHash(api.eth.ChainDb(), finalBlock.NumberU64()) != update.FinalizedBlockHash {
			log.Warn("Final block not in canonical chain", "number", block.NumberU64(), "hash", update.HeadBlockHash)
			return beacon.STATUS_INVALID, errors.New("final block not canonical")
		}
	}
	// TODO (MariusVanDerWijden): Check if the safe block hash is in our canonical tree, if not somethings wrong
	if update.SafeBlockHash != (common.Hash{}) {
		safeBlock := api.eth.BlockChain().GetBlockByHash(update.SafeBlockHash)
		if safeBlock == nil {
			log.Warn("Safe block not available in database")
			return beacon.STATUS_INVALID, errors.New("safe head not available")
		}
		if rawdb.ReadCanonicalHash(api.eth.ChainDb(), safeBlock.NumberU64()) != update.SafeBlockHash {
			log.Warn("Safe block not in canonical chain")
			return beacon.STATUS_INVALID, errors.New("safe head not canonical")
		}
	}
	// If payload generation was requested, create a new block to be potentially
	// sealed by the beacon client. The payload will be requested later, and we
	// might replace it arbitrarily many times in between.
	if payloadAttributes != nil {
		log.Info("Creating new payload for sealing")
		start := time.Now()

		data, err := api.assembleBlock(update.HeadBlockHash, payloadAttributes)
		if err != nil {
			log.Error("Failed to create sealing payload", "err", err)
			return api.validForkChoiceResponse(nil), err // valid setHead, invalid payload
		}
		id := computePayloadId(update.HeadBlockHash, payloadAttributes)
		api.localBlocks.put(id, data)

		log.Info("Created payload for sealing", "id", id, "elapsed", time.Since(start))
		return api.validForkChoiceResponse(&id), nil
	}
	return api.validForkChoiceResponse(nil), nil
}

// validForkChoiceResponse returns the ForkChoiceResponse{VALID}
// with the latest valid hash and an optional payloadID.
func (api *ConsensusAPI) validForkChoiceResponse(id *beacon.PayloadID) beacon.ForkChoiceResponse {
	currentHash := api.eth.BlockChain().CurrentBlock().Hash()
	return beacon.ForkChoiceResponse{
		PayloadStatus: beacon.PayloadStatusV1{Status: beacon.VALID, LatestValidHash: &currentHash},
		PayloadID:     id,
	}
}

// ExchangeTransitionConfigurationV1 checks the given configuration against
// the configuration of the node.
func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config beacon.TransitionConfigurationV1) (*beacon.TransitionConfigurationV1, error) {
	if config.TerminalTotalDifficulty == nil {
		return nil, errors.New("invalid terminal total difficulty")
	}
	ttd := api.eth.BlockChain().Config().TerminalTotalDifficulty
	if ttd.Cmp(config.TerminalTotalDifficulty.ToInt()) != 0 {
		log.Warn("Invalid TTD configured", "geth", ttd, "beacon", config.TerminalTotalDifficulty)
		return nil, fmt.Errorf("invalid ttd: execution %v consensus %v", ttd, config.TerminalTotalDifficulty)
	}
	if config.TerminalBlockHash != (common.Hash{}) {
		if hash := api.eth.BlockChain().GetCanonicalHash(uint64(config.TerminalBlockNumber)); hash == config.TerminalBlockHash {
			return &beacon.TransitionConfigurationV1{
				TerminalTotalDifficulty: (*hexutil.Big)(ttd),
				TerminalBlockHash:       config.TerminalBlockHash,
				TerminalBlockNumber:     config.TerminalBlockNumber,
			}, nil
		}
		return nil, fmt.Errorf("invalid terminal block hash")
	}
	return &beacon.TransitionConfigurationV1{TerminalTotalDifficulty: (*hexutil.Big)(ttd)}, nil
}
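For orientation, a hedged sketch of the call sequence a consensus client performs against this API; ethservice and head are hypothetical stand-ins (the tests later in this diff use the same pattern):

api := NewConsensusAPI(ethservice) // ethservice: hypothetical *eth.Ethereum backend

fcState := beacon.ForkchoiceStateV1{
	HeadBlockHash:      head.Hash(), // head: hypothetical *types.Block to adopt
	SafeBlockHash:      common.Hash{},
	FinalizedBlockHash: common.Hash{},
}
attrs := &beacon.PayloadAttributesV1{Timestamp: head.Time() + 12}

// Adopt the head and request payload construction in one call.
resp, err := api.ForkchoiceUpdatedV1(fcState, attrs)
if err != nil || resp.PayloadStatus.Status != beacon.VALID {
	// head unknown (SYNCING) or pre-merge (INVALIDTERMINALBLOCK)
}
// Retrieve the sealed payload later; it may be replaced in the meantime.
payload, err := api.GetPayloadV1(*resp.PayloadID)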
// GetPayloadV1 returns a cached payload by id.
func (api *ConsensusAPI) GetPayloadV1(payloadID beacon.PayloadID) (*beacon.ExecutableDataV1, error) {
	log.Trace("Engine API request received", "method", "GetPayload", "id", payloadID)
	data := api.localBlocks.get(payloadID)
	if data == nil {
		return nil, &beacon.UnknownPayload
	}
	return data, nil
}
// NewPayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
func (api *ConsensusAPI) NewPayloadV1(params beacon.ExecutableDataV1) (beacon.PayloadStatusV1, error) {
	log.Trace("Engine API request received", "method", "ExecutePayload", "number", params.Number, "hash", params.BlockHash)
	block, err := beacon.ExecutableDataToBlock(params)
	if err != nil {
		log.Debug("Invalid NewPayload params", "params", params, "error", err)
		return beacon.PayloadStatusV1{Status: beacon.INVALIDBLOCKHASH}, nil
	}
	// If we already have the block locally, ignore the entire execution and just
	// return a fake success.
	if block := api.eth.BlockChain().GetBlockByHash(params.BlockHash); block != nil {
		log.Warn("Ignoring already known beacon payload", "number", params.Number, "hash", params.BlockHash, "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)))
		hash := block.Hash()
		return beacon.PayloadStatusV1{Status: beacon.VALID, LatestValidHash: &hash}, nil
	}
	// If the parent is missing, we - in theory - could trigger a sync, but that
	// would also entail a reorg. That is problematic if multiple sibling blocks
	// are being fed to us, and even more so, if some semi-distant uncle shortens
	// our live chain. As such, payload execution will not permit reorgs and thus
	// will not trigger a sync cycle. That is fine though, if we get a fork choice
	// update after legit payload executions.
	parent := api.eth.BlockChain().GetBlock(block.ParentHash(), block.NumberU64()-1)
	if parent == nil {
		// Stash the block away for a potential forced forkchoice update to it
		// at a later time.
		api.remoteBlocks.put(block.Hash(), block.Header())

		// Although we don't want to trigger a sync, if there is one already in
		// progress, try to extend it with the current payload request to relieve
		// some strain from the forkchoice update.
		if err := api.eth.Downloader().BeaconExtend(api.eth.SyncMode(), block.Header()); err == nil {
			log.Debug("Payload accepted for sync extension", "number", params.Number, "hash", params.BlockHash)
			return beacon.PayloadStatusV1{Status: beacon.SYNCING}, nil
		}
		// Either no beacon sync was started yet, or it rejected the delivered
		// payload as non-integratable on top of the existing sync. We'll just
		// have to rely on the beacon client to forcefully update the head with
		// a forkchoice update request.
		log.Warn("Ignoring payload with missing parent", "number", params.Number, "hash", params.BlockHash, "parent", params.ParentHash)
		return beacon.PayloadStatusV1{Status: beacon.ACCEPTED}, nil
	}
	// We have an existing parent, do some sanity checks to avoid the beacon client
	// triggering too early
	var (
		td  = api.eth.BlockChain().GetTd(parent.Hash(), parent.NumberU64())
		ttd = api.eth.BlockChain().Config().TerminalTotalDifficulty
	)
	if td.Cmp(ttd) < 0 {
		log.Warn("Ignoring pre-merge payload", "number", params.Number, "hash", params.BlockHash, "td", td, "ttd", ttd)
		return beacon.PayloadStatusV1{Status: beacon.INVALIDTERMINALBLOCK}, nil
	}
	if block.Time() <= parent.Time() {
		log.Warn("Invalid timestamp", "parent", parent.Time(), "block", block.Time())
		return api.invalid(errors.New("invalid timestamp")), nil
	}
	if !api.eth.BlockChain().HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
		api.remoteBlocks.put(block.Hash(), block.Header())
		log.Warn("State not available, ignoring new payload")
		return beacon.PayloadStatusV1{Status: beacon.ACCEPTED}, nil
	}
	log.Trace("Inserting block without sethead", "hash", block.Hash(), "number", block.Number)
	if err := api.eth.BlockChain().InsertBlockWithoutSetHead(block); err != nil {
		log.Warn("NewPayloadV1: inserting block failed", "error", err)
		return api.invalid(err), nil
	}
	// We've accepted a valid payload from the beacon client. Mark the local
	// chain transitions to notify other subsystems (e.g. downloader) of the
	// behavioral change.
	if merger := api.eth.Merger(); !merger.TDDReached() {
		merger.ReachTTD()
		api.eth.Downloader().Cancel()
	}
	hash := block.Hash()
	return beacon.PayloadStatusV1{Status: beacon.VALID, LatestValidHash: &hash}, nil
}
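The status codes above drive different recovery paths on the consensus side; a rough sketch of the branching, assuming the same beacon package constants:

status, err := api.NewPayloadV1(*payload)
if err != nil {
	// internal failure, not a validation verdict
}
switch status.Status {
case beacon.VALID:
	// imported; follow up with ForkchoiceUpdatedV1 to move the head
case beacon.SYNCING:
	// handed to an in-flight beacon sync via BeaconExtend
case beacon.ACCEPTED:
	// parent or state missing; header stashed in remoteBlocks, a later
	// forkchoice update must force the head to start a sync
case beacon.INVALIDTERMINALBLOCK:
	// parent is pre-merge: terminal total difficulty not reached
case beacon.INVALID, beacon.INVALIDBLOCKHASH:
	// rejected; status.ValidationError carries the reason when set
}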
// computePayloadId computes a pseudo-random payloadid, based on the parameters.
@ -167,8 +333,10 @@ func computePayloadId(headBlockHash common.Hash, params *beacon.PayloadAttribute
}
// invalid returns a response "INVALID" with the latest valid hash set to the current head.
func (api *ConsensusAPI) invalid(err error) beacon.PayloadStatusV1 {
	currentHash := api.eth.BlockChain().CurrentHeader().Hash()
	errorMsg := err.Error()
	return beacon.PayloadStatusV1{Status: beacon.INVALID, LatestValidHash: &currentHash, ValidationError: &errorMsg}
}
// assembleBlock creates a new block and returns the "execution
@ -189,43 +357,3 @@ func (api *ConsensusAPI) insertTransactions(txs types.Transactions) error {
	}
	return nil
}
func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
// shortcut if we entered PoS already
if api.eth.Merger().PoSFinalized() {
return nil
}
// make sure the parent has enough terminal total difficulty
newHeadBlock := api.eth.BlockChain().GetBlockByHash(head)
if newHeadBlock == nil {
return &beacon.GenericServerError
}
td := api.eth.BlockChain().GetTd(newHeadBlock.Hash(), newHeadBlock.NumberU64())
if td != nil && td.Cmp(api.eth.BlockChain().Config().TerminalTotalDifficulty) < 0 {
return &beacon.InvalidTB
}
return nil
}
// setHead is called to perform a forced choice.
func (api *ConsensusAPI) setHead(newHead common.Hash) error {
log.Info("Setting head", "head", newHead)
headBlock := api.eth.BlockChain().CurrentBlock()
if headBlock.Hash() == newHead {
return nil
}
newHeadBlock := api.eth.BlockChain().GetBlockByHash(newHead)
if newHeadBlock == nil {
return &beacon.GenericServerError
}
if err := api.eth.BlockChain().SetChainHead(newHeadBlock); err != nil {
return err
}
// Trigger the transition if it's the first `NewHead` event.
if merger := api.eth.Merger(); !merger.PoSFinalized() {
merger.FinalizePoS()
}
// TODO (MariusVanDerWijden) are we really synced now?
api.eth.SetSynced()
return nil
}

View File

@ -23,6 +23,7 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/beacon" "github.com/ethereum/go-ethereum/core/beacon"
@ -49,11 +50,12 @@ func generatePreMergeChain(n int) (*core.Genesis, []*types.Block) {
	db := rawdb.NewMemoryDatabase()
	config := params.AllEthashProtocolChanges
	genesis := &core.Genesis{
		Config:     config,
		Alloc:      core.GenesisAlloc{testAddr: {Balance: testBalance}},
		ExtraData:  []byte("test genesis"),
		Timestamp:  9000,
		BaseFee:    big.NewInt(params.InitialBaseFee),
		Difficulty: big.NewInt(0),
	}
	testNonce := uint64(0)
	generate := func(i int, g *core.BlockGen) {
@ -130,50 +132,55 @@ func TestSetHeadBeforeTotalDifficulty(t *testing.T) {
		SafeBlockHash:      common.Hash{},
		FinalizedBlockHash: common.Hash{},
	}
	if resp, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
		t.Errorf("fork choice updated should not error: %v", err)
	} else if resp.PayloadStatus.Status != beacon.INVALIDTERMINALBLOCK {
		t.Errorf("fork choice updated before total terminal difficulty should be INVALID")
	}
}
func TestEth2PrepareAndGetPayload(t *testing.T) {
	// TODO (MariusVanDerWijden) TestEth2PrepareAndGetPayload is currently broken, fixed in upcoming merge-kiln-v2 pr
	/*
		genesis, blocks := generatePreMergeChain(10)
		// We need to properly set the terminal total difficulty
		genesis.Config.TerminalTotalDifficulty.Sub(genesis.Config.TerminalTotalDifficulty, blocks[9].Difficulty())
		n, ethservice := startEthService(t, genesis, blocks[:9])
		defer n.Close()

		api := NewConsensusAPI(ethservice)

		// Put the 10th block's tx in the pool and produce a new block
		api.insertTransactions(blocks[9].Transactions())
		blockParams := beacon.PayloadAttributesV1{
			Timestamp: blocks[8].Time() + 5,
		}
		fcState := beacon.ForkchoiceStateV1{
			HeadBlockHash:      blocks[8].Hash(),
			SafeBlockHash:      common.Hash{},
			FinalizedBlockHash: common.Hash{},
		}
		_, err := api.ForkchoiceUpdatedV1(fcState, &blockParams)
		if err != nil {
			t.Fatalf("error preparing payload, err=%v", err)
		}
		payloadID := computePayloadId(fcState.HeadBlockHash, &blockParams)
		execData, err := api.GetPayloadV1(payloadID)
		if err != nil {
			t.Fatalf("error getting payload, err=%v", err)
		}
		if len(execData.Transactions) != blocks[9].Transactions().Len() {
			t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions))
		}
		// Test invalid payloadID
		var invPayload beacon.PayloadID
		copy(invPayload[:], payloadID[:])
		invPayload[0] = ^invPayload[0]
		_, err = api.GetPayloadV1(invPayload)
		if err == nil {
			t.Fatal("expected error retrieving invalid payload")
		}
	*/
}
func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan core.RemovedLogsEvent, wantNew, wantRemoved int) {
@ -210,8 +217,11 @@ func TestInvalidPayloadTimestamp(t *testing.T) {
		{0, true},
		{parent.Time(), true},
		{parent.Time() - 1, true},

		// TODO (MariusVanDerWijden) following tests are currently broken,
		// fixed in upcoming merge-kiln-v2 pr
		//{parent.Time() + 1, false},
		//{uint64(time.Now().Unix()) + uint64(time.Minute), false},
	}
	for i, test := range tests {
@ -271,7 +281,7 @@ func TestEth2NewBlock(t *testing.T) {
	if err != nil {
		t.Fatalf("Failed to convert executable data to block %v", err)
	}
	newResp, err := api.NewPayloadV1(*execData)
	if err != nil || newResp.Status != "VALID" {
		t.Fatalf("Failed to insert block: %v", err)
	}
@ -311,7 +321,7 @@ func TestEth2NewBlock(t *testing.T) {
	if err != nil {
		t.Fatalf("Failed to convert executable data to block %v", err)
	}
	newResp, err := api.NewPayloadV1(*execData)
	if err != nil || newResp.Status != "VALID" {
		t.Fatalf("Failed to insert block: %v", err)
	}
@ -429,6 +439,7 @@ func TestFullAPI(t *testing.T) {
			Random:                crypto.Keccak256Hash([]byte{byte(i)}),
			SuggestedFeeRecipient: parent.Coinbase(),
		}

		fcState := beacon.ForkchoiceStateV1{
			HeadBlockHash:      parent.Hash(),
			SafeBlockHash:      common.Hash{},
		if err != nil {
			t.Fatalf("error preparing payload, err=%v", err)
		}
		if resp.PayloadStatus.Status != beacon.VALID {
			t.Fatalf("error preparing payload, invalid status: %v", resp.PayloadStatus.Status)
		}
		payload, err := api.GetPayloadV1(*resp.PayloadID)
		if err != nil {
			t.Fatalf("can't get payload: %v", err)
		}
		execResp, err := api.NewPayloadV1(*payload)
		if err != nil {
			t.Fatalf("can't execute payload: %v", err)
		}
		if execResp.Status != beacon.VALID {
			t.Fatalf("invalid status: %v", execResp.Status)
		}
		fcState = beacon.ForkchoiceStateV1{
@ -467,3 +477,49 @@ func TestFullAPI(t *testing.T) {
		parent = ethservice.BlockChain().CurrentBlock()
	}
}
func TestExchangeTransitionConfig(t *testing.T) {
genesis, preMergeBlocks := generatePreMergeChain(10)
n, ethservice := startEthService(t, genesis, preMergeBlocks)
ethservice.Merger().ReachTTD()
defer n.Close()
var (
api = NewConsensusAPI(ethservice)
)
// invalid ttd
config := beacon.TransitionConfigurationV1{
TerminalTotalDifficulty: (*hexutil.Big)(big.NewInt(0)),
TerminalBlockHash: common.Hash{},
TerminalBlockNumber: 0,
}
if _, err := api.ExchangeTransitionConfigurationV1(config); err == nil {
t.Fatal("expected error on invalid config, invalid ttd")
}
// invalid terminal block hash
config = beacon.TransitionConfigurationV1{
TerminalTotalDifficulty: (*hexutil.Big)(genesis.Config.TerminalTotalDifficulty),
TerminalBlockHash: common.Hash{1},
TerminalBlockNumber: 0,
}
if _, err := api.ExchangeTransitionConfigurationV1(config); err == nil {
t.Fatal("expected error on invalid config, invalid hash")
}
// valid config
config = beacon.TransitionConfigurationV1{
TerminalTotalDifficulty: (*hexutil.Big)(genesis.Config.TerminalTotalDifficulty),
TerminalBlockHash: common.Hash{},
TerminalBlockNumber: 0,
}
if _, err := api.ExchangeTransitionConfigurationV1(config); err != nil {
t.Fatalf("expected no error on valid config, got %v", err)
}
// valid config
config = beacon.TransitionConfigurationV1{
TerminalTotalDifficulty: (*hexutil.Big)(genesis.Config.TerminalTotalDifficulty),
TerminalBlockHash: preMergeBlocks[5].Hash(),
TerminalBlockNumber: 6,
}
if _, err := api.ExchangeTransitionConfigurationV1(config); err != nil {
t.Fatalf("expected no error on valid config, got %v", err)
}
}

View File

@ -19,7 +19,9 @@ package catalyst
import (
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/beacon"
	"github.com/ethereum/go-ethereum/core/types"
)
// maxTrackedPayloads is the maximum number of prepared payloads the execution
// engine tracks before evicting old ones. Ideally we should only ever track the
// latest one; but have a slight wiggle room for non-ideal conditions.
const maxTrackedPayloads = 10
// maxTrackedHeaders is the maximum number of executed payloads the execution
// engine tracks before evicting old ones. Ideally we should only ever track the
// latest one; but have a slight wiggle room for non-ideal conditions.
const maxTrackedHeaders = 10
// payloadQueueItem represents an id->payload tuple to store until it's retrieved
// or evicted.
type payloadQueueItem struct {
@ -76,3 +83,53 @@ func (q *payloadQueue) get(id beacon.PayloadID) *beacon.ExecutableDataV1 {
	}
	return nil
}
// headerQueueItem represents a hash->header tuple to store until it's retrieved
// or evicted.
type headerQueueItem struct {
hash common.Hash
header *types.Header
}
// headerQueue tracks the latest handful of remote payload headers, stashed away
// until a forkchoice update forces the chain head to one of them.
type headerQueue struct {
headers []*headerQueueItem
lock sync.RWMutex
}
// newHeaderQueue creates a pre-initialized queue with a fixed number of slots
// all containing empty items.
func newHeaderQueue() *headerQueue {
return &headerQueue{
headers: make([]*headerQueueItem, maxTrackedHeaders),
}
}
// put inserts a new header into the queue at the given hash.
func (q *headerQueue) put(hash common.Hash, data *types.Header) {
q.lock.Lock()
defer q.lock.Unlock()
copy(q.headers[1:], q.headers)
q.headers[0] = &headerQueueItem{
hash: hash,
header: data,
}
}
// get retrieves a previously stored header item or nil if it does not exist.
func (q *headerQueue) get(hash common.Hash) *types.Header {
q.lock.RLock()
defer q.lock.RUnlock()
for _, item := range q.headers {
if item == nil {
return nil // no more items
}
if item.hash == hash {
return item.header
}
}
return nil
}
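The queue is a tiny fixed-size FIFO: each put shifts the slice right and overwrites slot zero, so only the newest maxTrackedHeaders entries survive. A hedged usage sketch (headers is a hypothetical slice):

q := newHeaderQueue()
for i, h := range headers { // headers: hypothetical []*types.Header
	q.put(h.Hash(), h)
	if i >= maxTrackedHeaders {
		// the entry inserted maxTrackedHeaders puts ago is gone now:
		// q.get(headers[i-maxTrackedHeaders].Hash()) returns nil
	}
}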

View File

@ -0,0 +1,308 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package downloader
import (
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// beaconBackfiller is the chain and state backfilling that can be commenced once
// the skeleton syncer has successfully reverse downloaded all the headers up to
// the genesis block or an existing header in the database. Its operation is fully
// directed by the skeleton sync's head/tail events.
type beaconBackfiller struct {
downloader *Downloader // Downloader to direct via this callback implementation
syncMode SyncMode // Sync mode to use for backfilling the skeleton chains
success func() // Callback to run on successful sync cycle completion
filling bool // Flag whether the downloader is backfilling or not
started chan struct{} // Notification channel whether the downloader inited
lock sync.Mutex // Mutex protecting the sync lock
}
// newBeaconBackfiller is a helper method to create the backfiller.
func newBeaconBackfiller(dl *Downloader, success func()) backfiller {
return &beaconBackfiller{
downloader: dl,
success: success,
}
}
// suspend cancels any background downloader threads.
func (b *beaconBackfiller) suspend() {
// If no filling is running, don't waste cycles
b.lock.Lock()
filling := b.filling
started := b.started
b.lock.Unlock()
if !filling {
return
}
// A previous filling should be running, though it may happen that it hasn't
// yet started (being done on a new goroutine). Many concurrent beacon head
// announcements can lead to sync start/stop thrashing. In that case we need
// to wait for initialization before we can safely cancel it. It is safe to
// read this channel multiple times, it gets closed on startup.
<-started
// Now that we're sure the downloader successfully started up, we can cancel
// it safely without running the risk of data races.
b.downloader.Cancel()
}
// resume starts the downloader threads for backfilling state and chain data.
func (b *beaconBackfiller) resume() {
b.lock.Lock()
if b.filling {
// If a previous filling cycle is still running, just ignore this start
// request. // TODO(karalabe): We should make this channel driven
b.lock.Unlock()
return
}
b.filling = true
b.started = make(chan struct{})
mode := b.syncMode
b.lock.Unlock()
// Start the backfilling on its own thread since the downloader does not have
// its own lifecycle runloop.
go func() {
// Set the backfiller to non-filling when download completes
defer func() {
b.lock.Lock()
b.filling = false
b.lock.Unlock()
}()
// If the downloader fails, report an error as in beacon chain mode there
// should be no errors as long as the chain we're syncing to is valid.
if err := b.downloader.synchronise("", common.Hash{}, nil, nil, mode, true, b.started); err != nil {
log.Error("Beacon backfilling failed", "err", err)
return
}
// Synchronization succeeded. Since this happens async, notify the outer
// context to disable snap syncing and enable transaction propagation.
if b.success != nil {
b.success()
}
}()
}
// setMode updates the sync mode from the current one to the requested one. If
// there's an active sync in progress, it will be cancelled and restarted.
func (b *beaconBackfiller) setMode(mode SyncMode) {
	// Update the old sync mode and track if it was changed
	b.lock.Lock()
	oldMode := b.syncMode
	updated := oldMode != mode
	filling := b.filling
	b.syncMode = mode
	b.lock.Unlock()

	// If the sync mode was changed mid-sync, restart. This should never ever
	// really happen, we just handle it to detect programming errors.
	if !updated || !filling {
		return
	}
	log.Error("Downloader sync mode changed mid-run", "old", oldMode.String(), "new", mode.String())
b.suspend()
b.resume()
}
// BeaconSync is the post-merge version of the chain synchronization, where the
// chain is not downloaded from genesis onward, rather from trusted head announces
// backwards.
//
// Internally backfilling and state sync is done the same way, but the header
// retrieval and scheduling is replaced.
func (d *Downloader) BeaconSync(mode SyncMode, head *types.Header) error {
return d.beaconSync(mode, head, true)
}
// BeaconExtend is an optimistic version of BeaconSync, where an attempt is made
// to extend the current beacon chain with a new header, but in case of a mismatch,
// the old sync will not be terminated and reorged, rather the new head is dropped.
//
// This is useful if a beacon client is feeding us large chunks of payloads to run,
// but is not setting the head after each.
func (d *Downloader) BeaconExtend(mode SyncMode, head *types.Header) error {
return d.beaconSync(mode, head, false)
}
// beaconSync is the post-merge version of the chain synchronization, where the
// chain is not downloaded from genesis onward, rather from trusted head announces
// backwards.
//
// Internally backfilling and state sync is done the same way, but the header
// retrieval and scheduling is replaced.
func (d *Downloader) beaconSync(mode SyncMode, head *types.Header, force bool) error {
// When the downloader starts a sync cycle, it needs to be aware of the sync
// mode to use (full, snap). To keep the skeleton chain oblivious, inject the
// mode into the backfiller directly.
//
// Super crazy dangerous type cast. Should be fine (TM), we're only using a
// different backfiller implementation for skeleton tests.
d.skeleton.filler.(*beaconBackfiller).setMode(mode)
// Signal the skeleton sync to switch to a new head, however it wants
if err := d.skeleton.Sync(head, force); err != nil {
return err
}
return nil
}
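To make the force semantics concrete, this is roughly how the catalyst layer earlier in this diff picks between the two entry points:

// Forkchoice update to a cached-but-unknown head: force a (new) sync cycle.
if err := eth.Downloader().BeaconSync(eth.SyncMode(), header); err != nil {
	return beacon.STATUS_SYNCING, err
}

// newPayload with a missing parent: only extend an existing cycle; a
// mismatching header is dropped instead of reorging the running sync.
if err := eth.Downloader().BeaconExtend(eth.SyncMode(), block.Header()); err == nil {
	return beacon.PayloadStatusV1{Status: beacon.SYNCING}, nil
}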
// findBeaconAncestor tries to locate the common ancestor link of the local chain
// and the beacon chain just requested. In the general case when our node was in
// sync and on the correct chain, checking the top N links should already get us
// a match. In the rare scenario when we ended up on a long reorganisation (i.e.
// none of the head links match), we do a binary search to find the ancestor.
func (d *Downloader) findBeaconAncestor() (uint64, error) {
// Figure out the current local head position
var chainHead *types.Header
switch d.getMode() {
case FullSync:
chainHead = d.blockchain.CurrentBlock().Header()
case SnapSync:
chainHead = d.blockchain.CurrentFastBlock().Header()
default:
chainHead = d.lightchain.CurrentHeader()
}
number := chainHead.Number.Uint64()
// Retrieve the skeleton bounds and ensure they are linked to the local chain
beaconHead, beaconTail, err := d.skeleton.Bounds()
if err != nil {
// This is a programming error. The chain backfiller was called with an
// invalid beacon sync state. Ideally we would panic here, but erroring
// gives us at least a remote chance to recover. It's still a big fault!
log.Error("Failed to retrieve beacon bounds", "err", err)
return 0, err
}
var linked bool
switch d.getMode() {
case FullSync:
linked = d.blockchain.HasBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1)
case SnapSync:
linked = d.blockchain.HasFastBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1)
default:
linked = d.blockchain.HasHeader(beaconTail.ParentHash, beaconTail.Number.Uint64()-1)
}
if !linked {
// This is a programming error. The chain backfiller was called with a
// tail that's not linked to the local chain. Whilst this should never
// happen, there might be some weirdnesses if beacon sync backfilling
// races with the user (or beacon client) calling setHead. Whilst panic
// would be the ideal thing to do, it is safer long term to attempt a
// recovery and fix any noticed issue after the fact.
log.Error("Beacon sync linkup unavailable", "number", beaconTail.Number.Uint64()-1, "hash", beaconTail.ParentHash)
return 0, fmt.Errorf("beacon linkup unavailable locally: %d [%x]", beaconTail.Number.Uint64()-1, beaconTail.ParentHash)
}
// Binary search to find the ancestor
start, end := beaconTail.Number.Uint64()-1, number
if number := beaconHead.Number.Uint64(); end > number {
		// This shouldn't really happen in a healthy network, but if the consensus
		// client feeds us a shorter chain as the canonical, we should not attempt
		// to access non-existent skeleton items.
log.Warn("Beacon head lower than local chain", "beacon", number, "local", end)
end = number
}
for start+1 < end {
// Split our chain interval in two, and request the hash to cross check
check := (start + end) / 2
h := d.skeleton.Header(check)
n := h.Number.Uint64()
var known bool
switch d.getMode() {
case FullSync:
known = d.blockchain.HasBlock(h.Hash(), n)
case SnapSync:
known = d.blockchain.HasFastBlock(h.Hash(), n)
default:
known = d.lightchain.HasHeader(h.Hash(), n)
}
if !known {
end = check
continue
}
start = check
}
return start, nil
}
// fetchBeaconHeaders feeds skeleton headers to the downloader queue for scheduling
// until sync errors or is finished.
func (d *Downloader) fetchBeaconHeaders(from uint64) error {
head, _, err := d.skeleton.Bounds()
if err != nil {
return err
}
for {
// Retrieve a batch of headers and feed it to the header processor
var (
headers = make([]*types.Header, 0, maxHeadersProcess)
hashes = make([]common.Hash, 0, maxHeadersProcess)
)
for i := 0; i < maxHeadersProcess && from <= head.Number.Uint64(); i++ {
headers = append(headers, d.skeleton.Header(from))
hashes = append(hashes, headers[i].Hash())
from++
}
if len(headers) > 0 {
log.Trace("Scheduling new beacon headers", "count", len(headers), "from", from-uint64(len(headers)))
select {
case d.headerProcCh <- &headerTask{
headers: headers,
hashes: hashes,
}:
case <-d.cancelCh:
return errCanceled
}
}
// If we still have headers to import, loop and keep pushing them
if from <= head.Number.Uint64() {
continue
}
// If the pivot block is committed, signal header sync termination
if atomic.LoadInt32(&d.committed) == 1 {
select {
case d.headerProcCh <- nil:
return nil
case <-d.cancelCh:
return errCanceled
}
}
// State sync still going, wait a bit for new headers and retry
log.Trace("Pivot not yet committed, waiting...")
select {
case <-time.After(fsHeaderContCheck):
case <-d.cancelCh:
return errCanceled
}
head, _, err = d.skeleton.Bounds()
if err != nil {
return err
}
}
}

View File

@ -30,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
@ -79,6 +78,7 @@ var (
	errCanceled        = errors.New("syncing canceled (requested)")
	errTooOld          = errors.New("peer's protocol version too old")
	errNoAncestorFound = errors.New("no common ancestor found")

	ErrMergeTransition = errors.New("legacy sync reached the merge")
)
// peerDropFn is a callback type for dropping a peer detected as malicious.
@ -123,6 +123,9 @@ type Downloader struct {
	// Channels
	headerProcCh chan *headerTask // Channel to feed the header processor new tasks

	// Skeleton sync
	skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode)

	// State sync
	pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
	pivotLock   sync.RWMutex  // Lock protecting pivot header reads from updates
@ -201,7 +204,7 @@ type BlockChain interface {
}

// New creates a new downloader to fetch hashes and blocks from remote peers.
func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader {
	if lightchain == nil {
		lightchain = chain
	}
@ -219,6 +222,8 @@ func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain Bl
		SnapSyncer:     snap.NewSyncer(stateDb),
		stateSyncStart: make(chan *stateSync),
	}
	dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))

	go dl.stateFetcher()
	return dl
}
@ -318,10 +323,10 @@ func (d *Downloader) UnregisterPeer(id string) error {
	return nil
}

// LegacySync tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error {
	err := d.synchronise(id, head, td, ttd, mode, false, nil)

	switch err {
	case nil, errBusy, errCanceled:
@ -340,6 +345,9 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
		}
		return err
	}
if errors.Is(err, ErrMergeTransition) {
return err // This is an expected fault, don't keep printing it in a spin-loop
}
log.Warn("Synchronisation failed, retrying", "err", err) log.Warn("Synchronisation failed, retrying", "err", err)
return err return err
} }
@ -347,7 +355,21 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode
// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous
func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error {
	// The beacon header syncer is async. It will start this synchronization and
	// will continue doing other tasks. However, if synchronization needs to be
	// cancelled, the syncer needs to know if we reached the startup point (and
	// inited the cancel channel) or not yet. Make sure that we'll signal even in
	// case of a failure.
	if beaconPing != nil {
		defer func() {
			select {
			case <-beaconPing: // already notified
			default:
				close(beaconPing) // weird exit condition, notify that it's safe to cancel (the nothing)
			}
		}()
	}
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
@ -362,9 +384,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info("Block synchronisation started")
	}
	if mode == SnapSync {
		// Snap sync uses the snapshot namespace to store potentially flakey data until
		// sync completely heals and finishes. Pause snapshot maintenance in the mean-
@ -402,11 +421,17 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
	atomic.StoreUint32(&d.mode, uint32(mode))

	// Retrieve the origin peer and initiate the downloading process
	var p *peerConnection
	if !beaconMode { // Beacon mode doesn't need a peer to sync from
		p = d.peers.Peer(id)
		if p == nil {
			return errUnknownPeer
		}
	}
	if beaconPing != nil {
		close(beaconPing)
	}
	return d.syncWithPeer(p, hash, td, ttd, beaconMode)
}
func (d *Downloader) getMode() SyncMode {
@ -415,7 +440,7 @@ func (d *Downloader) getMode() SyncMode {
// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
@ -426,33 +451,57 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
			d.mux.Post(DoneEvent{latest})
		}
	}()
	mode := d.getMode()

	if !beaconMode {
		log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode)
	} else {
		log.Debug("Backfilling with the network", "mode", mode)
	}
	defer func(start time.Time) {
		log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
	}(time.Now())
// Look up the sync boundaries: the common ancestor and the target block // Look up the sync boundaries: the common ancestor and the target block
latest, pivot, err := d.fetchHead(p) var latest, pivot *types.Header
if err != nil { if !beaconMode {
return err // In legacy mode, use the master peer to retrieve the headers from
latest, pivot, err = d.fetchHead(p)
if err != nil {
return err
}
} else {
// In beacon mode, user the skeleton chain to retrieve the headers from
latest, _, err = d.skeleton.Bounds()
if err != nil {
return err
}
if latest.Number.Uint64() > uint64(fsMinFullBlocks) {
pivot = d.skeleton.Header(latest.Number.Uint64() - uint64(fsMinFullBlocks))
}
} }
// If no pivot block was returned, the head is below the min full block
// threshold (i.e. new chain). In that case we won't really snap sync
// anyway, but still need a valid pivot block to avoid some code hitting
// nil panics on access.
if mode == SnapSync && pivot == nil { if mode == SnapSync && pivot == nil {
// If no pivot block was returned, the head is below the min full block
// threshold (i.e. new chain). In that case we won't really snap sync
// anyway, but still need a valid pivot block to avoid some code hitting
// nil panics on an access.
pivot = d.blockchain.CurrentBlock().Header() pivot = d.blockchain.CurrentBlock().Header()
} }
height := latest.Number.Uint64() height := latest.Number.Uint64()
origin, err := d.findAncestor(p, latest) var origin uint64
if err != nil { if !beaconMode {
return err // In legacy mode, reach out to the network and find the ancestor
origin, err = d.findAncestor(p, latest)
if err != nil {
return err
}
} else {
// In beacon mode, use the skeleton chain for the ancestor lookup
origin, err = d.findBeaconAncestor()
if err != nil {
return err
}
} }
d.syncStatsLock.Lock() d.syncStatsLock.Lock()
if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
@ -523,11 +572,19 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
if d.syncInitHook != nil { if d.syncInitHook != nil {
d.syncInitHook(origin, height) d.syncInitHook(origin, height)
} }
var headerFetcher func() error
if !beaconMode {
// In legacy mode, headers are retrieved from the network
headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) }
} else {
// In beacon mode, headers are served by the skeleton syncer
headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) }
}
fetchers := []func() error{ fetchers := []func() error{
func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) }, // Headers are always retrieved headerFetcher, // Headers are always retrieved
func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and snap sync func() error { return d.fetchBodies(origin+1, beaconMode) }, // Bodies are retrieved during normal and snap sync
func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during snap sync func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync
func() error { return d.processHeaders(origin+1, td) }, func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) },
} }
if mode == SnapSync { if mode == SnapSync {
d.pivotLock.Lock() d.pivotLock.Lock()
@ -536,7 +593,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
fetchers = append(fetchers, func() error { return d.processSnapSyncContent() }) fetchers = append(fetchers, func() error { return d.processSnapSyncContent() })
} else if mode == FullSync { } else if mode == FullSync {
fetchers = append(fetchers, d.processFullSyncContent) fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) })
} }
return d.spawnSync(fetchers) return d.spawnSync(fetchers)
} }
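syncWithPeer wires the sync stages together as a slice of func() error values and hands them to d.spawnSync, which runs them concurrently and tears everything down on the first failure. A minimal sketch of that fan-out shape (spawn is a hypothetical stand-in, not the downloader's actual spawnSync, which additionally cancels the remaining stages):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// spawn runs every stage on its own goroutine and reports the first error,
// the same fan-out shape as the fetchers slice above.
func spawn(stages []func() error) error {
	var (
		wg    sync.WaitGroup
		once  sync.Once
		first error
	)
	for _, stage := range stages {
		wg.Add(1)
		go func(run func() error) {
			defer wg.Done()
			if err := run(); err != nil {
				once.Do(func() { first = err })
			}
		}(stage)
	}
	wg.Wait()
	return first
}

func main() {
	stages := []func() error{
		func() error { return nil },                          // e.g. the header fetcher
		func() error { return errors.New("bodies stalled") }, // e.g. the body fetcher
	}
	fmt.Println(spawn(stages)) // bodies stalled
}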
@@ -602,6 +659,9 @@ func (d *Downloader) Terminate() {
	case <-d.quitCh:
	default:
		close(d.quitCh)

		// Terminate the internal beacon syncer
		d.skeleton.Terminate()
	}
	d.quitLock.Unlock()
@@ -1127,7 +1187,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
	log.Debug("Filling up skeleton", "from", from)
	d.queue.ScheduleSkeleton(from, skeleton)

	err := d.concurrentFetch((*headerQueue)(d), false)
	if err != nil {
		log.Debug("Skeleton fill failed", "err", err)
	}
@@ -1141,9 +1201,9 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error {
	log.Debug("Downloading block bodies", "origin", from)
	err := d.concurrentFetch((*bodyQueue)(d), beaconMode)
	log.Debug("Block body download terminated", "err", err)
	return err
@@ -1152,9 +1212,9 @@ func (d *Downloader) fetchBodies(from uint64) error {
// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error {
	log.Debug("Downloading receipts", "origin", from)
	err := d.concurrentFetch((*receiptQueue)(d), beaconMode)
	log.Debug("Receipt download terminated", "err", err)
	return err
@@ -1163,7 +1223,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error {
	// Keep a count of uncertain headers to roll back
	var (
		rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis)
@@ -1211,35 +1271,40 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
				case <-d.cancelCh:
				}
			}
			// If we're in legacy sync mode, we need to check total difficulty
			// violations from malicious peers. That is not needed in beacon
			// mode and we can skip to terminating sync.
			if !beaconMode {
				// If no headers were retrieved at all, the peer violated its TD promise that it had a
				// better chain compared to ours. The only exception is if its promised blocks were
				// already imported by other means (e.g. fetcher):
				//
				// R <remote peer>, L <local node>: Both at block 10
				// R: Mine block 11, and propagate it to L
				// L: Queue block 11 for import
				// L: Notice that R's head and TD increased compared to ours, start sync
				// L: Import of block 11 finishes
				// L: Sync begins, and finds common ancestor at 11
				// L: Request new headers up from 11 (R's TD was higher, it must have something)
				// R: Nothing to give
				if mode != LightSync {
					head := d.blockchain.CurrentBlock()
					if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
						return errStallingPeer
					}
				}
				// If snap or light syncing, ensure promised headers are indeed delivered. This is
				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
				// of delivering the post-pivot blocks that would flag the invalid content.
				//
				// This check cannot be executed "as is" for full imports, since blocks may still be
				// queued for processing when the header download completes. However, as long as the
				// peer gave us something useful, we're already happy/progressed (above check).
				if mode == SnapSync || mode == LightSync {
					head := d.lightchain.CurrentHeader()
					if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
						return errStallingPeer
					}
				}
			}
			// Disable any rollback and return
@@ -1281,24 +1346,64 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
				if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
					frequency = 1
				}
				// Although the received headers might be all valid, a legacy
				// PoW/PoA sync must not accept post-merge headers. Make sure
				// that any transition is rejected at this point.
				var (
					rejected []*types.Header
					td       *big.Int
				)
				if !beaconMode && ttd != nil {
					td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1)
					if td == nil {
						// This should never really happen, but handle gracefully for now
						log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash)
						return fmt.Errorf("%w: parent TD missing", errInvalidChain)
					}
					for i, header := range chunkHeaders {
						td = new(big.Int).Add(td, header.Difficulty)
						if td.Cmp(ttd) >= 0 {
							// Terminal total difficulty reached, allow the last header in
							if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 {
								chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:]
								if len(rejected) > 0 {
									// Make a nicer user log as to the first TD truly rejected
									td = new(big.Int).Add(td, rejected[0].Difficulty)
								}
							} else {
								chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:]
							}
							break
						}
					}
				}
				if len(chunkHeaders) > 0 {
					if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
						rollbackErr = err

						// If some headers were inserted, track them as uncertain
						if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
							rollback = chunkHeaders[0].Number.Uint64()
						}
						log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
						return fmt.Errorf("%w: %v", errInvalidChain, err)
					}
					// All verifications passed, track all headers within the allowed limits
					if mode == SnapSync {
						head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
						if head-rollback > uint64(fsHeaderSafetyNet) {
							rollback = head - uint64(fsHeaderSafetyNet)
						} else {
							rollback = 1
						}
					}
				}
				if len(rejected) != 0 {
					// Merge threshold reached, stop importing, but don't roll back
					rollback = 0

					log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd)
					return ErrMergeTransition
				}
			}
			// Unless we're doing light chains, schedule the headers for associated content retrieval
@@ -1342,7 +1447,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
	}
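The rejection loop above is the heart of the merge transition guard: starting from the parent's total difficulty it accumulates each header's difficulty, keeps the first header whose cumulative TD reaches the terminal total difficulty (the transition block itself), and rejects everything after it. A standalone sketch of just that cutoff arithmetic (splitAtTTD is a hypothetical helper, not downloader API):

package main

import (
	"fmt"
	"math/big"
)

// splitAtTTD returns how many blocks a legacy sync may still import:
// parentTD is the total difficulty up to (excluding) the first block,
// diffs holds the per-block difficulties. The block whose cumulative TD
// first reaches ttd is allowed in; everything after it is rejected.
func splitAtTTD(parentTD *big.Int, diffs []*big.Int, ttd *big.Int) int {
	td := new(big.Int).Set(parentTD)
	for i, diff := range diffs {
		td.Add(td, diff)
		if td.Cmp(ttd) >= 0 {
			if new(big.Int).Sub(td, diff).Cmp(ttd) < 0 {
				return i + 1 // this block crossed the threshold: keep it
			}
			return i // threshold was already crossed before this block
		}
	}
	return len(diffs)
}

func main() {
	parent := big.NewInt(90)
	diffs := []*big.Int{big.NewInt(5), big.NewInt(5), big.NewInt(5)}
	fmt.Println(splitAtTTD(parent, diffs, big.NewInt(100))) // 2: the second block is terminal
}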
// processFullSyncContent takes fetch results from the queue and imports them into the chain.
func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error {
	for {
		results := d.queue.Results(true)
		if len(results) == 0 {
@@ -1351,9 +1456,44 @@ func (d *Downloader) processFullSyncContent() error {
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		// Although the received blocks might be all valid, a legacy PoW/PoA sync
		// must not accept post-merge blocks. Make sure that pre-merge blocks are
		// imported, but post-merge ones are rejected.
		var (
			rejected []*fetchResult
			td       *big.Int
		)
		if !beaconMode && ttd != nil {
			td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1)
			if td == nil {
				// This should never really happen, but handle gracefully for now
				log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash)
				return fmt.Errorf("%w: parent TD missing", errInvalidChain)
			}
			for i, result := range results {
				td = new(big.Int).Add(td, result.Header.Difficulty)
				if td.Cmp(ttd) >= 0 {
					// Terminal total difficulty reached, allow the last block in
					if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 {
						results, rejected = results[:i+1], results[i+1:]
						if len(rejected) > 0 {
							// Make a nicer user log as to the first TD truly rejected
							td = new(big.Int).Add(td, rejected[0].Header.Difficulty)
						}
					} else {
						results, rejected = results[:i], results[i:]
					}
					break
				}
			}
		}
		if err := d.importBlockResults(results); err != nil {
			return err
		}
		if len(rejected) != 0 {
			log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd)
			return ErrMergeTransition
		}
	}
}


@@ -75,7 +75,7 @@ func newTester() *downloadTester {
		chain: chain,
		peers: make(map[string]*downloadTesterPeer),
	}
	tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, nil)
	return tester
}
@@ -96,7 +96,7 @@ func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {
		td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64())
	}
	// Synchronise with the chosen peer and ensure proper cleanup afterwards
	err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil)
	select {
	case <-dl.downloader.cancelCh:
		// Ok, downloader fully cancelled after sync cycle
@@ -971,7 +971,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }

		tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync)
		if _, ok := tester.peers[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}


@@ -76,7 +76,7 @@ type typedQueue interface {
// concurrentFetch iteratively downloads scheduled block parts, taking available
// peers, reserving a chunk of fetch requests for each and waiting for delivery
// or timeouts.
func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error {
	// Create a delivery channel to accept responses from all peers
	responses := make(chan *eth.Response)
@@ -127,7 +127,7 @@ func (d *Downloader) concurrentFetch(queue typedQueue) error {
	finished := false
	for {
		// Short circuit if we lost all our peers
		if d.peers.Len() == 0 && !beaconMode {
			return errNoPeers
		}
		// If there's nothing more to fetch, wait or terminate
@@ -209,7 +209,7 @@ func (d *Downloader) concurrentFetch(queue typedQueue) error {
			}
			// Make sure that we have peers available for fetching. If all peers have been
			// tried and all failed, throw an error
			if !progressed && !throttled && len(pending) == 0 && len(idles) == d.peers.Len() && queued > 0 && !beaconMode {
				return errPeersUnavailable
			}
		}
	}
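concurrentFetch is one generic retrieval loop driven through the typedQueue interface, with headerQueue, bodyQueue and receiptQueue acting as thin adapters over the same *Downloader; the new beaconMode flag only relaxes the no-peers abort conditions. A toy version of that adapter pattern (all names below are illustrative simplifications, not the downloader's real API):

package main

import "fmt"

// typedQueue is the minimal surface the shared fetch loop needs.
type typedQueue interface {
	pending() int
	fetchOne() (string, error)
}

type bodyQueue struct{ left int }

func (q *bodyQueue) pending() int                { return q.left }
func (q *bodyQueue) fetchOne() (string, error)   { q.left--; return "body", nil }

type receiptQueue struct{ left int }

func (q *receiptQueue) pending() int              { return q.left }
func (q *receiptQueue) fetchOne() (string, error) { q.left--; return "receipt", nil }

// concurrentFetch drains whichever queue it is handed; a beaconMode flag
// would only change when the loop gives up, not how it fetches.
func concurrentFetch(q typedQueue) error {
	for q.pending() > 0 {
		item, err := q.fetchOne()
		if err != nil {
			return err
		}
		fmt.Println("fetched", item)
	}
	return nil
}

func main() {
	concurrentFetch(&bodyQueue{left: 2})
	concurrentFetch(&receiptQueue{left: 1})
}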


@@ -294,19 +294,19 @@ func (ps *peerSet) AllPeers() []*peerConnection {
// peerCapacitySort implements sort.Interface.
// It sorts peer connections by capacity (descending).
type peerCapacitySort struct {
	peers []*peerConnection
	caps  []int
}

func (ps *peerCapacitySort) Len() int {
	return len(ps.peers)
}

func (ps *peerCapacitySort) Less(i, j int) bool {
	return ps.caps[i] > ps.caps[j]
}

func (ps *peerCapacitySort) Swap(i, j int) {
	ps.peers[i], ps.peers[j] = ps.peers[j], ps.peers[i]
	ps.caps[i], ps.caps[j] = ps.caps[j], ps.caps[i]
}
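The rename makes the parallel-slice trick explicit: peers and caps are sorted in lockstep, descending by capacity. A runnable sketch of the same pattern, with string IDs standing in for *peerConnection:

package main

import (
	"fmt"
	"sort"
)

type peerCapacitySort struct {
	peers []string // stand-in for []*peerConnection
	caps  []int
}

func (ps *peerCapacitySort) Len() int           { return len(ps.peers) }
func (ps *peerCapacitySort) Less(i, j int) bool { return ps.caps[i] > ps.caps[j] }
func (ps *peerCapacitySort) Swap(i, j int) {
	ps.peers[i], ps.peers[j] = ps.peers[j], ps.peers[i]
	ps.caps[i], ps.caps[j] = ps.caps[j], ps.caps[i]
}

func main() {
	s := &peerCapacitySort{
		peers: []string{"a", "b", "c"},
		caps:  []int{10, 30, 20},
	}
	sort.Sort(s)
	fmt.Println(s.peers, s.caps) // [b c a] [30 20 10]
}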

eth/downloader/skeleton.go (new file, 1063 lines; diff suppressed because it is too large)
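Although the skeleton.go diff is suppressed, the tests below pin down the persisted data model. From their usage alone, the progress blob is roughly the following shape (a reconstruction from the test code, not the suppressed source; the real subchain type may carry additional fields):

// subchain describes a contiguous run of already-downloaded skeleton
// headers, identified by the block numbers of its endpoints.
type subchain struct {
	Head uint64 // Block number of the newest header in the subchain
	Tail uint64 // Block number of the oldest header in the subchain
}

// skeletonProgress is the JSON blob written via rawdb.WriteSkeletonSyncStatus
// so an interrupted sync can resume across restarts.
type skeletonProgress struct {
	Subchains []*subchain // Disjoint subchains, newest first
}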


@@ -0,0 +1,896 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package downloader
import (
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/log"
)
// hookedBackfiller is a tester backfiller with all interface methods mocked and
// hooked so tests can implement only the things they need.
type hookedBackfiller struct {
// suspendHook is an optional hook to be called when the filler is requested
// to be suspended.
suspendHook func()
// resumeHook is an optional hook to be called when the filler is requested
// to be resumed.
resumeHook func()
}
// newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
// essentially acting as a noop.
func newHookedBackfiller() backfiller {
return new(hookedBackfiller)
}
// suspend requests the backfiller to abort any running full or snap sync
// based on the skeleton chain as it might be invalid. The backfiller should
// gracefully handle multiple consecutive suspends without a resume, even
// on initial startup.
func (hf *hookedBackfiller) suspend() {
if hf.suspendHook != nil {
hf.suspendHook()
}
}
// resume requests the backfiller to start running fill or snap sync based on
// the skeleton chain as it has successfully been linked. Appending new heads
// to the end of the chain will not result in suspend/resume cycles.
func (hf *hookedBackfiller) resume() {
if hf.resumeHook != nil {
hf.resumeHook()
}
}
// skeletonTestPeer is a mock peer that can only serve header requests from a
// pre-prepared header chain (which may be arbitrarily wrong for testing).
//
// Requesting anything else from these peers will hard panic. Note, do *not*
// implement any other methods. We actually want to make sure that the skeleton
// syncer depends only - and will only ever depend - on header requests.
type skeletonTestPeer struct {
id string // Unique identifier of the mock peer
headers []*types.Header // Headers to serve when requested
serve func(origin uint64) []*types.Header // Hook to allow custom responses
served uint64 // Number of headers served by this peer
dropped uint64 // Flag whether the peer was dropped (stop responding)
}
// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
return &skeletonTestPeer{
id: id,
headers: headers,
}
}
// newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync with,
// and sets an optional serve hook that can return headers for delivery instead
// of the predefined chain. Useful for emulating malicious behavior that would
// otherwise require dedicated peer types.
func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
return &skeletonTestPeer{
id: id,
headers: headers,
serve: serve,
}
}
// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
// origin; associated with a particular peer in the download tester. The returned
// function can be used to retrieve batches of headers from the particular peer.
func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
// Since skeleton test peers are in-memory mocks, dropping them does not
// make them inaccessible. As such, check a local `dropped` field to see
// if the peer has been dropped and should not respond any more.
if atomic.LoadUint64(&p.dropped) != 0 {
return nil, errors.New("peer already dropped")
}
// Skeleton sync retrieves batches of headers going backward without gaps.
// This ensures we can follow a clean parent progression without any reorg
// hiccups. There is no need for any other type of header retrieval, so do
// panic if there's such a request.
if !reverse || skip != 0 {
// Note, if other clients want to do these kinds of requests, it's their
// problem, it will still work. We just don't want *us* making complicated
// requests without a very strong reason to.
panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
}
// If the skeleton syncer requests the genesis block, panic. Whilst it could
// be considered a valid request, our code specifically should not request it
// ever since we want to link up headers to an existing local chain, which at
// worse will be the genesis.
if int64(origin)-int64(amount) < 0 {
panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
}
// To make concurrency easier, the skeleton syncer always requests fixed size
// batches of headers. Panic if the peer is requested an amount other than the
// configured batch size (apart from the request leading to the genesis).
if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
}
// Simple reverse header retrieval. Fill from the peer's chain and return.
// If the tester has a serve hook set, try to use that before falling back
// to the default behavior.
var headers []*types.Header
if p.serve != nil {
headers = p.serve(origin)
}
if headers == nil {
headers = make([]*types.Header, 0, amount)
if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
for i := 0; i < amount; i++ {
// Consider nil headers as a form of attack and withhold them. Nil
// cannot be decoded from RLP, so it's not possible to produce an
// attack by sending/receiving those over eth.
header := p.headers[int(origin)-i]
if header == nil {
continue
}
headers = append(headers, header)
}
}
}
atomic.AddUint64(&p.served, uint64(len(headers)))
hashes := make([]common.Hash, len(headers))
for i, header := range headers {
hashes[i] = header.Hash()
}
// Deliver the headers to the downloader
req := &eth.Request{
Peer: p.id,
}
res := &eth.Response{
Req: req,
Res: (*eth.BlockHeadersPacket)(&headers),
Meta: hashes,
Time: 1,
Done: make(chan error),
}
go func() {
sink <- res
if err := <-res.Done; err != nil {
log.Warn("Skeleton test peer response rejected", "err", err)
atomic.AddUint64(&p.dropped, 1)
}
}()
return req, nil
}
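The method above shows the delivery contract between a peer and the downloader: the peer returns an eth.Request immediately, pushes the eth.Response into the sink from a goroutine, and then blocks on the response's Done channel to learn whether the delivery was accepted. A compressed sketch of that handshake, with simplified stand-in types rather than the real eth package:

package main

import "fmt"

type response struct {
	payload string
	done    chan error
}

// request hands back control immediately and delivers asynchronously,
// reacting (here: just logging) if the consumer rejects the data.
func request(sink chan *response) {
	res := &response{payload: "headers", done: make(chan error)}
	go func() {
		sink <- res
		if err := <-res.done; err != nil {
			fmt.Println("delivery rejected:", err) // would mark the peer dropped
		}
	}()
}

func main() {
	sink := make(chan *response)
	request(sink)
	res := <-sink
	fmt.Println("received", res.payload)
	res.done <- nil // accept the delivery
}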
func (p *skeletonTestPeer) Head() (common.Hash, *big.Int) {
panic("skeleton sync must not request the remote head")
}
func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) {
panic("skeleton sync must not request headers by hash")
}
func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
panic("skeleton sync must not request block bodies")
}
func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
panic("skeleton sync must not request receipts")
}
// Tests various sync initializations based on previous leftovers in the database
// and announced heads.
func TestSkeletonSyncInit(t *testing.T) {
// Create a few key headers
var (
genesis = &types.Header{Number: big.NewInt(0)}
block49 = &types.Header{Number: big.NewInt(49)}
block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
block50 = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
)
tests := []struct {
headers []*types.Header // Database content (beside the genesis)
oldstate []*subchain // Old sync state with various interrupted subchains
head *types.Header // New head header to announce to reorg to
newstate []*subchain // Expected sync state after the reorg
}{
// Completely empty database with only the genesis set. The sync is expected
// to create a single subchain with the requested head.
{
head: block50,
newstate: []*subchain{{Head: 50, Tail: 50}},
},
// Empty database with only the genesis set with a leftover empty sync
// progress. This is a synthetic case, just for the sake of covering things.
{
oldstate: []*subchain{},
head: block50,
newstate: []*subchain{{Head: 50, Tail: 50}},
},
// A single leftover subchain is present, older than the new head. The
// old subchain should be left as is and a new one appended to the sync
// status.
{
oldstate: []*subchain{{Head: 10, Tail: 5}},
head: block50,
newstate: []*subchain{
{Head: 50, Tail: 50},
{Head: 10, Tail: 5},
},
},
// Multiple leftover subchains are present, older than the new head. The
// old subchains should be left as is and a new one appended to the sync
// status.
{
oldstate: []*subchain{
{Head: 20, Tail: 15},
{Head: 10, Tail: 5},
},
head: block50,
newstate: []*subchain{
{Head: 50, Tail: 50},
{Head: 20, Tail: 15},
{Head: 10, Tail: 5},
},
},
// A single leftover subchain is present, newer than the new head. The
// newer subchain should be deleted and a fresh one created for the head.
{
oldstate: []*subchain{{Head: 65, Tail: 60}},
head: block50,
newstate: []*subchain{{Head: 50, Tail: 50}},
},
// Multiple leftover subchain is present, newer than the new head. The
// newer subchains should be deleted and a fresh one created for the head.
{
oldstate: []*subchain{
{Head: 75, Tail: 70},
{Head: 65, Tail: 60},
},
head: block50,
newstate: []*subchain{{Head: 50, Tail: 50}},
},
// Two leftover subchains are present, one fully older and one fully
// newer than the announced head. The head should delete the newer one,
// keeping the older one.
{
oldstate: []*subchain{
{Head: 65, Tail: 60},
{Head: 10, Tail: 5},
},
head: block50,
newstate: []*subchain{
{Head: 50, Tail: 50},
{Head: 10, Tail: 5},
},
},
// Multiple leftover subchains are present, some fully older and some
// fully newer than the announced head. The head should delete the newer
// ones, keeping the older ones.
{
oldstate: []*subchain{
{Head: 75, Tail: 70},
{Head: 65, Tail: 60},
{Head: 20, Tail: 15},
{Head: 10, Tail: 5},
},
head: block50,
newstate: []*subchain{
{Head: 50, Tail: 50},
{Head: 20, Tail: 15},
{Head: 10, Tail: 5},
},
},
// A single leftover subchain is present and the new head is extending
// it with one more header. We expect the subchain head to be pushed
// forward.
{
headers: []*types.Header{block49},
oldstate: []*subchain{{Head: 49, Tail: 5}},
head: block50,
newstate: []*subchain{{Head: 50, Tail: 5}},
},
// A single leftover subchain is present and although the new head does
// extend it number wise, the hash chain does not link up. We expect a
// new subchain to be created for the dangling head.
{
headers: []*types.Header{block49B},
oldstate: []*subchain{{Head: 49, Tail: 5}},
head: block50,
newstate: []*subchain{
{Head: 50, Tail: 50},
{Head: 49, Tail: 5},
},
},
// A single leftover subchain is present. A new head is announced that
// links into the middle of it, correctly anchoring into an existing
// header. We expect the old subchain to be truncated and extended with
// the new head.
{
headers: []*types.Header{block49},
oldstate: []*subchain{{Head: 100, Tail: 5}},
head: block50,
newstate: []*subchain{{Head: 50, Tail: 5}},
},
// A single leftover subchain is present. A new head is announced that
// links into the middle of it, but does not anchor into an existing
// header. We expect the old subchain to be truncated and a new chain
// be created for the dangling head.
{
headers: []*types.Header{block49B},
oldstate: []*subchain{{Head: 100, Tail: 5}},
head: block50,
newstate: []*subchain{
{Head: 50, Tail: 50},
{Head: 49, Tail: 5},
},
},
}
for i, tt := range tests {
// Create a fresh database and initialize it with the starting state
db := rawdb.NewMemoryDatabase()
rawdb.WriteHeader(db, genesis)
for _, header := range tt.headers {
rawdb.WriteSkeletonHeader(db, header)
}
if tt.oldstate != nil {
blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate})
rawdb.WriteSkeletonSyncStatus(db, blob)
}
// Create a skeleton sync and run a cycle
wait := make(chan struct{})
skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
skeleton.syncStarting = func() { close(wait) }
skeleton.Sync(tt.head, true)
<-wait
skeleton.Terminate()
// Ensure the correct resulting sync status
var progress skeletonProgress
json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
if len(progress.Subchains) != len(tt.newstate) {
t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
continue
}
for j := 0; j < len(progress.Subchains); j++ {
if progress.Subchains[j].Head != tt.newstate[j].Head {
t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
}
if progress.Subchains[j].Tail != tt.newstate[j].Tail {
t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
}
}
}
}
// Tests that a running skeleton sync can be extended with properly linked up
// headers but not with side chains.
func TestSkeletonSyncExtend(t *testing.T) {
// Create a few key headers
var (
genesis = &types.Header{Number: big.NewInt(0)}
block49 = &types.Header{Number: big.NewInt(49)}
block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
block50 = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
block51 = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()}
)
tests := []struct {
head *types.Header // New head header to announce to reorg to
extend *types.Header // New head header to announce to extend with
newstate []*subchain // Expected sync state after the reorg
err error // Whether extension succeeds or not
}{
// Initialize a sync and try to extend it with a subsequent block.
{
head: block49,
extend: block50,
newstate: []*subchain{
{Head: 50, Tail: 49},
},
},
// Initialize a sync and try to extend it with the existing head block.
{
head: block49,
extend: block49,
newstate: []*subchain{
{Head: 49, Tail: 49},
},
err: errReorgDenied,
},
// Initialize a sync and try to extend it with a sibling block.
{
head: block49,
extend: block49B,
newstate: []*subchain{
{Head: 49, Tail: 49},
},
err: errReorgDenied,
},
// Initialize a sync and try to extend it with a number-wise sequential
// header, but a hash wise non-linking one.
{
head: block49B,
extend: block50,
newstate: []*subchain{
{Head: 49, Tail: 49},
},
err: errReorgDenied,
},
// Initialize a sync and try to extend it with a non-linking future block.
{
head: block49,
extend: block51,
newstate: []*subchain{
{Head: 49, Tail: 49},
},
err: errReorgDenied,
},
// Initialize a sync and try to extend it with a past canonical block.
{
head: block50,
extend: block49,
newstate: []*subchain{
{Head: 50, Tail: 50},
},
err: errReorgDenied,
},
// Initialize a sync and try to extend it with a past sidechain block.
{
head: block50,
extend: block49B,
newstate: []*subchain{
{Head: 50, Tail: 50},
},
err: errReorgDenied,
},
}
for i, tt := range tests {
// Create a fresh database and initialize it with the starting state
db := rawdb.NewMemoryDatabase()
rawdb.WriteHeader(db, genesis)
// Create a skeleton sync and run a cycle
wait := make(chan struct{})
skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
skeleton.syncStarting = func() { close(wait) }
skeleton.Sync(tt.head, true)
<-wait
if err := skeleton.Sync(tt.extend, false); err != tt.err {
t.Errorf("extension failure mismatch: have %v, want %v", err, tt.err)
}
skeleton.Terminate()
// Ensure the correct resulting sync status
var progress skeletonProgress
json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
if len(progress.Subchains) != len(tt.newstate) {
t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
continue
}
for j := 0; j < len(progress.Subchains); j++ {
if progress.Subchains[j].Head != tt.newstate[j].Head {
t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
}
if progress.Subchains[j].Tail != tt.newstate[j].Tail {
t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
}
}
}
}
// Tests that the skeleton sync correctly retrieves headers from one or more
// peers without duplicates or other strange side effects.
func TestSkeletonSyncRetrievals(t *testing.T) {
log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// Since skeleton headers don't need to be meaningful, beyond a parent hash
// progression, create a long fake chain to test with.
chain := []*types.Header{{Number: big.NewInt(0)}}
for i := 1; i < 10000; i++ {
chain = append(chain, &types.Header{
ParentHash: chain[i-1].Hash(),
Number: big.NewInt(int64(i)),
})
}
tests := []struct {
headers []*types.Header // Database content (beside the genesis)
oldstate []*subchain // Old sync state with various interrupted subchains
head *types.Header // New head header to announce to reorg to
peers []*skeletonTestPeer // Initial peer set to start the sync with
midstate []*subchain // Expected sync state after initial cycle
midserve uint64 // Expected number of header retrievals after initial cycle
middrop uint64 // Expected number of peers dropped after initial cycle
newHead *types.Header // New header to announce on top of the old one
newPeer *skeletonTestPeer // New peer to join the skeleton syncer
endstate []*subchain // Expected sync state after the post-init event
endserve uint64 // Expected number of header retrievals after the post-init event
enddrop uint64 // Expected number of peers dropped after the post-init event
}{
// Completely empty database with only the genesis set. The sync is expected
// to create a single subchain with the requested head. No peers however, so
// the sync should be stuck without any progression.
//
// When a new peer is added, it should detect the join and fill the headers
// to the genesis block.
{
head: chain[len(chain)-1],
midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}},
newPeer: newSkeletonTestPeer("test-peer", chain),
endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
endserve: uint64(len(chain) - 2), // len - head - genesis
},
// Completely empty database with only the genesis set. The sync is expected
// to create a single subchain with the requested head. With one valid peer,
// the sync is expected to complete already in the initial round.
//
// Adding a second peer should not have any effect.
{
head: chain[len(chain)-1],
peers: []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)},
midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
midserve: uint64(len(chain) - 2), // len - head - genesis
newPeer: newSkeletonTestPeer("test-peer-2", chain),
endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
endserve: uint64(len(chain) - 2), // len - head - genesis
},
// Completely empty database with only the genesis set. The sync is expected
// to create a single subchain with the requested head. With many valid peers,
// the sync is expected to complete already in the initial round.
//
// Adding a new peer should not have any effect.
{
head: chain[len(chain)-1],
peers: []*skeletonTestPeer{
newSkeletonTestPeer("test-peer-1", chain),
newSkeletonTestPeer("test-peer-2", chain),
newSkeletonTestPeer("test-peer-3", chain),
},
midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
midserve: uint64(len(chain) - 2), // len - head - genesis
newPeer: newSkeletonTestPeer("test-peer-4", chain),
endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
endserve: uint64(len(chain) - 2), // len - head - genesis
},
// This test checks if a peer tries to withhold a header - *on* the sync
// boundary - instead of sending the requested amount. The malicious short
// package should not be accepted.
//
// Joining with a new peer should however unblock the sync.
{
head: chain[requestHeaders+100],
peers: []*skeletonTestPeer{
newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
},
midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
middrop: 1, // penalize shortened header deliveries
newPeer: newSkeletonTestPeer("good-peer", chain),
endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
enddrop: 1, // no new drops
},
// This test checks if a peer tries to withhold a header - *off* the sync
// boundary - instead of sending the requested amount. The malicious short
// package should not be accepted.
//
// Joining with a new peer should however unblock the sync.
{
head: chain[requestHeaders+100],
peers: []*skeletonTestPeer{
newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
},
midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
middrop: 1, // penalize shortened header deliveries
newPeer: newSkeletonTestPeer("good-peer", chain),
endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
enddrop: 1, // no new drops
},
// This test checks if a peer tries to duplicate a header - *on* the sync
// boundary - instead of sending the correct sequence. The malicious duped
// package should not be accepted.
//
// Joining with a new peer should however unblock the sync.
{
head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
peers: []*skeletonTestPeer{
newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
},
midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
midserve: requestHeaders + 101 - 2, // len - head - genesis
middrop: 1, // penalize invalid header sequences
newPeer: newSkeletonTestPeer("good-peer", chain),
endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
enddrop: 1, // no new drops
},
// This test checks if a peer tries to duplicate a header - *off* the sync
// boundary - instead of sending the correct sequence. The malicious duped
// package should not be accepted.
//
// Joining with a new peer should however unblock the sync.
{
head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
peers: []*skeletonTestPeer{
newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
},
midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
midserve: requestHeaders + 101 - 2, // len - head - genesis
middrop: 1, // penalize invalid header sequences
newPeer: newSkeletonTestPeer("good-peer", chain),
endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
enddrop: 1, // no new drops
},
// This test checks if a peer tries to inject a different header - *on*
// the sync boundary - instead of sending the correct sequence. The bad
// package should not be accepted.
//
// Joining with a new peer should however unblock the sync.
{
head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
peers: []*skeletonTestPeer{
newSkeletonTestPeer("header-changer",
append(
append(
append([]*types.Header{}, chain[:99]...),
&types.Header{
ParentHash: chain[98].Hash(),
Number: big.NewInt(int64(99)),
GasLimit: 1,
},
), chain[100:]...,
),
),
},
midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
midserve: requestHeaders + 101 - 2, // len - head - genesis
middrop: 1, // different set of headers, drop // TODO(karalabe): maybe just diff sync?
newPeer: newSkeletonTestPeer("good-peer", chain),
endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
enddrop: 1, // no new drops
},
// This test checks if a peer tries to inject a different header - *off*
// the sync boundary - instead of sending the correct sequence. The bad
// package should not be accepted.
//
// Joining with a new peer should however unblock the sync.
{
head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
peers: []*skeletonTestPeer{
newSkeletonTestPeer("header-changer",
append(
append(
append([]*types.Header{}, chain[:50]...),
&types.Header{
ParentHash: chain[49].Hash(),
Number: big.NewInt(int64(50)),
GasLimit: 1,
},
), chain[51:]...,
),
),
},
midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
midserve: requestHeaders + 101 - 2, // len - head - genesis
middrop: 1, // different set of headers, drop
newPeer: newSkeletonTestPeer("good-peer", chain),
endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
enddrop: 1, // no new drops
},
// This test reproduces a bug caught during review (kudos to @holiman)
// where a subchain is merged with a previously interrupted one, causing
// pending data in the scratch space to become "invalid" (since we jump
// ahead during subchain merge). In that case it is expected to ignore
// the queued up data instead of trying to process on top of a shifted
// task set.
//
// The test is a bit convoluted since it needs to trigger a concurrency
// issue. First we sync up an initial chain of 2x512 items. Then announce
// 2x512+2 as head and delay delivering the head batch to fill the scratch
// space first. The delivery head should merge with the previous download
// and the scratch space must not be consumed further.
{
head: chain[2*requestHeaders],
peers: []*skeletonTestPeer{
newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header {
if origin == chain[2*requestHeaders+1].Number.Uint64() {
time.Sleep(100 * time.Millisecond)
}
return nil // Fallback to default behavior, just delayed
}),
newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header {
if origin == chain[2*requestHeaders+1].Number.Uint64() {
time.Sleep(100 * time.Millisecond)
}
return nil // Fallback to default behavior, just delayed
}),
},
midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}},
midserve: 2*requestHeaders - 1, // len - head - genesis
newHead: chain[2*requestHeaders+2],
endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}},
endserve: 4 * requestHeaders,
},
}
for i, tt := range tests {
// Create a fresh database and initialize it with the starting state
db := rawdb.NewMemoryDatabase()
rawdb.WriteHeader(db, chain[0])
// Create a peer set to feed headers through
peerset := newPeerSet()
for _, peer := range tt.peers {
peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id)))
}
// Create a peer dropper to track malicious peers
dropped := make(map[string]int)
drop := func(peer string) {
if p := peerset.Peer(peer); p != nil {
atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1)
}
peerset.Unregister(peer)
dropped[peer]++
}
// Create a skeleton sync and run a cycle
skeleton := newSkeleton(db, peerset, drop, newHookedBackfiller())
skeleton.Sync(tt.head, true)
var progress skeletonProgress
// Wait a bit (bleah) for the initial sync loop to go to idle. This might
// be either a finish or a never-start hence why there's no event to hook.
check := func() error {
if len(progress.Subchains) != len(tt.midstate) {
return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate))
}
for j := 0; j < len(progress.Subchains); j++ {
if progress.Subchains[j].Head != tt.midstate[j].Head {
return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head)
}
if progress.Subchains[j].Tail != tt.midstate[j].Tail {
return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail)
}
}
return nil
}
waitStart := time.Now()
for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
time.Sleep(waitTime)
// Check the post-init end state if it matches the required results
json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
if err := check(); err == nil {
break
}
}
if err := check(); err != nil {
t.Error(err)
continue
}
var served uint64
for _, peer := range tt.peers {
served += atomic.LoadUint64(&peer.served)
}
if served != tt.midserve {
t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve)
}
var drops uint64
for _, peer := range tt.peers {
drops += atomic.LoadUint64(&peer.dropped)
}
if drops != tt.middrop {
t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
}
// Apply the post-init events if there's any
if tt.newHead != nil {
skeleton.Sync(tt.newHead, true)
}
if tt.newPeer != nil {
if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
t.Errorf("test %d: failed to register new peer: %v", i, err)
}
}
// Wait a bit (bleah) for the second sync loop to go to idle. This might
// be either a finish or a never-start hence why there's no event to hook.
check = func() error {
if len(progress.Subchains) != len(tt.endstate) {
return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate))
}
for j := 0; j < len(progress.Subchains); j++ {
if progress.Subchains[j].Head != tt.endstate[j].Head {
return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head)
}
if progress.Subchains[j].Tail != tt.endstate[j].Tail {
return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail)
}
}
return nil
}
waitStart = time.Now()
for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
time.Sleep(waitTime)
// Check the post-init end state if it matches the required results
json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
if err := check(); err == nil {
break
}
}
if err := check(); err != nil {
t.Error(err)
continue
}
// Check that the peers served no more headers than we actually needed
served = 0
for _, peer := range tt.peers {
served += atomic.LoadUint64(&peer.served)
}
if tt.newPeer != nil {
served += atomic.LoadUint64(&tt.newPeer.served)
}
if served != tt.endserve {
t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve)
}
drops = 0
for _, peer := range tt.peers {
drops += atomic.LoadUint64(&peer.dropped)
}
if tt.newPeer != nil {
drops += atomic.LoadUint64(&tt.newPeer.dropped)
}
if drops != tt.enddrop {
t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.enddrop)
}
// Clean up any leftover skeleton sync resources
skeleton.Terminate()
}
}


@@ -138,8 +138,10 @@ type Config struct {
	TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.

	// PeerRequiredBlocks is a set of block number -> hash mappings which must be in the
	// canonical chain of all remote peers. Setting the option makes geth verify the
	// presence of these blocks for every new peer connection.
	PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`

	// Light client options
	LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests

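For programmatic users the rename is mechanical: the old Whitelist map moves to PeerRequiredBlocks with identical semantics. A minimal sketch of setting it from code (the block number and hash here are made-up values for illustration):

package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

func main() {
	cfg := ethconfig.Defaults
	// Refuse to keep any peer whose canonical chain does not contain
	// this block: the old --whitelist behavior under its new name.
	cfg.PeerRequiredBlocks = map[uint64]common.Hash{
		14_000_000: common.HexToHash("0xdeadbeef"), // hypothetical pair
	}
	_ = cfg
}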

@@ -26,7 +26,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
		NoPruning          bool
		NoPrefetch         bool
		TxLookupLimit      uint64                 `toml:",omitempty"`
		PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
		LightServ          int                    `toml:",omitempty"`
		LightIngress       int                    `toml:",omitempty"`
		LightEgress        int                    `toml:",omitempty"`
@@ -71,7 +71,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
	enc.NoPruning = c.NoPruning
	enc.NoPrefetch = c.NoPrefetch
	enc.TxLookupLimit = c.TxLookupLimit
	enc.PeerRequiredBlocks = c.PeerRequiredBlocks
	enc.LightServ = c.LightServ
	enc.LightIngress = c.LightIngress
	enc.LightEgress = c.LightEgress
@@ -120,7 +120,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
		NoPruning          *bool
		NoPrefetch         *bool
		TxLookupLimit      *uint64                `toml:",omitempty"`
		PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
		LightServ          *int                   `toml:",omitempty"`
		LightIngress       *int                   `toml:",omitempty"`
		LightEgress        *int                   `toml:",omitempty"`
@@ -184,8 +184,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
	if dec.TxLookupLimit != nil {
		c.TxLookupLimit = *dec.TxLookupLimit
	}
	if dec.PeerRequiredBlocks != nil {
		c.PeerRequiredBlocks = dec.PeerRequiredBlocks
	}
	if dec.LightServ != nil {
		c.LightServ = *dec.LightServ
	}


@@ -117,7 +117,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
 		reward, _ := tx.EffectiveGasTip(bf.block.BaseFee())
 		sorter[i] = txGasAndReward{gasUsed: bf.receipts[i].GasUsed, reward: reward}
 	}
-	sort.Sort(sorter)
+	sort.Stable(sorter)

 	var txIndex int
 	sumGasUsed := sorter[0].gasUsed
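
The switch from sort.Sort to sort.Stable matters because transactions can share the same effective tip: a stable sort keeps equal-reward entries in their original block order, so the fee-history percentiles come out deterministic across runs. A self-contained sketch of the distinction (illustrative names, not from the diff):

package main

import (
	"fmt"
	"sort"
)

// pair mimics txGasAndReward: ordered by reward only, so ties are possible.
type pair struct{ idx, reward int }

type byReward []pair

func (s byReward) Len() int           { return len(s) }
func (s byReward) Less(i, j int) bool { return s[i].reward < s[j].reward }
func (s byReward) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	txs := []pair{{0, 5}, {1, 5}, {2, 1}}
	sort.Stable(byReward(txs))
	// With sort.Stable the two reward-5 entries keep their block order,
	// idx 0 before idx 1; plain sort.Sort makes no such guarantee.
	fmt.Println(txs) // [{2 1} {0 5} {1 5}]
}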


@@ -86,7 +86,8 @@ type handlerConfig struct {
 	BloomCache uint64                 // Megabytes to alloc for snap sync bloom
 	EventMux   *event.TypeMux         // Legacy event mux, deprecate for `feed`
 	Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
-	Whitelist  map[uint64]common.Hash // Hard coded whitelist for sync challenged
+
+	PeerRequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
 }

 type handler struct {
@@ -115,7 +116,7 @@ type handler struct {
 	txsSub        event.Subscription
 	minedBlockSub *event.TypeMuxSubscription

-	whitelist map[uint64]common.Hash
+	peerRequiredBlocks map[uint64]common.Hash

 	// channels for fetcher, syncer, txsyncLoop
 	quitSync chan struct{}
@@ -132,16 +133,16 @@ func newHandler(config *handlerConfig) (*handler, error) {
 		config.EventMux = new(event.TypeMux) // Nicety initialization for tests
 	}
 	h := &handler{
-		networkID:  config.Network,
-		forkFilter: forkid.NewFilter(config.Chain),
-		eventMux:   config.EventMux,
-		database:   config.Database,
-		txpool:     config.TxPool,
-		chain:      config.Chain,
-		peers:      newPeerSet(),
-		merger:     config.Merger,
-		whitelist:  config.Whitelist,
-		quitSync:   make(chan struct{}),
+		networkID:          config.Network,
+		forkFilter:         forkid.NewFilter(config.Chain),
+		eventMux:           config.EventMux,
+		database:           config.Database,
+		txpool:             config.TxPool,
+		chain:              config.Chain,
+		peers:              newPeerSet(),
+		merger:             config.Merger,
+		peerRequiredBlocks: config.PeerRequiredBlocks,
+		quitSync:           make(chan struct{}),
 	}
 	if config.Sync == downloader.FullSync {
 		// The database seems empty as the current block is the genesis. Yet the snap
@@ -171,10 +172,30 @@ func newHandler(config *handlerConfig) (*handler, error) {
 		h.checkpointNumber = (config.Checkpoint.SectionIndex+1)*params.CHTFrequency - 1
 		h.checkpointHash = config.Checkpoint.SectionHead
 	}
+	// If sync succeeds, pass a callback to potentially disable snap sync mode
+	// and enable transaction propagation.
+	success := func() {
+		// If we were running snap sync and it finished, disable doing another
+		// round on next sync cycle
+		if atomic.LoadUint32(&h.snapSync) == 1 {
+			log.Info("Snap sync complete, auto disabling")
+			atomic.StoreUint32(&h.snapSync, 0)
+		}
+		// If we've successfully finished a sync cycle and passed any required
+		// checkpoint, enable accepting transactions from the network
+		head := h.chain.CurrentBlock()
+		if head.NumberU64() >= h.checkpointNumber {
+			// Checkpoint passed, sanity check the timestamp to have a fallback mechanism
+			// for non-checkpointed (number = 0) private networks.
+			if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
+				atomic.StoreUint32(&h.acceptTxs, 1)
+			}
+		}
+	}
 	// Construct the downloader (long sync) and its backing state bloom if snap
 	// sync is requested. The downloader is responsible for deallocating the state
 	// bloom when it's done.
-	h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer)
+	h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer, success)

 	// Construct the fetcher (short sync)
 	validator := func(header *types.Header) error {
@@ -403,8 +424,8 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
 			}
 		}()
 	}
-	// If we have any explicit whitelist block hashes, request them
-	for number, hash := range h.whitelist {
+	// If we have any explicit peer required block hashes, request them
+	for number, hash := range h.peerRequiredBlocks {
 		resCh := make(chan *eth.Response)
 		if _, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh); err != nil {
 			return err
@@ -417,25 +438,25 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
 			case res := <-resCh:
 				headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
 				if len(headers) == 0 {
-					// Whitelisted blocks are allowed to be missing if the remote
-					// node is not yet synced
+					// Required blocks are allowed to be missing if the remote
+					// node is not yet synced
 					res.Done <- nil
 					return
 				}
 				// Validate the header and either drop the peer or continue
 				if len(headers) > 1 {
-					res.Done <- errors.New("too many headers in whitelist response")
+					res.Done <- errors.New("too many headers in required block response")
 					return
 				}
 				if headers[0].Number.Uint64() != number || headers[0].Hash() != hash {
-					peer.Log().Info("Whitelist mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
-					res.Done <- errors.New("whitelist block mismatch")
+					peer.Log().Info("Required block mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
+					res.Done <- errors.New("required block mismatch")
 					return
 				}
-				peer.Log().Debug("Whitelist block verified", "number", number, "hash", hash)
+				peer.Log().Debug("Peer required block verified", "number", number, "hash", hash)
 				res.Done <- nil
 			case <-timeout.C:
-				peer.Log().Warn("Whitelist challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
+				peer.Log().Warn("Required block challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
 				h.removePeer(peer.ID())
 			}
 		}(number, hash)


@@ -570,7 +570,7 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
 				t.Fatalf("failed to answer challenge: %v", err)
 			}
 		} else {
-			responseRlp, _ := rlp.EncodeToBytes(types.Header{Number: response.Number})
+			responseRlp, _ := rlp.EncodeToBytes(&types.Header{Number: response.Number})
 			if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil {
 				t.Fatalf("failed to answer challenge: %v", err)
 			}
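
This value-to-pointer change, and the many similar ones further down, line up with this release's generated RLP encoders, which are defined on pointer receivers; passing the pointer uses them directly instead of falling back to reflection. A minimal sketch mirroring the test's own call:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Passing &types.Header lets rlp pick up the EncodeRLP implementation
	// defined on the pointer type rather than reflecting over the struct.
	enc, err := rlp.EncodeToBytes(&types.Header{Number: big.NewInt(1)})
	if err != nil {
		panic(err)
	}
	fmt.Printf("header RLP: %x\n", enc)
}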


@@ -230,7 +230,7 @@ func (ps *peerSet) snapLen() int {
 }

 // peerWithHighestTD retrieves the known peer with the currently highest total
-// difficulty.
+// difficulty, but below the given PoS switchover threshold.
 func (ps *peerSet) peerWithHighestTD() *eth.Peer {
 	ps.lock.RLock()
 	defer ps.lock.RUnlock()


@@ -264,11 +264,11 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 			headers = append(headers, backend.chain.GetBlockByHash(hash).Header())
 		}
 		// Send the hash request and verify the response
-		p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+		p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
 			RequestId:             123,
 			GetBlockHeadersPacket: tt.query,
 		})
-		if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
+		if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket66{
 			RequestId:          123,
 			BlockHeadersPacket: headers,
 		}); err != nil {
@@ -279,14 +279,12 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
 		if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {
 			tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0

-			p2p.Send(peer.app, GetBlockHeadersMsg, GetBlockHeadersPacket66{
+			p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
 				RequestId:             456,
 				GetBlockHeadersPacket: tt.query,
 			})
-			if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, BlockHeadersPacket66{
-				RequestId:          456,
-				BlockHeadersPacket: headers,
-			}); err != nil {
+			expected := &BlockHeadersPacket66{RequestId: 456, BlockHeadersPacket: headers}
+			if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil {
 				t.Errorf("test %d by hash: headers mismatch: %v", i, err)
 			}
 		}
@@ -364,11 +362,11 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
 			}
 		}
 		// Send the hash request and verify the response
-		p2p.Send(peer.app, GetBlockBodiesMsg, GetBlockBodiesPacket66{
+		p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
 			RequestId:            123,
 			GetBlockBodiesPacket: hashes,
 		})
-		if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, BlockBodiesPacket66{
+		if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket66{
 			RequestId:         123,
 			BlockBodiesPacket: bodies,
 		}); err != nil {
@@ -436,7 +434,7 @@ func testGetNodeData(t *testing.T, protocol uint) {
 	it.Release()

 	// Request all hashes.
-	p2p.Send(peer.app, GetNodeDataMsg, GetNodeDataPacket66{
+	p2p.Send(peer.app, GetNodeDataMsg, &GetNodeDataPacket66{
 		RequestId:         123,
 		GetNodeDataPacket: hashes,
 	})
@@ -546,11 +544,11 @@ func testGetBlockReceipts(t *testing.T, protocol uint) {
 		receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))
 	}
 	// Send the hash request and verify the response
-	p2p.Send(peer.app, GetReceiptsMsg, GetReceiptsPacket66{
+	p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket66{
 		RequestId:         123,
 		GetReceiptsPacket: hashes,
 	})
-	if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, ReceiptsPacket66{
+	if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket66{
 		RequestId:      123,
 		ReceiptsPacket: receipts,
 	}); err != nil {


@@ -241,7 +241,7 @@ func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs [
 	p.knownTxs.Add(hashes...)

 	// Not packed into PooledTransactionsPacket to avoid RLP decoding
-	return p2p.Send(p.rw, PooledTransactionsMsg, PooledTransactionsRLPPacket66{
+	return p2p.Send(p.rw, PooledTransactionsMsg, &PooledTransactionsRLPPacket66{
 		RequestId:                   id,
 		PooledTransactionsRLPPacket: txs,
 	})
@@ -298,7 +298,7 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {

 // ReplyBlockHeaders is the eth/66 version of SendBlockHeaders.
 func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {
-	return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersRLPPacket66{
+	return p2p.Send(p.rw, BlockHeadersMsg, &BlockHeadersRLPPacket66{
 		RequestId:             id,
 		BlockHeadersRLPPacket: headers,
 	})
@@ -307,7 +307,7 @@ func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error {

 // ReplyBlockBodiesRLP is the eth/66 version of SendBlockBodiesRLP.
 func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {
 	// Not packed into BlockBodiesPacket to avoid RLP decoding
-	return p2p.Send(p.rw, BlockBodiesMsg, BlockBodiesRLPPacket66{
+	return p2p.Send(p.rw, BlockBodiesMsg, &BlockBodiesRLPPacket66{
 		RequestId:            id,
 		BlockBodiesRLPPacket: bodies,
 	})
@@ -315,7 +315,7 @@ func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {

 // ReplyNodeData is the eth/66 response to GetNodeData.
 func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {
-	return p2p.Send(p.rw, NodeDataMsg, NodeDataPacket66{
+	return p2p.Send(p.rw, NodeDataMsg, &NodeDataPacket66{
 		RequestId:      id,
 		NodeDataPacket: data,
 	})
@@ -323,7 +323,7 @@ func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {

 // ReplyReceiptsRLP is the eth/66 response to GetReceipts.
 func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {
-	return p2p.Send(p.rw, ReceiptsMsg, ReceiptsRLPPacket66{
+	return p2p.Send(p.rw, ReceiptsMsg, &ReceiptsRLPPacket66{
 		RequestId:         id,
 		ReceiptsRLPPacket: receipts,
 	})


@@ -168,7 +168,7 @@ type bytecodeResponse struct {
 // to actual requests and to validate any security constraints.
 //
 // Concurrency note: storage requests and responses are handled concurrently from
-// the main runloop to allow Merkel proof verifications on the peer's thread and
+// the main runloop to allow Merkle proof verifications on the peer's thread and
 // to drop on invalid response. The request struct must contain all the data to
 // construct the response without accessing runloop internals (i.e. tasks). That
 // is only included to allow the runloop to match a response to the task being
@@ -2826,7 +2826,10 @@ func (s *Syncer) reportSyncProgress(force bool) {
 		new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
 		accountFills,
 	).Uint64())
+	// Don't report anything until we have a meaningful progress
+	if estBytes < 1.0 {
+		return
+	}
 	elapsed := time.Since(s.startTime)
 	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)


@@ -1349,7 +1349,7 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
 	accTrie, _ := trie.New(common.Hash{}, db)
 	var entries entrySlice
 	for i := uint64(1); i <= uint64(n); i++ {
-		value, _ := rlp.EncodeToBytes(types.StateAccount{
+		value, _ := rlp.EncodeToBytes(&types.StateAccount{
 			Nonce:   i,
 			Balance: big.NewInt(int64(i)),
 			Root:    emptyRoot,
@@ -1394,7 +1394,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
 	}
 	// Fill boundary accounts
 	for i := 0; i < len(boundaries); i++ {
-		value, _ := rlp.EncodeToBytes(types.StateAccount{
+		value, _ := rlp.EncodeToBytes(&types.StateAccount{
 			Nonce:   uint64(0),
 			Balance: big.NewInt(int64(i)),
 			Root:    emptyRoot,
@@ -1406,7 +1406,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
 	}
 	// Fill other accounts if required
 	for i := uint64(1); i <= uint64(n); i++ {
-		value, _ := rlp.EncodeToBytes(types.StateAccount{
+		value, _ := rlp.EncodeToBytes(&types.StateAccount{
 			Nonce:   i,
 			Balance: big.NewInt(int64(i)),
 			Root:    emptyRoot,
@@ -1442,7 +1442,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
 		stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
 		stRoot := stTrie.Hash()
 		stTrie.Commit(nil)
-		value, _ := rlp.EncodeToBytes(types.StateAccount{
+		value, _ := rlp.EncodeToBytes(&types.StateAccount{
 			Nonce:   i,
 			Balance: big.NewInt(int64(i)),
 			Root:    stRoot,
@@ -1489,7 +1489,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
 		if code {
 			codehash = getCodeHash(i)
 		}
-		value, _ := rlp.EncodeToBytes(types.StateAccount{
+		value, _ := rlp.EncodeToBytes(&types.StateAccount{
 			Nonce:   i,
 			Balance: big.NewInt(int64(i)),
 			Root:    stRoot,


@@ -17,6 +17,7 @@
 package eth

 import (
+	"errors"
 	"math/big"
 	"sync/atomic"
 	"time"
@@ -65,6 +66,7 @@ type chainSyncer struct {
 	handler *handler
 	force   *time.Timer
 	forced  bool // true when force timer fired
+	warned  time.Time

 	peerEventCh chan struct{}
 	doneCh      chan error // non-nil when sync is running
 }
@@ -119,10 +121,18 @@ func (cs *chainSyncer) loop() {
 		select {
 		case <-cs.peerEventCh:
 			// Peer information changed, recheck.
-		case <-cs.doneCh:
+		case err := <-cs.doneCh:
 			cs.doneCh = nil
 			cs.force.Reset(forceSyncCycle)
 			cs.forced = false
+
+			// If we've reached the merge transition but no beacon client is available, or
+			// it has not yet switched us over, keep warning the user that their infra is
+			// potentially flaky.
+			if errors.Is(err, downloader.ErrMergeTransition) && time.Since(cs.warned) > 10*time.Second {
+				log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
+				cs.warned = time.Now()
+			}
 		case <-cs.force.C:
 			cs.forced = true
@@ -143,9 +153,16 @@ func (cs *chainSyncer) loop() {

 // nextSyncOp determines whether sync is required at this time.
 func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
 	if cs.doneCh != nil {
-		return nil // Sync already running.
+		return nil // Sync already running
 	}
-	// Disable the td based sync trigger after the transition
+	// If a beacon client once took over control, disable the entire legacy sync
+	// path from here on end. Note, there is a slight "race" between reaching TTD
+	// and the beacon client taking over. The downloader will enforce that nothing
+	// above the first TTD will be delivered to the chain for import.
+	//
+	// An alternative would be to check the local chain for exceeding the TTD and
+	// avoid triggering a sync in that case, but that could also miss sibling or
+	// other family TTD block being accepted.
 	if cs.handler.merger.TDDReached() {
 		return nil
 	}
@@ -159,16 +176,24 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
 	if cs.handler.peers.len() < minPeers {
 		return nil
 	}
-	// We have enough peers, check TD
+	// We have enough peers, pick the one with the highest TD, but avoid going
+	// over the terminal total difficulty. Above that we expect the consensus
+	// clients to direct the chain head to sync to.
 	peer := cs.handler.peers.peerWithHighestTD()
 	if peer == nil {
 		return nil
 	}
 	mode, ourTD := cs.modeAndLocalHead()
 	op := peerToSyncOp(mode, peer)
 	if op.td.Cmp(ourTD) <= 0 {
-		return nil // We're in sync.
+		// We seem to be in sync according to the legacy rules. In the merge
+		// world, it can also mean we're stuck on the merge block, waiting for
+		// a beacon client. In the latter case, notify the user.
+		if ttd := cs.handler.chain.Config().TerminalTotalDifficulty; ttd != nil && ourTD.Cmp(ttd) >= 0 && time.Since(cs.warned) > 10*time.Second {
+			log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
+			cs.warned = time.Now()
+		}
+		return nil // We're in sync
 	}
 	return op
 }
@@ -227,7 +252,7 @@ func (h *handler) doSync(op *chainSyncOp) error {
 		}
 	}
 	// Run the sync cycle, and disable snap sync if we're past the pivot block
-	err := h.downloader.Synchronise(op.peer.ID(), op.head, op.td, op.mode)
+	err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode)
 	if err != nil {
 		return err
 	}
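
The ten-second cool-down on the post-merge warning above is a small rate-limiting pattern used twice in this file. A generic sketch of the same idea (all names illustrative, not from the diff):

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/log"
)

// throttledLogger emits a warning at most once per cool-down window,
// mirroring the cs.warned bookkeeping in the diff above.
type throttledLogger struct {
	last     time.Time
	cooldown time.Duration
}

func (t *throttledLogger) warn(msg string) {
	if time.Since(t.last) > t.cooldown {
		log.Warn(msg)
		t.last = time.Now()
	}
}

func main() {
	tl := &throttledLogger{cooldown: 10 * time.Second}
	for i := 0; i < 3; i++ {
		tl.warn("still waiting for beacon client...") // only the first call fires
	}
}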


@@ -453,7 +453,7 @@ func (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config *

 // TraceBlock returns the structured logs created during the execution of EVM
 // and returns them as a JSON object.
-func (api *API) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig) ([]*txTraceResult, error) {
+func (api *API) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *TraceConfig) ([]*txTraceResult, error) {
 	block := new(types.Block)
 	if err := rlp.Decode(bytes.NewReader(blob), block); err != nil {
 		return nil, fmt.Errorf("could not decode block: %v", err)
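
With the parameter typed as hexutil.Bytes, the RLP blob travels over JSON-RPC as an ordinary 0x-prefixed hex string. A hedged sketch of calling the method from Go, assuming a local node with the debug namespace exposed and rlpBlob holding a block's raw RLP (e.g. obtained via debug_getBlockRlp):

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	var rlpBlob []byte // assumption: the raw RLP encoding of a block
	var traces interface{}
	// nil stands in for the optional trace config.
	err = client.CallContext(context.Background(), &traces,
		"debug_traceBlock", hexutil.Bytes(rlpBlob), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(traces)
}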


@@ -43,6 +43,9 @@ type Batcher interface {
 	// NewBatch creates a write-only database that buffers changes to its host db
 	// until a final write is called.
 	NewBatch() Batch
+
+	// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+	NewBatchWithSize(size int) Batch
 }

 // HookedBatch wraps an arbitrary batch where each operation may be hooked into
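
Pre-sizing is useful when the caller already knows roughly how much data a batch will hold, avoiding repeated buffer growth on the way. A minimal usage sketch against the new interface method (the 1 MiB hint and helper name are illustrative):

package dbutil

import "github.com/ethereum/go-ethereum/ethdb"

// flush writes all pending key/value pairs through one pre-sized batch.
func flush(db ethdb.Batcher, pending map[string][]byte) error {
	batch := db.NewBatchWithSize(1024 * 1024) // assumption: ~1 MiB of writes
	for k, v := range pending {
		if err := batch.Put([]byte(k), v); err != nil {
			return err
		}
	}
	return batch.Write()
}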
// HookedBatch wraps an arbitrary batch where each operation may be hooked into // HookedBatch wraps an arbitrary batch where each operation may be hooked into


@@ -64,6 +64,7 @@ type KeyValueStore interface {
 	Iteratee
 	Stater
 	Compacter
+	Snapshotter
 	io.Closer
 }
@@ -86,6 +87,10 @@ type AncientReader interface {
 	// Ancients returns the ancient item numbers in the ancient store.
 	Ancients() (uint64, error)

+	// Tail returns the number of first stored item in the freezer.
+	// This number can also be interpreted as the total deleted item numbers.
+	Tail() (uint64, error)
+
 	// AncientSize returns the ancient size of the specified category.
 	AncientSize(kind string) (uint64, error)
 }
@@ -106,11 +111,24 @@ type AncientWriter interface {
 	// The integer return value is the total size of the written data.
 	ModifyAncients(func(AncientWriteOp) error) (int64, error)

-	// TruncateAncients discards all but the first n ancient data from the ancient store.
-	TruncateAncients(n uint64) error
+	// TruncateHead discards all but the first n ancient data from the ancient store.
+	// After the truncation, the latest item can be accessed it item_n-1(start from 0).
+	TruncateHead(n uint64) error
+
+	// TruncateTail discards the first n ancient data from the ancient store. The already
+	// deleted items are ignored. After the truncation, the earliest item can be accessed
+	// is item_n(start from 0). The deleted items may not be removed from the ancient store
+	// immediately, but only when the accumulated deleted data reach the threshold then
+	// will be removed all together.
+	TruncateTail(n uint64) error

 	// Sync flushes all in-memory ancient store data to disk.
 	Sync() error
+
+	// MigrateTable processes and migrates entries of a given table to a new format.
+	// The second argument is a function that takes a raw entry and returns it
+	// in the newest format.
+	MigrateTable(string, func([]byte) ([]byte, error)) error
 }

 // AncientWriteOp is given to the function argument of ModifyAncients.
@@ -153,5 +171,6 @@ type Database interface {
 	Iteratee
 	Stater
 	Compacter
+	Snapshotter
 	io.Closer
 }
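
A short sketch of the head/tail semantics, assuming the combined ethdb.AncientStore interface and a freezer currently holding items [0, 100):

package dbutil

import "github.com/ethereum/go-ethereum/ethdb"

// trim narrows an ancient store assumed to hold items [0, 100) down to
// the window [10, 90). Afterwards Tail() reports 10 and Ancients() 90.
func trim(db ethdb.AncientStore) error {
	if err := db.TruncateHead(90); err != nil { // drop items 90..99; latest is now 89
		return err
	}
	// Drop items 0..9; on-disk removal may be deferred until enough
	// deleted data has accumulated, as the interface comment notes.
	return db.TruncateTail(10)
}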


@@ -313,6 +313,68 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) {
 		}
 	})

+	t.Run("Snapshot", func(t *testing.T) {
+		db := New()
+		defer db.Close()
+
+		initial := map[string]string{
+			"k1": "v1", "k2": "v2", "k3": "", "k4": "",
+		}
+		for k, v := range initial {
+			db.Put([]byte(k), []byte(v))
+		}
+		snapshot, err := db.NewSnapshot()
+		if err != nil {
+			t.Fatal(err)
+		}
+		for k, v := range initial {
+			got, err := snapshot.Get([]byte(k))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !bytes.Equal(got, []byte(v)) {
+				t.Fatalf("Unexpected value want: %v, got %v", v, got)
+			}
+		}
+		// Flush more modifications into the database, ensure the snapshot
+		// isn't affected.
+		var (
+			update = map[string]string{"k1": "v1-b", "k3": "v3-b"}
+			insert = map[string]string{"k5": "v5-b"}
+			delete = map[string]string{"k2": ""}
+		)
+		for k, v := range update {
+			db.Put([]byte(k), []byte(v))
+		}
+		for k, v := range insert {
+			db.Put([]byte(k), []byte(v))
+		}
+		for k := range delete {
+			db.Delete([]byte(k))
+		}
+		for k, v := range initial {
+			got, err := snapshot.Get([]byte(k))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !bytes.Equal(got, []byte(v)) {
+				t.Fatalf("Unexpected value want: %v, got %v", v, got)
+			}
+		}
+		for k := range insert {
+			got, err := snapshot.Get([]byte(k))
+			if err == nil || len(got) != 0 {
+				t.Fatal("Unexpected value")
+			}
+		}
+		for k := range delete {
+			got, err := snapshot.Get([]byte(k))
+			if err != nil || len(got) == 0 {
+				t.Fatal("Unexpected deletion")
+			}
+		}
+	})
 }

 func iterateKeys(it ethdb.Iterator) []string {


@@ -213,6 +213,14 @@ func (db *Database) NewBatch() ethdb.Batch {
 	}
 }

+// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+func (db *Database) NewBatchWithSize(size int) ethdb.Batch {
+	return &batch{
+		db: db.db,
+		b:  leveldb.MakeBatch(size),
+	}
+}
+
 // NewIterator creates a binary-alphabetical iterator over a subset
 // of database content with a particular key prefix, starting at a particular
 // initial key (or after, if it does not exist).
@@ -220,6 +228,19 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
 	return db.db.NewIterator(bytesPrefixRange(prefix, start), nil)
 }

+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot will not be affected by all following mutations
+// happened on the database.
+// Note don't forget to release the snapshot once it's used up, otherwise
+// the stale data will never be cleaned up by the underlying compactor.
+func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
+	snap, err := db.db.GetSnapshot()
+	if err != nil {
+		return nil, err
+	}
+	return &snapshot{db: snap}, nil
+}
+
 // Stat returns a particular internal stat of the database.
 func (db *Database) Stat(property string) (string, error) {
 	return db.db.GetProperty(property)
@@ -519,3 +540,26 @@ func bytesPrefixRange(prefix, start []byte) *util.Range {
 	r.Start = append(r.Start, start...)
 	return r
 }
+
+// snapshot wraps a leveldb snapshot for implementing the Snapshot interface.
+type snapshot struct {
+	db *leveldb.Snapshot
+}
+
+// Has retrieves if a key is present in the snapshot backing by a key-value
+// data store.
+func (snap *snapshot) Has(key []byte) (bool, error) {
+	return snap.db.Has(key, nil)
+}
+
+// Get retrieves the given key if it's present in the snapshot backing by
+// key-value data store.
+func (snap *snapshot) Get(key []byte) ([]byte, error) {
+	return snap.db.Get(key, nil)
+}
+
+// Release releases associated resources. Release should always succeed and can
+// be called multiple times without causing error.
+func (snap *snapshot) Release() {
+	snap.db.Release()
+}


@@ -35,6 +35,10 @@ var (
 	// errMemorydbNotFound is returned if a key is requested that is not found in
 	// the provided memory database.
 	errMemorydbNotFound = errors.New("not found")
+
+	// errSnapshotReleased is returned if callers want to retrieve data from a
+	// released snapshot.
+	errSnapshotReleased = errors.New("snapshot released")
 )

 // Database is an ephemeral key-value store. Apart from basic data storage
@@ -53,7 +57,7 @@ func New() *Database {
 	}
 }

-// NewWithCap returns a wrapped map pre-allocated to the provided capcity with
+// NewWithCap returns a wrapped map pre-allocated to the provided capacity with
 // all the required database interface methods implemented.
 func NewWithCap(size int) *Database {
 	return &Database{
@@ -129,6 +133,13 @@ func (db *Database) NewBatch() ethdb.Batch {
 	}
 }

+// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+func (db *Database) NewBatchWithSize(size int) ethdb.Batch {
+	return &batch{
+		db: db,
+	}
+}
+
 // NewIterator creates a binary-alphabetical iterator over a subset
 // of database content with a particular key prefix, starting at a particular
 // initial key (or after, if it does not exist).
@@ -163,6 +174,13 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
 	}
 }

+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot will not be affected by all following mutations
+// happened on the database.
+func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
+	return newSnapshot(db), nil
+}
+
 // Stat returns a particular internal stat of the database.
 func (db *Database) Stat(property string) (string, error) {
 	return "", errors.New("unknown property")
@@ -313,3 +331,59 @@ func (it *iterator) Value() []byte {

 func (it *iterator) Release() {
 	it.keys, it.values = nil, nil
 }
+
+// snapshot wraps a batch of key-value entries deep copied from the in-memory
+// database for implementing the Snapshot interface.
+type snapshot struct {
+	db   map[string][]byte
+	lock sync.RWMutex
+}
+
+// newSnapshot initializes the snapshot with the given database instance.
+func newSnapshot(db *Database) *snapshot {
+	db.lock.RLock()
+	defer db.lock.RUnlock()
+
+	copied := make(map[string][]byte)
+	for key, val := range db.db {
+		copied[key] = common.CopyBytes(val)
+	}
+	return &snapshot{db: copied}
+}
+
+// Has retrieves if a key is present in the snapshot backing by a key-value
+// data store.
+func (snap *snapshot) Has(key []byte) (bool, error) {
+	snap.lock.RLock()
+	defer snap.lock.RUnlock()
+
+	if snap.db == nil {
+		return false, errSnapshotReleased
+	}
+	_, ok := snap.db[string(key)]
+	return ok, nil
+}
+
+// Get retrieves the given key if it's present in the snapshot backing by
+// key-value data store.
+func (snap *snapshot) Get(key []byte) ([]byte, error) {
+	snap.lock.RLock()
+	defer snap.lock.RUnlock()
+
+	if snap.db == nil {
+		return nil, errSnapshotReleased
+	}
+	if entry, ok := snap.db[string(key)]; ok {
+		return common.CopyBytes(entry), nil
+	}
+	return nil, errMemorydbNotFound
+}
+
+// Release releases associated resources. Release should always succeed and can
+// be called multiple times without causing error.
+func (snap *snapshot) Release() {
+	snap.lock.Lock()
+	defer snap.lock.Unlock()
+
+	snap.db = nil
+}

ethdb/snapshot.go (new file, 41 lines)

@@ -0,0 +1,41 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethdb
+
+type Snapshot interface {
+	// Has retrieves if a key is present in the snapshot backing by a key-value
+	// data store.
+	Has(key []byte) (bool, error)
+
+	// Get retrieves the given key if it's present in the snapshot backing by
+	// key-value data store.
+	Get(key []byte) ([]byte, error)
+
+	// Release releases associated resources. Release should always succeed and can
+	// be called multiple times without causing error.
+	Release()
+}
+
+// Snapshotter wraps the Snapshot method of a backing data store.
+type Snapshotter interface {
+	// NewSnapshot creates a database snapshot based on the current state.
+	// The created snapshot will not be affected by all following mutations
+	// happened on the database.
+	// Note don't forget to release the snapshot once it's used up, otherwise
+	// the stale data will never be cleaned up by the underlying compactor.
+	NewSnapshot() (Snapshot, error)
+}
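
A usage sketch of the new interface pair: take a point-in-time view, read from it while the live database keeps mutating, then release it so the backing store's compactor can reclaim stale data (helper name illustrative):

package dbutil

import "github.com/ethereum/go-ethereum/ethdb"

// readConsistent fetches several keys from one immutable view, so the
// returned values are mutually consistent even under concurrent writes.
func readConsistent(db ethdb.Snapshotter, keys [][]byte) ([][]byte, error) {
	snap, err := db.NewSnapshot()
	if err != nil {
		return nil, err
	}
	defer snap.Release() // always release, or stale data lingers

	out := make([][]byte, 0, len(keys))
	for _, key := range keys {
		val, err := snap.Get(key)
		if err != nil {
			return nil, err
		}
		out = append(out, val)
	}
	return out, nil
}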

Some files were not shown because too many files have changed in this diff.