forked from cerc-io/plugeth

all: implement forkid changes for shanghai

parent efc9409ca9 · commit a4e19c5ca3
@@ -76,7 +76,7 @@ func (c *Chain) RootAt(height int) common.Hash {
 
 // ForkID gets the fork id of the chain.
 func (c *Chain) ForkID() forkid.ID {
-	return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
+	return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time())
 }
 
 // Shorten returns a copy chain of a desired height from the imported
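In short, this commit threads a head timestamp through the fork ID machinery so that time-scheduled forks (Shanghai) are hashed into the ID exactly like block-number forks. For context, here is a minimal, self-contained sketch of the EIP-2124 checksum that NewID computes: start from CRC32 of the genesis hash, then fold in each passed fork point as a big-endian uint64. The checksumUpdate helper mirrors the real one in core/forkid; the genesis bytes and fork values are made-up stand-ins.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// checksumUpdate folds one fork point (a block number or, after this commit,
// a timestamp) into the running CRC32 as a big-endian uint64.
func checksumUpdate(hash uint32, fork uint64) uint32 {
	var blob [8]byte
	binary.BigEndian.PutUint64(blob[:], fork)
	return crc32.Update(hash, crc32.IEEETable, blob[:])
}

func main() {
	var genesis [32]byte // stand-in for a real genesis hash
	hash := crc32.ChecksumIEEE(genesis[:])
	for _, fork := range []uint64{1150000, 1920000} { // passed block forks
		hash = checksumUpdate(hash, fork)
	}
	for _, fork := range []uint64{1668000000} { // passed time forks (Shanghai)
		hash = checksumUpdate(hash, fork)
	}
	fmt.Printf("fork hash: %08x\n", hash)
}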
@@ -24,6 +24,7 @@ import (
 	"math"
 	"math/big"
 	"reflect"
+	"sort"
 	"strings"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -65,19 +66,28 @@ type ID struct {
 // Filter is a fork id filter to validate a remotely advertised ID.
 type Filter func(id ID) error
 
-// NewID calculates the Ethereum fork ID from the chain config, genesis hash, and head.
-func NewID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
+// NewID calculates the Ethereum fork ID from the chain config, genesis hash, head and time.
+func NewID(config *params.ChainConfig, genesis common.Hash, head, time uint64) ID {
 	// Calculate the starting checksum from the genesis hash
 	hash := crc32.ChecksumIEEE(genesis[:])
 
 	// Calculate the current fork checksum and the next fork block
-	var next uint64
-	for _, fork := range gatherForks(config) {
+	forks, forksByTime := gatherForks(config)
+	for _, fork := range forks {
 		if fork <= head {
 			// Fork already passed, checksum the previous hash and the fork number
 			hash = checksumUpdate(hash, fork)
 			continue
 		}
+		return ID{Hash: checksumToBytes(hash), Next: fork}
+	}
+	var next uint64
+	for _, fork := range forksByTime {
+		if time >= fork {
+			// Fork passed, checksum previous hash and fork time
+			hash = checksumUpdate(hash, fork)
+			continue
+		}
 		next = fork
 		break
 	}
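The rewritten NewID scans block-number forks first and returns as soon as one is still ahead of the local head; only a chain past all block forks goes on to fold in timestamp forks. This leans on the assumption that every time-based fork activates after every block-based one. Below is a sketch of that two-phase scan with simplified stand-in types (no checksumming, just the passed/next split):

package main

import "fmt"

// nextFork mirrors NewID's two-phase scan: collect the forks already
// activated (these feed the checksum in the real code) and stop at the first
// future one, checking block-number forks before time forks.
func nextFork(head, time uint64, forks, forksByTime []uint64) (passed []uint64, next uint64) {
	for _, fork := range forks {
		if fork <= head {
			passed = append(passed, fork) // activated by block number
			continue
		}
		return passed, fork // still ahead of the local head
	}
	for _, fork := range forksByTime {
		if time >= fork {
			passed = append(passed, fork) // activated by timestamp
			continue
		}
		return passed, fork // still ahead of the local time
	}
	return passed, 0 // no future fork announced
}

func main() {
	passed, next := nextFork(2000000, 0, []uint64{1150000, 1920000}, []uint64{1668000000})
	fmt.Println(passed, next) // [1150000 1920000] 1668000000
}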
@@ -90,6 +100,7 @@ func NewIDWithChain(chain Blockchain) ID {
 		chain.Config(),
 		chain.Genesis().Hash(),
 		chain.CurrentHeader().Number.Uint64(),
+		chain.CurrentHeader().Time,
 	)
 }
 
@@ -99,36 +110,40 @@ func NewFilter(chain Blockchain) Filter {
 	return newFilter(
 		chain.Config(),
 		chain.Genesis().Hash(),
-		func() uint64 {
-			return chain.CurrentHeader().Number.Uint64()
+		func() (uint64, uint64) {
+			return chain.CurrentHeader().Number.Uint64(), chain.CurrentHeader().Time
 		},
 	)
 }
 
 // NewStaticFilter creates a filter at block zero.
 func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
-	head := func() uint64 { return 0 }
+	head := func() (uint64, uint64) { return 0, 0 }
 	return newFilter(config, genesis, head)
 }
 
 // newFilter is the internal version of NewFilter, taking closures as its arguments
 // instead of a chain. The reason is to allow testing it without having to simulate
 // an entire blockchain.
-func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
+func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() (uint64, uint64)) Filter {
 	// Calculate the all the valid fork hash and fork next combos
 	var (
-		forks = gatherForks(config)
-		sums  = make([][4]byte, len(forks)+1) // 0th is the genesis
+		forks, forksByTime = gatherForks(config)
+		sums               = make([][4]byte, len(forks)+len(forksByTime)+1) // 0th is the genesis
 	)
+	allForks := append(forks, forksByTime...)
 	hash := crc32.ChecksumIEEE(genesis[:])
 	sums[0] = checksumToBytes(hash)
-	for i, fork := range forks {
+	for i, fork := range allForks {
 		hash = checksumUpdate(hash, fork)
 		sums[i+1] = checksumToBytes(hash)
 	}
 	// Add two sentries to simplify the fork checks and don't require special
 	// casing the last one.
-	forks = append(forks, math.MaxUint64) // Last fork will never be passed
+	if len(forksByTime) == 0 {
+		forks = append(forks, math.MaxUint64)
+	}
+	forksByTime = append(forksByTime, math.MaxUint64) // Last fork will never be passed
 
 	// Create a validator that will filter out incompatible chains
 	return func(id ID) error {
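newFilter now precomputes one checksum per fork point across the concatenated blocks-then-times list, plus the genesis entry, and appends MaxUint64 sentinels so the validator's scan always lands on some unpassed fork. The block-list sentinel is only needed when there are no time forks; otherwise the time list carries the terminator. A self-contained sketch of that table construction follows; helper names mirror the real ones, values are stand-ins.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"math"
)

// checksumUpdate and checksumToBytes mirror the core/forkid helpers: fold a
// fork point into the running CRC32 and render a checksum as 4 bytes.
func checksumUpdate(hash uint32, fork uint64) uint32 {
	var blob [8]byte
	binary.BigEndian.PutUint64(blob[:], fork)
	return crc32.Update(hash, crc32.IEEETable, blob[:])
}

func checksumToBytes(hash uint32) [4]byte {
	var blob [4]byte
	binary.BigEndian.PutUint32(blob[:], hash)
	return blob
}

func main() {
	var genesis [32]byte                // stand-in genesis hash
	forks := []uint64{1150000}          // block-number forks
	forksByTime := []uint64{1668000000} // timestamp forks

	// One checksum per fork point, blocks first, then times, plus genesis.
	allForks := append(append([]uint64{}, forks...), forksByTime...)
	sums := make([][4]byte, len(allForks)+1) // 0th is the genesis

	hash := crc32.ChecksumIEEE(genesis[:])
	sums[0] = checksumToBytes(hash)
	for i, fork := range allForks {
		hash = checksumUpdate(hash, fork)
		sums[i+1] = checksumToBytes(hash)
	}
	// Sentinels: the validator's scan must always find an unpassed fork. The
	// block sentinel is only needed when no time forks follow it.
	if len(forksByTime) == 0 {
		forks = append(forks, math.MaxUint64)
	}
	forksByTime = append(forksByTime, math.MaxUint64)

	fmt.Printf("checksum table: %x\n", sums)
	fmt.Println("scan lists:", forks, forksByTime)
}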
@@ -151,19 +166,14 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
 		//      the remote, but at this current point in time we don't have enough
 		//      information.
 		//   4. Reject in all other cases.
-		head := headfn()
-		for i, fork := range forks {
-			// If our head is beyond this fork, continue to the next (we have a dummy
-			// fork of maxuint64 as the last item to always fail this check eventually).
-			if head >= fork {
-				continue
-			}
+		verify := func(index int, headOrTime uint64) error {
 			// Found the first unpassed fork block, check if our current state matches
 			// the remote checksum (rule #1).
-			if sums[i] == id.Hash {
+			if sums[index] == id.Hash {
 				// Fork checksum matched, check if a remote future fork block already passed
 				// locally without the local node being aware of it (rule #1a).
-				if id.Next > 0 && head >= id.Next {
+				if id.Next > 0 && headOrTime >= id.Next {
 					return ErrLocalIncompatibleOrStale
 				}
 				// Haven't passed locally a remote-only fork, accept the connection (rule #1b).
@@ -171,10 +181,10 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
 			}
 			// The local and remote nodes are in different forks currently, check if the
 			// remote checksum is a subset of our local forks (rule #2).
-			for j := 0; j < i; j++ {
+			for j := 0; j < index; j++ {
 				if sums[j] == id.Hash {
 					// Remote checksum is a subset, validate based on the announced next fork
-					if forks[j] != id.Next {
+					if allForks[j] != id.Next {
 						return ErrRemoteStale
 					}
 					return nil
@@ -182,7 +192,7 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
 			}
 			// Remote chain is not a subset of our local one, check if it's a superset by
 			// any chance, signalling that we're simply out of sync (rule #3).
-			for j := i + 1; j < len(sums); j++ {
+			for j := index + 1; j < len(sums); j++ {
 				if sums[j] == id.Hash {
 					// Yay, remote checksum is a superset, ignore upcoming forks
 					return nil
@@ -191,6 +201,27 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
 			// No exact, subset or superset match. We are on differing chains, reject.
 			return ErrLocalIncompatibleOrStale
 		}
+
+		head, time := headfn()
+		// Verify forks by block
+		for i, fork := range forks {
+			// If our head is beyond this fork, continue to the next (we have a dummy
+			// fork of maxuint64 as the last item to always fail this check eventually).
+			if head >= fork {
+				continue
+			}
+			return verify(i, head)
+		}
+		// Verify forks by time
+		for i, fork := range forksByTime {
+			// If our head is beyond this fork, continue to the next (we have a dummy
+			// fork of maxuint64 as the last item to always fail this check eventually).
+			if time >= fork {
+				continue
+			}
+			return verify(len(forks)+i, time)
+		}
+
 		log.Error("Impossible fork ID validation", "id", id)
 		return nil // Something's very wrong, accept rather than reject
 	}
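The rule checks themselves are unchanged; they moved into the verify closure, which indexes into the combined sums table. The new dispatch walks block forks against the head number, then time forks against the head time, offsetting the table index by len(forks) so the time entries line up with their checksums. A stripped-down sketch of the dispatch, with a trivial stand-in for verify:

package main

import "fmt"

// dispatch mirrors the new validator's scan: find the first unpassed fork,
// first by block number, then by timestamp, and hand its index in the
// combined (blocks-then-times) table to verify. The MaxUint64 sentinels in
// the real code guarantee one of the returns is always taken.
func dispatch(head, time uint64, forks, forksByTime []uint64,
	verify func(index int, headOrTime uint64) error) error {
	for i, fork := range forks {
		if head >= fork {
			continue // fork already passed by block number
		}
		return verify(i, head)
	}
	for i, fork := range forksByTime {
		if time >= fork {
			continue // fork already passed by timestamp
		}
		return verify(len(forks)+i, time) // offset past the block-fork entries
	}
	return nil
}

func main() {
	forks := []uint64{1150000, ^uint64(0)} // sentinel appended as in newFilter
	err := dispatch(2000000, 0, forks, nil, func(index int, headOrTime uint64) error {
		fmt.Println("checking table entry", index, "against", headOrTime)
		return nil
	})
	fmt.Println(err)
}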
@@ -212,35 +243,40 @@ func checksumToBytes(hash uint32) [4]byte {
 }
 
 // gatherForks gathers all the known forks and creates a sorted list out of them.
-func gatherForks(config *params.ChainConfig) []uint64 {
+func gatherForks(config *params.ChainConfig) ([]uint64, []uint64) {
 	// Gather all the fork block numbers via reflection
 	kind := reflect.TypeOf(params.ChainConfig{})
 	conf := reflect.ValueOf(config).Elem()
 
 	var forks []uint64
+	var forksByTime []uint64
 	for i := 0; i < kind.NumField(); i++ {
 		// Fetch the next field and skip non-fork rules
 		field := kind.Field(i)
+		time := false
 		if !strings.HasSuffix(field.Name, "Block") {
+			if !strings.HasSuffix(field.Name, "Time") {
 				continue
 			}
+			time = true
+		}
 		if field.Type != reflect.TypeOf(new(big.Int)) {
 			continue
 		}
 		// Extract the fork rule block number and aggregate it
 		rule := conf.Field(i).Interface().(*big.Int)
 		if rule != nil {
+			if time {
+				forksByTime = append(forksByTime, rule.Uint64())
+			} else {
 				forks = append(forks, rule.Uint64())
 			}
 		}
 	}
-	// Sort the fork block numbers to permit chronological XOR
-	for i := 0; i < len(forks); i++ {
-		for j := i + 1; j < len(forks); j++ {
-			if forks[i] > forks[j] {
-				forks[i], forks[j] = forks[j], forks[i]
-			}
-		}
-	}
+	sort.Slice(forks, func(i, j int) bool { return forks[i] < forks[j] })
+	sort.Slice(forksByTime, func(i, j int) bool { return forksByTime[i] < forksByTime[j] })
 
 	// Deduplicate block numbers applying multiple forks
 	for i := 1; i < len(forks); i++ {
 		if forks[i] == forks[i-1] {
@@ -248,9 +284,18 @@ func gatherForks(config *params.ChainConfig) []uint64 {
 			i--
 		}
 	}
+	for i := 1; i < len(forksByTime); i++ {
+		if forksByTime[i] == forksByTime[i-1] {
+			forksByTime = append(forksByTime[:i], forksByTime[i+1:]...)
+			i--
+		}
+	}
 	// Skip any forks in block 0, that's the genesis ruleset
 	if len(forks) > 0 && forks[0] == 0 {
 		forks = forks[1:]
 	}
-	return forks
+	if len(forksByTime) > 0 && forksByTime[0] == 0 {
+		forksByTime = forksByTime[1:]
+	}
+	return forks, forksByTime
 }
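gatherForks now returns two slices, splitting the *big.Int fields of the chain config by name suffix: "...Block" fields become block-number forks and "...Time" fields become timestamp forks (this fork of the code models ShanghaiTime as a *big.Int). Here is a runnable sketch of the same reflection walk over a simplified stand-in config struct:

package main

import (
	"fmt"
	"math/big"
	"reflect"
	"sort"
	"strings"
)

// A stand-in chain config; the real params.ChainConfig has many more fields.
type chainConfig struct {
	HomesteadBlock *big.Int
	BerlinBlock    *big.Int
	ShanghaiTime   *big.Int
	ChainID        *big.Int // not a fork rule: wrong suffix, skipped
}

// gatherForks splits fork rules by field-name suffix, mirroring the new
// two-slice return: block-number forks and timestamp forks, each sorted.
func gatherForks(config *chainConfig) (forks, forksByTime []uint64) {
	kind := reflect.TypeOf(*config)
	conf := reflect.ValueOf(config).Elem()
	for i := 0; i < kind.NumField(); i++ {
		field := kind.Field(i)
		time := false
		if !strings.HasSuffix(field.Name, "Block") {
			if !strings.HasSuffix(field.Name, "Time") {
				continue
			}
			time = true
		}
		if field.Type != reflect.TypeOf(new(big.Int)) {
			continue
		}
		if rule := conf.Field(i).Interface().(*big.Int); rule != nil {
			if time {
				forksByTime = append(forksByTime, rule.Uint64())
			} else {
				forks = append(forks, rule.Uint64())
			}
		}
	}
	sort.Slice(forks, func(i, j int) bool { return forks[i] < forks[j] })
	sort.Slice(forksByTime, func(i, j int) bool { return forksByTime[i] < forksByTime[j] })
	return forks, forksByTime
}

func main() {
	cfg := &chainConfig{
		HomesteadBlock: big.NewInt(1150000),
		BerlinBlock:    big.NewInt(12244000),
		ShanghaiTime:   big.NewInt(1668000000),
		ChainID:        big.NewInt(1),
	}
	fmt.Println(gatherForks(cfg)) // [1150000 12244000] [1668000000]
}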
@@ -185,7 +185,105 @@ func TestCreation(t *testing.T) {
 	}
 	for i, tt := range tests {
 		for j, ttt := range tt.cases {
-			if have := NewID(tt.config, tt.genesis, ttt.head); have != ttt.want {
+			if have := NewID(tt.config, tt.genesis, ttt.head, 0); have != ttt.want {
+				t.Errorf("test %d, case %d: fork ID mismatch: have %x, want %x", i, j, have, ttt.want)
+			}
+		}
+	}
+}
+
+// TestCreationWithTimestamps tests that different genesis and fork rule combinations result in
+// the correct fork ID even for time based forks.
+func TestCreationWithTimestamps(t *testing.T) {
+	mergeConfig := *params.MainnetChainConfig
+	mergeConfig.MergeNetsplitBlock = big.NewInt(18000000)
+
+	withdrawalConfig := *params.MainnetChainConfig
+	withdrawalConfig.MergeNetsplitBlock = big.NewInt(18000000)
+	withdrawalConfig.ShanghaiTime = big.NewInt(1668000000)
+	type testcase struct {
+		head uint64
+		time uint64
+		want ID
+	}
+	tests := []struct {
+		config  *params.ChainConfig
+		genesis common.Hash
+		cases   []testcase
+	}{
+		// Mainnet test cases
+		{
+			params.MainnetChainConfig,
+			params.MainnetGenesisHash,
+			[]testcase{
+				{0, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},        // Unsynced
+				{1149999, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},  // Last Frontier block
+				{1150000, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},  // First Homestead block
+				{1919999, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},  // Last Homestead block
+				{1920000, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},  // First DAO block
+				{2462999, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},  // Last DAO block
+				{2463000, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},  // First Tangerine block
+				{2674999, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},  // Last Tangerine block
+				{2675000, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},  // First Spurious block
+				{4369999, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},  // Last Spurious block
+				{4370000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},  // First Byzantium block
+				{7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},  // Last Byzantium block
+				{7280000, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},  // First and last Constantinople, first Petersburg block
+				{9068999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},  // Last Petersburg block
+				{9069000, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},  // First Istanbul and first Muir Glacier block
+				{9199999, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},  // Last Istanbul and first Muir Glacier block
+				{9200000, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block
+				{12243999, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
+				{12244000, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
+				{12964999, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
+				{12965000, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
+				{13772999, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
+				{13773000, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
+				{15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
+				{15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}},        // First Gray Glacier block
+				{20000000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}},        // Future Gray Glacier block
+			},
+		},
+		// Withdrawal test cases
+		{
+			&withdrawalConfig,
+			params.MainnetGenesisHash,
+			[]testcase{
+				{0, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},        // Unsynced
+				{1149999, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}},  // Last Frontier block
+				{1150000, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},  // First Homestead block
+				{1919999, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}},  // Last Homestead block
+				{1920000, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},  // First DAO block
+				{2462999, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}},  // Last DAO block
+				{2463000, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},  // First Tangerine block
+				{2674999, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}},  // Last Tangerine block
+				{2675000, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},  // First Spurious block
+				{4369999, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}},  // Last Spurious block
+				{4370000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},  // First Byzantium block
+				{7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}},  // Last Byzantium block
+				{7280000, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},  // First and last Constantinople, first Petersburg block
+				{9068999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}},  // Last Petersburg block
+				{9069000, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},  // First Istanbul and first Muir Glacier block
+				{9199999, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}},  // Last Istanbul and first Muir Glacier block
+				{9200000, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block
+				{12243999, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
+				{12244000, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
+				{12964999, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
+				{12965000, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
+				{13772999, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
+				{13773000, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
+				{15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
+				{15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 18000000}}, // First Gray Glacier block
+				{18000000, 0, ID{Hash: checksumToBytes(0x4fb8a872), Next: 1668000000}}, // First Merge Start block
+				{20000000, 0, ID{Hash: checksumToBytes(0x4fb8a872), Next: 1668000000}}, // Last Merge Start block
+				{20000000, 1668000000, ID{Hash: checksumToBytes(0xc1fdf181), Next: 0}}, // First Merge Start block
+				{20000000, 2668000000, ID{Hash: checksumToBytes(0xc1fdf181), Next: 0}}, // Future Merge Start block
+			},
+		},
+	}
+	for i, tt := range tests {
+		for j, ttt := range tt.cases {
+			if have := NewID(tt.config, tt.genesis, ttt.head, ttt.time); have != ttt.want {
 				t.Errorf("test %d, case %d: fork ID mismatch: have %x, want %x", i, j, have, ttt.want)
 			}
 		}
@@ -267,7 +365,93 @@ func TestValidation(t *testing.T) {
 		{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
 	}
 	for i, tt := range tests {
-		filter := newFilter(params.MainnetChainConfig, params.MainnetGenesisHash, func() uint64 { return tt.head })
+		filter := newFilter(params.MainnetChainConfig, params.MainnetGenesisHash, func() (uint64, uint64) { return tt.head, 0 })
+		if err := filter(tt.id); err != tt.err {
+			t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
+		}
+	}
+}
+
+// TestValidationByTimestamp tests that a local peer correctly validates and accepts a remote
+// fork ID.
+func TestValidationByTimestamp(t *testing.T) {
+	withdrawalConfig := *params.MainnetChainConfig
+	withdrawalConfig.MergeNetsplitBlock = big.NewInt(18000000)
+	withdrawalConfig.ShanghaiTime = big.NewInt(1668000000)
+	tests := []struct {
+		head uint64
+		time uint64
+		id   ID
+		err  error
+	}{
+		// Local is mainnet Withdrawals, remote announces the same. No future fork is announced.
+		{20000000, 1668000001, ID{Hash: checksumToBytes(0xc1fdf181), Next: 0}, nil},
+
+		// Local is mainnet Withdrawals, remote announces the same also announces a next fork
+		// at block/time 0xffffffff, but that is uncertain.
+		{20000000, 1668000001, ID{Hash: checksumToBytes(0xc1fdf181), Next: math.MaxUint64}, nil},
+
+		// Local is mainnet currently in Byzantium only (so it's aware of Petersburg & Withdrawals), remote announces
+		// also Byzantium, but it's not yet aware of Petersburg (e.g. non updated node before the fork).
+		// In this case we don't know if Petersburg passed yet or not.
+		{7279999, 1667999999, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
+
+		// Local is mainnet currently in Byzantium only (so it's aware of Petersburg & Withdrawals), remote announces
+		// also Byzantium, and it's also aware of Petersburg (e.g. updated node before the fork). We
+		// don't know if Petersburg passed yet (will pass) or not.
+		{7279999, 1667999999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+
+		// Local is mainnet currently in Byzantium only (so it's aware of Petersburg & Withdrawals), remote announces
+		// also Byzantium, and it's also aware of some random fork (e.g. misconfigured Petersburg). As
+		// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
+		{7279999, 1667999999, ID{Hash: checksumToBytes(0xa00bc324), Next: math.MaxUint64}, nil},
+
+		// Local is mainnet exactly on Withdrawals, remote announces Byzantium + knowledge about Petersburg. Remote
+		// is simply out of sync, accept.
+		{20000000, 1668000000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+
+		// Local is mainnet Withdrawals, remote announces Byzantium + knowledge about Petersburg. Remote
+		// is simply out of sync, accept.
+		{20000000, 1668000001, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+
+		// Local is mainnet Withdrawals, remote announces Spurious + knowledge about Byzantium. Remote
+		// is definitely out of sync. It may or may not need the Petersburg update, we don't know yet.
+		{20000000, 1668000001, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
+
+		// Local is mainnet Byzantium & pre-withdrawals, remote announces Petersburg. Local is out of sync, accept.
+		{7279999, 1667999999, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},
+
+		// Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg. Local
+		// out of sync. Local also knows about a future fork, but that is uncertain yet.
+		{4369999, 1667999999, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
+
+		// Local is mainnet Withdrawals. remote announces Byzantium but is not aware of further forks.
+		// Remote needs software update.
+		{20000000, 1668000001, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, ErrRemoteStale},
+
+		// Local is mainnet Withdrawals, and isn't aware of more forks. Remote announces Petersburg +
+		// 0xffffffff. Local needs software update, reject.
+		{20000000, 1668000001, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
+
+		// Local is mainnet Withdrawals, and is aware of Petersburg. Remote announces Petersburg +
+		// 0xffffffff. Local needs software update, reject.
+		{20000000, 1668000001, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
+
+		// Local is mainnet Withdrawals, remote is Rinkeby Petersburg.
+		{20000000, 1668000001, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
+
+		// Local is mainnet Withdrawals, far in the future. Remote announces Gopherium (non existing fork)
+		// at some future block 88888888, for itself, but past block for local. Local is incompatible.
+		//
+		// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
+		{88888888, 1668000001, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrRemoteStale},
+
+		// Local is mainnet Withdrawals. Remote is in Byzantium, but announces Gopherium (non existing
+		// fork) at block 7279999, before Petersburg. Local is incompatible.
+		{20000000, 1668000001, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrRemoteStale},
+	}
+	for i, tt := range tests {
+		filter := newFilter(&withdrawalConfig, params.MainnetGenesisHash, func() (uint64, uint64) { return tt.head, tt.time })
 		if err := filter(tt.id); err != tt.err {
 			t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
 		}
 	}
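A hypothetical usage sketch of the public API after this commit: build a local fork ID at an assumed head and time, then validate it with a static filter anchored at block zero. A synced node's ID is a superset of the filter's view, so rule #3 accepts it. This assumes the four-argument NewID introduced here and a mainnet config that carries no time-based fork yet.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Assumed head number and timestamp, purely illustrative.
	local := forkid.NewID(params.MainnetChainConfig, params.MainnetGenesisHash, 20000000, 1668000001)
	filter := forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash)
	fmt.Println(local, filter(local)) // expect a nil error: remote is ahead, rule #3
}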
@@ -331,7 +331,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
 		number  = head.Number.Uint64()
 		td      = h.chain.GetTd(hash, number)
 	)
-	forkID := forkid.NewID(h.chain.Config(), h.chain.Genesis().Hash(), h.chain.CurrentHeader().Number.Uint64())
+	forkID := forkid.NewID(h.chain.Config(), h.chain.Genesis().Hash(), h.chain.CurrentHeader().Number.Uint64(), h.chain.CurrentHeader().Time)
 	if err := peer.Handshake(h.networkID, td, hash, genesis.Hash(), forkID, h.forkFilter); err != nil {
 		peer.Log().Debug("Ethereum handshake failed", "err", err)
 		return err
@@ -60,6 +60,6 @@ func StartENRUpdater(chain *core.BlockChain, ln *enode.LocalNode) {
 // currentENREntry constructs an `eth` ENR entry based on the current state of the chain.
 func currentENREntry(chain *core.BlockChain) *enrEntry {
 	return &enrEntry{
-		ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), chain.CurrentHeader().Number.Uint64()),
+		ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), chain.CurrentHeader().Number.Uint64(), chain.CurrentHeader().Time),
 	}
 }
@@ -40,7 +40,7 @@ func testHandshake(t *testing.T, protocol uint) {
 		genesis = backend.chain.Genesis()
 		head    = backend.chain.CurrentBlock()
 		td      = backend.chain.GetTd(head.Hash(), head.NumberU64())
-		forkID  = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64())
+		forkID  = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time)
 	)
 	tests := []struct {
 		code uint64
@@ -111,7 +111,7 @@ func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error {
 	p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
 
 	// Execute the LES handshake
-	forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.genesis, h.backend.blockchain.CurrentHeader().Number.Uint64())
+	forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.genesis, h.backend.blockchain.CurrentHeader().Number.Uint64(), h.backend.blockchain.CurrentHeader().Time)
 	if err := p.Handshake(h.backend.blockchain.Genesis().Hash(), forkid, h.forkFilter); err != nil {
 		p.Log().Debug("Light Ethereum handshake failed", "err", err)
 		return err
@@ -124,8 +124,8 @@ func TestHandshake(t *testing.T) {
 		genesis = common.HexToHash("cafebabe")
 
 		chain1, chain2   = &fakeChain{}, &fakeChain{}
-		forkID1          = forkid.NewID(chain1.Config(), chain1.Genesis().Hash(), chain1.CurrentHeader().Number.Uint64())
-		forkID2          = forkid.NewID(chain2.Config(), chain2.Genesis().Hash(), chain2.CurrentHeader().Number.Uint64())
+		forkID1          = forkid.NewID(chain1.Config(), chain1.Genesis().Hash(), chain1.CurrentHeader().Number.Uint64(), chain1.CurrentHeader().Time)
+		forkID2          = forkid.NewID(chain2.Config(), chain2.Genesis().Hash(), chain2.CurrentHeader().Number.Uint64(), chain2.CurrentHeader().Time)
 		filter1, filter2 = forkid.NewFilter(chain1), forkid.NewFilter(chain2)
 	)
 
@@ -117,7 +117,7 @@ func (h *serverHandler) handle(p *clientPeer) error {
 		hash   = head.Hash()
 		number = head.Number.Uint64()
 		td     = h.blockchain.GetTd(hash, number)
-		forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), h.blockchain.CurrentBlock().NumberU64())
+		forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), h.blockchain.CurrentBlock().NumberU64(), h.blockchain.CurrentBlock().Time())
 	)
 	if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), forkID, h.forkFilter, h.server); err != nil {
 		p.Log().Debug("Light Ethereum handshake failed", "err", err)
@@ -489,7 +489,7 @@ func (client *testClient) newRawPeer(t *testing.T, name string, version int, rec
 		head = client.handler.backend.blockchain.CurrentHeader()
 		td   = client.handler.backend.blockchain.GetTd(head.Hash(), head.Number.Uint64())
 	)
-	forkID := forkid.NewID(client.handler.backend.blockchain.Config(), genesis.Hash(), head.Number.Uint64())
+	forkID := forkid.NewID(client.handler.backend.blockchain.Config(), genesis.Hash(), head.Number.Uint64(), head.Time)
 	tp.handshakeWithClient(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID, testCostList(0), recentTxLookup) // disable flow control by default
 
 	// Ensure the connection is established or exits when any error occurs
|
|||||||
head = server.handler.blockchain.CurrentHeader()
|
head = server.handler.blockchain.CurrentHeader()
|
||||||
td = server.handler.blockchain.GetTd(head.Hash(), head.Number.Uint64())
|
td = server.handler.blockchain.GetTd(head.Hash(), head.Number.Uint64())
|
||||||
)
|
)
|
||||||
forkID := forkid.NewID(server.handler.blockchain.Config(), genesis.Hash(), head.Number.Uint64())
|
forkID := forkid.NewID(server.handler.blockchain.Config(), genesis.Hash(), head.Number.Uint64(), head.Time)
|
||||||
tp.handshakeWithServer(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID)
|
tp.handshakeWithServer(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID)
|
||||||
|
|
||||||
// Ensure the connection is established or exits when any error occurs
|
// Ensure the connection is established or exits when any error occurs
|
||||||
|
@@ -183,7 +183,7 @@ func runBenchmark(b *testing.B, t *StateTest) {
 			b.Error(err)
 			return
 		}
-		var rules = config.Rules(new(big.Int), false)
+		var rules = config.Rules(new(big.Int), false, new(big.Int))
 
 		vmconfig.ExtraEips = eips
 		block := t.genesis(config).ToBlock()
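The last hunk implies a signature change elsewhere in this fork: ChainConfig.Rules gains a third argument carrying the head timestamp (a *big.Int here), so EVM rule selection can also see time-based forks. A sketch with simplified stand-in types, not the real params package:

package main

import (
	"fmt"
	"math/big"
)

// Rules and ChainConfig are simplified stand-ins for the params package.
type Rules struct{ IsShanghai bool }

type ChainConfig struct{ ShanghaiTime *big.Int }

// Rules mirrors the assumed three-argument shape: block number, merge flag,
// and head timestamp, with Shanghai flipping on once the time is reached.
func (c *ChainConfig) Rules(num *big.Int, isMerge bool, time *big.Int) Rules {
	isShanghai := c.ShanghaiTime != nil && time != nil && time.Cmp(c.ShanghaiTime) >= 0
	return Rules{IsShanghai: isShanghai}
}

func main() {
	cfg := &ChainConfig{ShanghaiTime: big.NewInt(1668000000)}
	fmt.Println(cfg.Rules(new(big.Int), false, big.NewInt(1668000001))) // {true}
}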