Patch for concurrent iterator & others (onto v1.11.6) #386
```diff
@@ -76,7 +76,7 @@ func (c *Chain) RootAt(height int) common.Hash {

 // ForkID gets the fork id of the chain.
 func (c *Chain) ForkID() forkid.ID {
-    return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
+    return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time())
 }

 // Shorten returns a copy chain of a desired height from the imported
```
```diff
@@ -158,14 +158,9 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
 // makeFullNode loads geth configuration and creates the Ethereum backend.
 func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
     stack, cfg := makeConfigNode(ctx)
-    if ctx.IsSet(utils.OverrideTerminalTotalDifficulty.Name) {
-        cfg.Eth.OverrideTerminalTotalDifficulty = flags.GlobalBig(ctx, utils.OverrideTerminalTotalDifficulty.Name)
+    if ctx.IsSet(utils.OverrideShanghai.Name) {
+        cfg.Eth.OverrideShanghai = flags.GlobalBig(ctx, utils.OverrideShanghai.Name)
     }
-    if ctx.IsSet(utils.OverrideTerminalTotalDifficultyPassed.Name) {
-        override := ctx.Bool(utils.OverrideTerminalTotalDifficultyPassed.Name)
-        cfg.Eth.OverrideTerminalTotalDifficultyPassed = &override
-    }
-
     backend, eth := utils.RegisterEthService(stack, &cfg.Eth)

     // Configure log filter RPC API.
```
```diff
@@ -64,8 +64,7 @@ var (
         utils.NoUSBFlag,
         utils.USBFlag,
         utils.SmartCardDaemonPathFlag,
-        utils.OverrideTerminalTotalDifficulty,
-        utils.OverrideTerminalTotalDifficultyPassed,
+        utils.OverrideShanghai,
         utils.EthashCacheDirFlag,
         utils.EthashCachesInMemoryFlag,
         utils.EthashCachesOnDiskFlag,
```
```diff
@@ -271,14 +271,9 @@ var (
         Value:    2048,
         Category: flags.EthCategory,
     }
-    OverrideTerminalTotalDifficulty = &flags.BigFlag{
-        Name:     "override.terminaltotaldifficulty",
-        Usage:    "Manually specify TerminalTotalDifficulty, overriding the bundled setting",
-        Category: flags.EthCategory,
-    }
-    OverrideTerminalTotalDifficultyPassed = &cli.BoolFlag{
-        Name:     "override.terminaltotaldifficultypassed",
-        Usage:    "Manually specify TerminalTotalDifficultyPassed, overriding the bundled setting",
+    OverrideShanghai = &flags.BigFlag{
+        Name:     "override.shanghai",
+        Usage:    "Manually specify the Shanghai fork timestamp, overriding the bundled setting",
         Category: flags.EthCategory,
     }
     // Light server and client settings
```
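Note: the renamed flag keeps the `BigFlag` plumbing, but the value it carries is now a Unix timestamp for the Shanghai fork rather than a total difficulty, so an operator override would look something like `geth --override.shanghai 1668000000` (invocation shown for illustration only; 1668000000 is the placeholder Shanghai timestamp used by the test cases later in this patch). The companion `override.terminaltotaldifficultypassed` boolean disappears with no replacement.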
```diff
@@ -318,7 +318,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
         if diskRoot != (common.Hash{}) {
             log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)

-            snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
+            snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), 0, diskRoot, true)
             if err != nil {
                 return nil, err
             }
```
```diff
@@ -328,7 +328,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
             }
         } else {
             log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
-            if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
+            if _, err := bc.setHeadBeyondRoot(head.NumberU64(), 0, common.Hash{}, true); err != nil {
                 return nil, err
             }
         }
```
```diff
@@ -427,7 +427,11 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
     // Rewind the chain in case of an incompatible config upgrade.
     if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
         log.Warn("Rewinding chain to upgrade configuration", "err", compat)
-        bc.SetHead(compat.RewindTo)
+        if compat.RewindToTime > 0 {
+            bc.SetHeadWithTimestamp(compat.RewindToTime)
+        } else {
+            bc.SetHead(compat.RewindToBlock)
+        }
         rawdb.WriteChainConfig(db, genesisHash, chainConfig)
     }
     // Start tx indexer/unindexer if required.
```
```diff
@@ -532,7 +536,20 @@ func (bc *BlockChain) loadLastState() error {
 // was fast synced or full synced and in which state, the method will try to
 // delete minimal data from disk whilst retaining chain consistency.
 func (bc *BlockChain) SetHead(head uint64) error {
-    if _, err := bc.setHeadBeyondRoot(head, common.Hash{}, false); err != nil {
+    if _, err := bc.setHeadBeyondRoot(head, 0, common.Hash{}, false); err != nil {
+        return err
+    }
+    // Send chain head event to update the transaction pool
+    bc.chainHeadFeed.Send(ChainHeadEvent{Block: bc.CurrentBlock()})
+    return nil
+}
+
+// SetHeadWithTimestamp rewinds the local chain to a new head that has at max
+// the given timestamp. Depending on whether the node was fast synced or full
+// synced and in which state, the method will try to delete minimal data from
+// disk whilst retaining chain consistency.
+func (bc *BlockChain) SetHeadWithTimestamp(timestamp uint64) error {
+    if _, err := bc.setHeadBeyondRoot(0, timestamp, common.Hash{}, false); err != nil {
         return err
     }
     // Send chain head event to update the transaction pool
```
```diff
@@ -569,8 +586,12 @@ func (bc *BlockChain) SetSafe(block *types.Block) {
 // in which state, the method will try to delete minimal data from disk whilst
 // retaining chain consistency.
 //
+// The method also works in timestamp mode if `head == 0` but `time != 0`. In that
+// case blocks are rolled back until the new head becomes older or equal to the
+// requested time. If both `head` and `time` is 0, the chain is rewound to genesis.
+//
 // The method returns the block number where the requested root cap was found.
-func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bool) (uint64, error) {
+func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Hash, repair bool) (uint64, error) {
     if !bc.chainmu.TryLock() {
         return 0, errChainStopped
     }
```
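The three-way convention in the new signature is easy to get wrong, so here is a minimal self-contained sketch of the dispatch described by the comment above (the helper and its messages are hypothetical, not part of the patch). `SetHead` and `SetHeadWithTimestamp` above are thin wrappers passing `(head, 0)` and `(0, timestamp)` respectively.

```go
package main

import "fmt"

// rewindTarget mirrors the (head, time) convention documented for
// setHeadBeyondRoot: a non-zero time selects timestamp mode, otherwise head
// is a block number, and both zero means a rewind to genesis.
func rewindTarget(head, time uint64) string {
	switch {
	case time != 0:
		return fmt.Sprintf("timestamp mode: unwind until header time <= %d", time)
	case head != 0:
		return fmt.Sprintf("block mode: unwind until block number <= %d", head)
	default:
		return "rewind to genesis"
	}
}

func main() {
	fmt.Println(rewindTarget(15050000, 0))   // block mode, as used by SetHead
	fmt.Println(rewindTarget(0, 1668000000)) // timestamp mode, as used by SetHeadWithTimestamp
	fmt.Println(rewindTarget(0, 0))          // genesis
}
```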
```diff
@@ -584,7 +605,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
     pivot := rawdb.ReadLastPivotNumber(bc.db)
     frozen, _ := bc.db.Ancients()

-    updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
+    updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
         // Rewind the blockchain, ensuring we don't end up with a stateless head
         // block. Note, depth equality is permitted to allow using SetHead as a
         // chain reparation mechanism without deleting any data!
```
```diff
@@ -665,16 +686,18 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
             bc.currentFastBlock.Store(newHeadFastBlock)
             headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
         }
-        head := bc.CurrentBlock().NumberU64()
+        var (
+            headHeader = bc.CurrentBlock().Header()
+            headNumber = headHeader.Number.Uint64()
+        )
         // If setHead underflown the freezer threshold and the block processing
         // intent afterwards is full block importing, delete the chain segment
         // between the stateful-block and the sethead target.
         var wipe bool
-        if head+1 < frozen {
-            wipe = pivot == nil || head >= *pivot
+        if headNumber+1 < frozen {
+            wipe = pivot == nil || headNumber >= *pivot
         }
-        return head, wipe // Only force wipe if full synced
+        return headHeader, wipe // Only force wipe if full synced
     }
     // Rewind the header chain, deleting all block bodies until then
     delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
```
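Note: `updateFn` now hands back the full `*types.Header` rather than a bare block number, presumably because a timestamp-mode rewind needs the header's time as well as its number; where only the number is still required, the caller extracts `target.Number.Uint64()`, as the next hunk shows.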
```diff
@@ -701,13 +724,18 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
     // touching the header chain altogether, unless the freezer is broken
     if repair {
         if target, force := updateFn(bc.db, bc.CurrentBlock().Header()); force {
-            bc.hc.SetHead(target, updateFn, delFn)
+            bc.hc.SetHead(target.Number.Uint64(), updateFn, delFn)
         }
     } else {
         // Rewind the chain to the requested head and keep going backwards until a
         // block with a state is found or fast sync pivot is passed
-        log.Warn("Rewinding blockchain", "target", head)
-        bc.hc.SetHead(head, updateFn, delFn)
+        if time > 0 {
+            log.Warn("Rewinding blockchain to timestamp", "target", time)
+            bc.hc.SetHeadWithTimestamp(time, updateFn, delFn)
+        } else {
+            log.Warn("Rewinding blockchain to block", "target", head)
+            bc.hc.SetHead(head, updateFn, delFn)
+        }
     }
     // Clear out any stale content from the caches
     bc.bodyCache.Purge()
```
```diff
@@ -4275,7 +4275,7 @@ func TestEIP3651(t *testing.T) {

     gspec.Config.BerlinBlock = common.Big0
     gspec.Config.LondonBlock = common.Big0
-    gspec.Config.ShanghaiBlock = common.Big0
+    gspec.Config.ShanghaiTime = common.Big0
     signer := types.LatestSigner(gspec.Config)

     _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
```
```diff
@@ -24,6 +24,7 @@ import (
     "math"
     "math/big"
     "reflect"
+    "sort"
     "strings"

     "github.com/ethereum/go-ethereum/common"
```
```diff
@@ -44,6 +45,12 @@ var (
     ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
 )

+// timestampThreshold is the Ethereum mainnet genesis timestamp. It is used to
+// differentiate if a forkid.next field is a block number or a timestamp. Whilst
+// very hacky, something's needed to split the validation during the transition
+// period (block forks -> time forks).
+const timestampThreshold = 1438269973
+
 // Blockchain defines all necessary method to build a forkID.
 type Blockchain interface {
     // Config retrieves the chain's fork configuration.
```
```diff
@@ -65,31 +72,41 @@ type ID struct {
 // Filter is a fork id filter to validate a remotely advertised ID.
 type Filter func(id ID) error

-// NewID calculates the Ethereum fork ID from the chain config, genesis hash, and head.
-func NewID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
+// NewID calculates the Ethereum fork ID from the chain config, genesis hash, head and time.
+func NewID(config *params.ChainConfig, genesis common.Hash, head, time uint64) ID {
     // Calculate the starting checksum from the genesis hash
     hash := crc32.ChecksumIEEE(genesis[:])

     // Calculate the current fork checksum and the next fork block
-    var next uint64
-    for _, fork := range gatherForks(config) {
+    forksByBlock, forksByTime := gatherForks(config)
+    for _, fork := range forksByBlock {
         if fork <= head {
             // Fork already passed, checksum the previous hash and the fork number
             hash = checksumUpdate(hash, fork)
             continue
         }
-        next = fork
-        break
+        return ID{Hash: checksumToBytes(hash), Next: fork}
     }
-    return ID{Hash: checksumToBytes(hash), Next: next}
+    for _, fork := range forksByTime {
+        if fork <= time {
+            // Fork already passed, checksum the previous hash and fork timestamp
+            hash = checksumUpdate(hash, fork)
+            continue
+        }
+        return ID{Hash: checksumToBytes(hash), Next: fork}
+    }
+    return ID{Hash: checksumToBytes(hash), Next: 0}
 }

 // NewIDWithChain calculates the Ethereum fork ID from an existing chain instance.
 func NewIDWithChain(chain Blockchain) ID {
+    head := chain.CurrentHeader()
+
     return NewID(
         chain.Config(),
         chain.Genesis().Hash(),
-        chain.CurrentHeader().Number.Uint64(),
+        head.Number.Uint64(),
+        head.Time,
     )
 }
```
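For readers less familiar with the fork ID scheme, here is a self-contained toy version of the fold that the new `NewID` performs, using illustrative fork values rather than a real network; the `checksumUpdate` helper below matches the CRC32 fold that the function above relies on, and the key point is that block forks are consulted before time forks:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// checksumUpdate folds one fork identifier (block number or timestamp) into
// the running CRC32 as a big-endian uint64.
func checksumUpdate(hash uint32, fork uint64) uint32 {
	var blob [8]byte
	binary.BigEndian.PutUint64(blob[:], fork)
	return crc32.Update(hash, crc32.IEEETable, blob[:])
}

func main() {
	// Toy chain: two block forks, one time fork (values are illustrative).
	forksByBlock := []uint64{100, 200}
	forksByTime := []uint64{1_700_000_000}
	head, time := uint64(150), uint64(1_650_000_000)

	hash := crc32.ChecksumIEEE(make([]byte, 32)) // stand-in genesis hash
	for _, fork := range forksByBlock {
		if fork <= head {
			hash = checksumUpdate(hash, fork) // block fork already passed
			continue
		}
		fmt.Printf("ID{Hash: %08x, Next: %d}\n", hash, fork) // next fork is a block
		return
	}
	for _, fork := range forksByTime {
		if fork <= time {
			hash = checksumUpdate(hash, fork) // time fork already passed
			continue
		}
		fmt.Printf("ID{Hash: %08x, Next: %d}\n", hash, fork) // next fork is a timestamp
		return
	}
	fmt.Printf("ID{Hash: %08x, Next: 0}\n", hash)
}
```

With `head = 150`, only the fork at 100 has passed, so the toy ID reports `Next: 200` without ever reaching the time forks.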
```diff
@@ -99,26 +116,28 @@ func NewFilter(chain Blockchain) Filter {
     return newFilter(
         chain.Config(),
         chain.Genesis().Hash(),
-        func() uint64 {
-            return chain.CurrentHeader().Number.Uint64()
+        func() (uint64, uint64) {
+            head := chain.CurrentHeader()
+            return head.Number.Uint64(), head.Time
         },
     )
 }

 // NewStaticFilter creates a filter at block zero.
 func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter {
-    head := func() uint64 { return 0 }
+    head := func() (uint64, uint64) { return 0, 0 }
     return newFilter(config, genesis, head)
 }

 // newFilter is the internal version of NewFilter, taking closures as its arguments
 // instead of a chain. The reason is to allow testing it without having to simulate
 // an entire blockchain.
-func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter {
+func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() (uint64, uint64)) Filter {
     // Calculate the all the valid fork hash and fork next combos
     var (
-        forks = gatherForks(config)
-        sums  = make([][4]byte, len(forks)+1) // 0th is the genesis
+        forksByBlock, forksByTime = gatherForks(config)
+        forks                     = append(append([]uint64{}, forksByBlock...), forksByTime...)
+        sums                      = make([][4]byte, len(forks)+1) // 0th is the genesis
     )
     hash := crc32.ChecksumIEEE(genesis[:])
     sums[0] = checksumToBytes(hash)
```
```diff
@@ -129,7 +148,10 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
     // Add two sentries to simplify the fork checks and don't require special
     // casing the last one.
     forks = append(forks, math.MaxUint64) // Last fork will never be passed
+    if len(forksByTime) == 0 {
+        // In purely block based forks, avoid the sentry spilling into timestamp territory
+        forksByBlock = append(forksByBlock, math.MaxUint64) // Last fork will never be passed
+    }
     // Create a validator that will filter out incompatible chains
     return func(id ID) error {
         // Run the fork checksum validation ruleset:
```
```diff
@@ -151,8 +173,13 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
         // the remote, but at this current point in time we don't have enough
         // information.
         // 4. Reject in all other cases.
-        head := headfn()
+        block, time := headfn()
         for i, fork := range forks {
+            // Pick the head comparison based on fork progression
+            head := block
+            if i >= len(forksByBlock) {
+                head = time
+            }
             // If our head is beyond this fork, continue to the next (we have a dummy
             // fork of maxuint64 as the last item to always fail this check eventually).
             if head >= fork {
```
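The per-index head switch above is subtle: the combined `forks` slice keeps block forks first and time forks after them, so the comparison value flips from block number to timestamp exactly at index `len(forksByBlock)`. A compilable toy illustration (all values made up):

```go
package main

import "fmt"

func main() {
	// Combined fork list as built in newFilter: block forks, then time forks.
	forksByBlock := []uint64{100, 200}
	forksByTime := []uint64{1_700_000_000}
	forks := append(append([]uint64{}, forksByBlock...), forksByTime...)

	block, time := uint64(250), uint64(1_650_000_000)
	for i, fork := range forks {
		head := block
		if i >= len(forksByBlock) {
			head = time // past the block section, compare timestamps instead
		}
		fmt.Printf("fork %d passed locally: %v\n", fork, head >= fork)
	}
	// Output: forks 100 and 200 have passed (by block), 1700000000 has not (by time).
}
```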
```diff
@@ -163,7 +190,7 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui
             if sums[i] == id.Hash {
                 // Fork checksum matched, check if a remote future fork block already passed
                 // locally without the local node being aware of it (rule #1a).
-                if id.Next > 0 && head >= id.Next {
+                if id.Next > 0 && (head >= id.Next || (id.Next > timestampThreshold && time >= id.Next)) {
                     return ErrLocalIncompatibleOrStale
                 }
                 // Haven't passed locally a remote-only fork, accept the connection (rule #1b).
```
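Rule #1a now has to guess whether a remote `Next` is a block number or a timestamp: anything above the mainnet genesis timestamp (1438269973, i.e. 2015-07-30) is treated as a timestamp, since no plausible block number exceeds it. A minimal sketch of that disambiguation, assuming only the constant introduced earlier in this patch:

```go
package main

import "fmt"

// timestampThreshold is the mainnet genesis timestamp, as defined earlier in
// this patch; values above it are interpreted as fork timestamps.
const timestampThreshold = 1438269973

func main() {
	for _, next := range []uint64{15050000, 1668000000} {
		if next > timestampThreshold {
			fmt.Printf("Next %d -> compared against the local head timestamp\n", next)
		} else {
			fmt.Printf("Next %d -> compared against the local head block number\n", next)
		}
	}
}
```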
```diff
@@ -211,46 +238,60 @@ func checksumToBytes(hash uint32) [4]byte {
     return blob
 }

-// gatherForks gathers all the known forks and creates a sorted list out of them.
-func gatherForks(config *params.ChainConfig) []uint64 {
+// gatherForks gathers all the known forks and creates two sorted lists out of
+// them, one for the block number based forks and the second for the timestamps.
+func gatherForks(config *params.ChainConfig) ([]uint64, []uint64) {
     // Gather all the fork block numbers via reflection
     kind := reflect.TypeOf(params.ChainConfig{})
     conf := reflect.ValueOf(config).Elem()

-    var forks []uint64
+    var (
+        forksByBlock []uint64
+        forksByTime  []uint64
+    )
     for i := 0; i < kind.NumField(); i++ {
         // Fetch the next field and skip non-fork rules
         field := kind.Field(i)
-        if !strings.HasSuffix(field.Name, "Block") {
+
+        time := strings.HasSuffix(field.Name, "Time")
+        if !time && !strings.HasSuffix(field.Name, "Block") {
             continue
         }
         if field.Type != reflect.TypeOf(new(big.Int)) {
             continue
         }
-        // Extract the fork rule block number and aggregate it
+        // Extract the fork rule block number or timestamp and aggregate it
         rule := conf.Field(i).Interface().(*big.Int)
         if rule != nil {
-            forks = append(forks, rule.Uint64())
-        }
-    }
-    // Sort the fork block numbers to permit chronological XOR
-    for i := 0; i < len(forks); i++ {
-        for j := i + 1; j < len(forks); j++ {
-            if forks[i] > forks[j] {
-                forks[i], forks[j] = forks[j], forks[i]
+            if time {
+                forksByTime = append(forksByTime, rule.Uint64())
+            } else {
+                forksByBlock = append(forksByBlock, rule.Uint64())
             }
         }
     }
-    // Deduplicate block numbers applying multiple forks
-    for i := 1; i < len(forks); i++ {
-        if forks[i] == forks[i-1] {
-            forks = append(forks[:i], forks[i+1:]...)
+    sort.Slice(forksByBlock, func(i, j int) bool { return forksByBlock[i] < forksByBlock[j] })
+    sort.Slice(forksByTime, func(i, j int) bool { return forksByTime[i] < forksByTime[j] })
+
+    // Deduplicate fork identifiers applying multiple forks
+    for i := 1; i < len(forksByBlock); i++ {
+        if forksByBlock[i] == forksByBlock[i-1] {
+            forksByBlock = append(forksByBlock[:i], forksByBlock[i+1:]...)
+            i--
+        }
+    }
+    for i := 1; i < len(forksByTime); i++ {
+        if forksByTime[i] == forksByTime[i-1] {
+            forksByTime = append(forksByTime[:i], forksByTime[i+1:]...)
             i--
         }
     }
     // Skip any forks in block 0, that's the genesis ruleset
-    if len(forks) > 0 && forks[0] == 0 {
-        forks = forks[1:]
+    if len(forksByBlock) > 0 && forksByBlock[0] == 0 {
+        forksByBlock = forksByBlock[1:]
     }
-    return forks
+    if len(forksByTime) > 0 && forksByTime[0] == 0 {
+        forksByTime = forksByTime[1:]
+    }
+    return forksByBlock, forksByTime
 }
```
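To see the reflection walk in isolation, here is a self-contained toy with a hypothetical config struct (field names merely mimic `params.ChainConfig`; the struct is not part of the patch). It shows the same suffix split into block and time lists, followed by the `sort.Slice` ordering; the real function additionally deduplicates and drops genesis-zero entries:

```go
package main

import (
	"fmt"
	"math/big"
	"reflect"
	"sort"
	"strings"
)

// toyConfig mimics the shape gatherForks relies on: fork fields are *big.Int
// and their names end in "Block" or "Time".
type toyConfig struct {
	HomesteadBlock *big.Int
	BerlinBlock    *big.Int
	ShanghaiTime   *big.Int
	ChainID        *big.Int // no Block/Time suffix: skipped
	Clique         string   // not a *big.Int: skipped
}

func main() {
	cfg := &toyConfig{
		HomesteadBlock: big.NewInt(200),
		BerlinBlock:    big.NewInt(100),
		ShanghaiTime:   big.NewInt(1_700_000_000),
		ChainID:        big.NewInt(1),
	}
	kind := reflect.TypeOf(toyConfig{})
	conf := reflect.ValueOf(cfg).Elem()

	var byBlock, byTime []uint64
	for i := 0; i < kind.NumField(); i++ {
		field := kind.Field(i)
		isTime := strings.HasSuffix(field.Name, "Time")
		if !isTime && !strings.HasSuffix(field.Name, "Block") {
			continue
		}
		if field.Type != reflect.TypeOf(new(big.Int)) {
			continue
		}
		if rule := conf.Field(i).Interface().(*big.Int); rule != nil {
			if isTime {
				byTime = append(byTime, rule.Uint64())
			} else {
				byBlock = append(byBlock, rule.Uint64())
			}
		}
	}
	sort.Slice(byBlock, func(i, j int) bool { return byBlock[i] < byBlock[j] })
	sort.Slice(byTime, func(i, j int) bool { return byTime[i] < byTime[j] })
	fmt.Println(byBlock, byTime) // [100 200] [1700000000]
}
```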
```diff
@@ -30,10 +30,13 @@ import (
 // TestCreation tests that different genesis and fork rule combinations result in
 // the correct fork ID.
 func TestCreation(t *testing.T) {
-    mergeConfig := *params.MainnetChainConfig
-    mergeConfig.MergeNetsplitBlock = big.NewInt(18000000)
+    // Temporary non-existent scenario TODO(karalabe): delete when Shanghai is enabled
+    timestampedConfig := *params.MainnetChainConfig
+    timestampedConfig.ShanghaiTime = big.NewInt(1668000000)
+
     type testcase struct {
         head uint64
+        time uint64
         want ID
     }
     tests := []struct {
```
```diff
@@ -46,32 +49,32 @@ func TestCreation(t *testing.T) {
             params.MainnetChainConfig,
             params.MainnetGenesisHash,
             []testcase{
-                {0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced
-                {1149999, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block
-                {1150000, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block
-                {1919999, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block
-                {1920000, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block
-                {2462999, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block
-                {2463000, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block
-                {2674999, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block
-                {2675000, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block
-                {4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
-                {4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
-                {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
-                {7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
-                {9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
-                {9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
-                {9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
-                {9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block
-                {12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
-                {12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
-                {12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
-                {12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
-                {13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
-                {13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
-                {15049999, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
-                {15050000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}}, // First Gray Glacier block
-                {20000000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}}, // Future Gray Glacier block
+                {0, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced
+                {1149999, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block
+                {1150000, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block
+                {1919999, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block
+                {1920000, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block
+                {2462999, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block
+                {2463000, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block
+                {2674999, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block
+                {2675000, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block
+                {4369999, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
+                {4370000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
+                {7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
+                {7280000, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
+                {9068999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
+                {9069000, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
+                {9199999, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
+                {9200000, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block
+                {12243999, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
+                {12244000, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
+                {12964999, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
+                {12965000, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
+                {13772999, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
+                {13773000, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
+                {15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
+                {15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}}, // First Gray Glacier block
+                {20000000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}}, // Future Gray Glacier block
             },
         },
         // Ropsten test cases
```
```diff
@@ -79,24 +82,24 @@ func TestCreation(t *testing.T) {
             params.RopstenChainConfig,
             params.RopstenGenesisHash,
             []testcase{
-                {0, ID{Hash: checksumToBytes(0x30c7ddbc), Next: 10}}, // Unsynced, last Frontier, Homestead and first Tangerine block
-                {9, ID{Hash: checksumToBytes(0x30c7ddbc), Next: 10}}, // Last Tangerine block
-                {10, ID{Hash: checksumToBytes(0x63760190), Next: 1700000}}, // First Spurious block
-                {1699999, ID{Hash: checksumToBytes(0x63760190), Next: 1700000}}, // Last Spurious block
-                {1700000, ID{Hash: checksumToBytes(0x3ea159c7), Next: 4230000}}, // First Byzantium block
-                {4229999, ID{Hash: checksumToBytes(0x3ea159c7), Next: 4230000}}, // Last Byzantium block
-                {4230000, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // First Constantinople block
-                {4939393, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // Last Constantinople block
-                {4939394, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // First Petersburg block
-                {6485845, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // Last Petersburg block
-                {6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // First Istanbul block
-                {7117116, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // Last Istanbul block
-                {7117117, ID{Hash: checksumToBytes(0x6727ef90), Next: 9812189}}, // First Muir Glacier block
-                {9812188, ID{Hash: checksumToBytes(0x6727ef90), Next: 9812189}}, // Last Muir Glacier block
-                {9812189, ID{Hash: checksumToBytes(0xa157d377), Next: 10499401}}, // First Berlin block
-                {10499400, ID{Hash: checksumToBytes(0xa157d377), Next: 10499401}}, // Last Berlin block
-                {10499401, ID{Hash: checksumToBytes(0x7119b6b3), Next: 0}}, // First London block
-                {11000000, ID{Hash: checksumToBytes(0x7119b6b3), Next: 0}}, // Future London block
+                {0, 0, ID{Hash: checksumToBytes(0x30c7ddbc), Next: 10}}, // Unsynced, last Frontier, Homestead and first Tangerine block
+                {9, 0, ID{Hash: checksumToBytes(0x30c7ddbc), Next: 10}}, // Last Tangerine block
+                {10, 0, ID{Hash: checksumToBytes(0x63760190), Next: 1700000}}, // First Spurious block
+                {1699999, 0, ID{Hash: checksumToBytes(0x63760190), Next: 1700000}}, // Last Spurious block
+                {1700000, 0, ID{Hash: checksumToBytes(0x3ea159c7), Next: 4230000}}, // First Byzantium block
+                {4229999, 0, ID{Hash: checksumToBytes(0x3ea159c7), Next: 4230000}}, // Last Byzantium block
+                {4230000, 0, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // First Constantinople block
+                {4939393, 0, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // Last Constantinople block
+                {4939394, 0, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // First Petersburg block
+                {6485845, 0, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // Last Petersburg block
+                {6485846, 0, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // First Istanbul block
+                {7117116, 0, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // Last Istanbul block
+                {7117117, 0, ID{Hash: checksumToBytes(0x6727ef90), Next: 9812189}}, // First Muir Glacier block
+                {9812188, 0, ID{Hash: checksumToBytes(0x6727ef90), Next: 9812189}}, // Last Muir Glacier block
+                {9812189, 0, ID{Hash: checksumToBytes(0xa157d377), Next: 10499401}}, // First Berlin block
+                {10499400, 0, ID{Hash: checksumToBytes(0xa157d377), Next: 10499401}}, // Last Berlin block
+                {10499401, 0, ID{Hash: checksumToBytes(0x7119b6b3), Next: 0}}, // First London block
+                {11000000, 0, ID{Hash: checksumToBytes(0x7119b6b3), Next: 0}}, // Future London block
             },
         },
         // Rinkeby test cases
```
```diff
@@ -104,23 +107,23 @@ func TestCreation(t *testing.T) {
             params.RinkebyChainConfig,
             params.RinkebyGenesisHash,
             []testcase{
-                {0, ID{Hash: checksumToBytes(0x3b8e0691), Next: 1}}, // Unsynced, last Frontier block
-                {1, ID{Hash: checksumToBytes(0x60949295), Next: 2}}, // First and last Homestead block
-                {2, ID{Hash: checksumToBytes(0x8bde40dd), Next: 3}}, // First and last Tangerine block
-                {3, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // First Spurious block
-                {1035300, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // Last Spurious block
-                {1035301, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // First Byzantium block
-                {3660662, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // Last Byzantium block
-                {3660663, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // First Constantinople block
-                {4321233, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // Last Constantinople block
-                {4321234, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // First Petersburg block
-                {5435344, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // Last Petersburg block
-                {5435345, ID{Hash: checksumToBytes(0xcbdb8838), Next: 8290928}}, // First Istanbul block
-                {8290927, ID{Hash: checksumToBytes(0xcbdb8838), Next: 8290928}}, // Last Istanbul block
-                {8290928, ID{Hash: checksumToBytes(0x6910c8bd), Next: 8897988}}, // First Berlin block
-                {8897987, ID{Hash: checksumToBytes(0x6910c8bd), Next: 8897988}}, // Last Berlin block
-                {8897988, ID{Hash: checksumToBytes(0x8E29F2F3), Next: 0}}, // First London block
-                {10000000, ID{Hash: checksumToBytes(0x8E29F2F3), Next: 0}}, // Future London block
+                {0, 0, ID{Hash: checksumToBytes(0x3b8e0691), Next: 1}}, // Unsynced, last Frontier block
+                {1, 0, ID{Hash: checksumToBytes(0x60949295), Next: 2}}, // First and last Homestead block
+                {2, 0, ID{Hash: checksumToBytes(0x8bde40dd), Next: 3}}, // First and last Tangerine block
+                {3, 0, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // First Spurious block
+                {1035300, 0, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // Last Spurious block
+                {1035301, 0, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // First Byzantium block
+                {3660662, 0, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // Last Byzantium block
+                {3660663, 0, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // First Constantinople block
+                {4321233, 0, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // Last Constantinople block
+                {4321234, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // First Petersburg block
+                {5435344, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // Last Petersburg block
+                {5435345, 0, ID{Hash: checksumToBytes(0xcbdb8838), Next: 8290928}}, // First Istanbul block
+                {8290927, 0, ID{Hash: checksumToBytes(0xcbdb8838), Next: 8290928}}, // Last Istanbul block
+                {8290928, 0, ID{Hash: checksumToBytes(0x6910c8bd), Next: 8897988}}, // First Berlin block
+                {8897987, 0, ID{Hash: checksumToBytes(0x6910c8bd), Next: 8897988}}, // Last Berlin block
+                {8897988, 0, ID{Hash: checksumToBytes(0x8E29F2F3), Next: 0}}, // First London block
+                {10000000, 0, ID{Hash: checksumToBytes(0x8E29F2F3), Next: 0}}, // Future London block
             },
         },
         // Goerli test cases
```
```diff
@@ -128,14 +131,14 @@ func TestCreation(t *testing.T) {
             params.GoerliChainConfig,
             params.GoerliGenesisHash,
             []testcase{
-                {0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block
-                {1561650, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block
-                {1561651, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // First Istanbul block
-                {4460643, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // Last Istanbul block
-                {4460644, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // First Berlin block
-                {5000000, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // Last Berlin block
-                {5062605, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // First London block
-                {6000000, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // Future London block
+                {0, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block
+                {1561650, 0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block
+                {1561651, 0, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // First Istanbul block
+                {4460643, 0, ID{Hash: checksumToBytes(0xc25efa5c), Next: 4460644}}, // Last Istanbul block
+                {4460644, 0, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // First Berlin block
+                {5000000, 0, ID{Hash: checksumToBytes(0x757a1c47), Next: 5062605}}, // Last Berlin block
+                {5062605, 0, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // First London block
+                {6000000, 0, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // Future London block
             },
         },
         // Sepolia test cases
```
```diff
@@ -143,49 +146,50 @@ func TestCreation(t *testing.T) {
             params.SepoliaChainConfig,
             params.SepoliaGenesisHash,
             []testcase{
-                {0, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin and first London block
-                {1735370, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Last London block
-                {1735371, ID{Hash: checksumToBytes(0xb96cbd13), Next: 0}}, // First MergeNetsplit block
+                {0, 0, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin and first London block
+                {1735370, 0, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Last London block
+                {1735371, 0, ID{Hash: checksumToBytes(0xb96cbd13), Next: 0}}, // First MergeNetsplit block
             },
         },
-        // Merge test cases
+        // Temporary timestamped test cases
         {
-            &mergeConfig,
+            &timestampedConfig,
             params.MainnetGenesisHash,
             []testcase{
-                {0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced
-                {1149999, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block
-                {1150000, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block
-                {1919999, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block
-                {1920000, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block
-                {2462999, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block
-                {2463000, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block
-                {2674999, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block
-                {2675000, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block
-                {4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
-                {4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
-                {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
-                {7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
-                {9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
-                {9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
-                {9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
-                {9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block
-                {12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
-                {12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
-                {12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
-                {12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
-                {13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
-                {13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
-                {15049999, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
-                {15050000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 18000000}}, // First Gray Glacier block
-                {18000000, ID{Hash: checksumToBytes(0x4fb8a872), Next: 0}}, // First Merge Start block
-                {20000000, ID{Hash: checksumToBytes(0x4fb8a872), Next: 0}}, // Future Merge Start block
+                {0, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced
+                {1149999, 0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block
+                {1150000, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block
+                {1919999, 0, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block
+                {1920000, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block
+                {2462999, 0, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block
+                {2463000, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block
+                {2674999, 0, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block
+                {2675000, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block
+                {4369999, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
+                {4370000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
+                {7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
+                {7280000, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
+                {9068999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
+                {9069000, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
+                {9199999, 0, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
+                {9200000, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block
+                {12243999, 0, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
+                {12244000, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
+                {12964999, 0, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
+                {12965000, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
+                {13772999, 0, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
+                {13773000, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block
+                {15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
+                {15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}}, // First Gray Glacier block
+                {19999999, 1667999999, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}}, // Last Gray Glacier block
+                {20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}}, // First Shanghai block
+                {20000000, 2668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}}, // Future Shanghai block
             },
         },
     }
     for i, tt := range tests {
         for j, ttt := range tt.cases {
-            if have := NewID(tt.config, tt.genesis, ttt.head); have != ttt.want {
+            if have := NewID(tt.config, tt.genesis, ttt.head, ttt.time); have != ttt.want {
                 t.Errorf("test %d, case %d: fork ID mismatch: have %x, want %x", i, j, have, ttt.want)
             }
         }
     }
```
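A single row of the table above, re-expressed as a standalone test against the new four-argument `NewID` (a minimal sketch living alongside the existing tests; the expected hash is copied from the mainnet table and holds only while the bundled config has no timestamped forks):

```go
package forkid

import (
	"testing"

	"github.com/ethereum/go-ethereum/params"
)

// TestGrayGlacierID re-checks one mainnet row through the new NewID signature:
// head 20M, time 0 should still yield the Gray Glacier ID with no next fork.
func TestGrayGlacierID(t *testing.T) {
	id := NewID(params.MainnetChainConfig, params.MainnetGenesisHash, 20000000, 0)
	want := ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}
	if id != want {
		t.Fatalf("fork ID mismatch: have %x, want %x", id, want)
	}
}
```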
```diff
@@ -195,79 +199,244 @@ func TestCreation(t *testing.T) {
 // TestValidation tests that a local peer correctly validates and accepts a remote
 // fork ID.
 func TestValidation(t *testing.T) {
-    tests := []struct {
-        head uint64
-        id   ID
-        err  error
-    }{
-        // Local is mainnet Petersburg, remote announces the same. No future fork is announced.
-        {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},
-
-        // Local is mainnet Petersburg, remote announces the same. Remote also announces a next fork
+    // Temporary non-existent scenario TODO(karalabe): delete when Shanghai is enabled
+    timestampedConfig := *params.MainnetChainConfig
+    timestampedConfig.ShanghaiTime = big.NewInt(1668000000)
+
+    tests := []struct {
+        config *params.ChainConfig
+        head   uint64
+        time   uint64
+        id     ID
+        err    error
+    }{
+        //------------------
+        // Block based tests
+        //------------------
+
+        // Local is mainnet Gray Glacier, remote announces the same. No future fork is announced.
+        {params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
+
+        // Local is mainnet Gray Glacier, remote announces the same. Remote also announces a next fork
         // at block 0xffffffff, but that is uncertain.
-        {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: math.MaxUint64}, nil},
+        {params.MainnetChainConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: math.MaxUint64}, nil},

         // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
         // also Byzantium, but it's not yet aware of Petersburg (e.g. non updated node before the fork).
         // In this case we don't know if Petersburg passed yet or not.
-        {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
+        {params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},

         // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
         // also Byzantium, and it's also aware of Petersburg (e.g. updated node before the fork). We
         // don't know if Petersburg passed yet (will pass) or not.
-        {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+        {params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},

         // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
         // also Byzantium, and it's also aware of some random fork (e.g. misconfigured Petersburg). As
         // neither forks passed at neither nodes, they may mismatch, but we still connect for now.
-        {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: math.MaxUint64}, nil},
+        {params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: math.MaxUint64}, nil},

         // Local is mainnet exactly on Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
         // is simply out of sync, accept.
-        {7280000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+        {params.MainnetChainConfig, 7280000, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},

         // Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
         // is simply out of sync, accept.
-        {7987396, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+        {params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},

         // Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium. Remote
         // is definitely out of sync. It may or may not need the Petersburg update, we don't know yet.
-        {7987396, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
+        {params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},

         // Local is mainnet Byzantium, remote announces Petersburg. Local is out of sync, accept.
-        {7279999, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},
+        {params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},

         // Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg. Local
         // out of sync. Local also knows about a future fork, but that is uncertain yet.
-        {4369999, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
+        {params.MainnetChainConfig, 4369999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},

         // Local is mainnet Petersburg. remote announces Byzantium but is not aware of further forks.
         // Remote needs software update.
-        {7987396, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, ErrRemoteStale},
+        {params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, ErrRemoteStale},

         // Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
         // 0xffffffff. Local needs software update, reject.
-        {7987396, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
+        {params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},

         // Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
         // 0xffffffff. Local needs software update, reject.
-        {7279999, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
+        {params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},

         // Local is mainnet Petersburg, remote is Rinkeby Petersburg.
-        {7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
+        {params.MainnetChainConfig, 7987396, 0, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},

         // Local is mainnet Gray Glacier, far in the future. Remote announces Gopherium (non existing fork)
         // at some future block 88888888, for itself, but past block for local. Local is incompatible.
         //
         // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
-        {88888888, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale},
+        //
+        // TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
+        {params.MainnetChainConfig, 88888888, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale},

         // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
         // fork) at block 7279999, before Petersburg. Local is incompatible.
-        {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
+        //
+        // TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
+        {params.MainnetChainConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
+
+        //------------------------------------
+        // Block to timestamp transition tests
+        //------------------------------------
+
+        // Local is mainnet currently in Gray Glacier only (so it's aware of Shanghai), remote announces
+        // also Gray Glacier, but it's not yet aware of Shanghai (e.g. non updated node before the fork).
+        // In this case we don't know if Shanghai passed yet or not.
+        {&timestampedConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
+
+        // Local is mainnet currently in Gray Glacier only (so it's aware of Shanghai), remote announces
+        // also Gray Glacier, and it's also aware of Shanghai (e.g. updated node before the fork). We
+        // don't know if Shanghai passed yet (will pass) or not.
+        {&timestampedConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}, nil},
+
+        // Local is mainnet currently in Gray Glacier only (so it's aware of Shanghai), remote announces
+        // also Gray Glacier, and it's also aware of some random fork (e.g. misconfigured Shanghai). As
+        // neither forks passed at neither nodes, they may mismatch, but we still connect for now.
+        {&timestampedConfig, 15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: math.MaxUint64}, nil},
+
+        // Local is mainnet exactly on Shanghai, remote announces Gray Glacier + knowledge about Shanghai. Remote
+        // is simply out of sync, accept.
```
|
||||||
|
{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, remote announces Gray Glacier + knowledge about Shanghai. Remote
|
||||||
|
// is simply out of sync, accept.
|
||||||
|
{×tampedConfig, 20123456, 1668123456, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1668000000}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, remote announces Arrow Glacier + knowledge about Gray Glacier. Remote
|
||||||
|
// is definitely out of sync. It may or may not need the Shanghai update, we don't know yet.
|
||||||
|
{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Gray Glacier, remote announces Shanghai. Local is out of sync, accept.
|
||||||
|
{×tampedConfig, 15050000, 0, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Arrow Glacier, remote announces Gray Glacier, but is not aware of Shanghai. Local
|
||||||
|
// out of sync. Local also knows about a future fork, but that is uncertain yet.
|
||||||
|
{×tampedConfig, 13773000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai. remote announces Gray Glacier but is not aware of further forks.
|
||||||
|
// Remote needs software update.
|
||||||
|
{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}, ErrRemoteStale},
|
||||||
|
|
||||||
|
// Local is mainnet Gray Glacier, and isn't aware of more forks. Remote announces Gray Glacier +
|
||||||
|
// 0xffffffff. Local needs software update, reject.
|
||||||
|
{×tampedConfig, 15050000, 0, ID{Hash: checksumToBytes(checksumUpdate(0xf0afd0e3, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
|
// Local is mainnet Gray Glacier, and is aware of Shanghai. Remote announces Shanghai +
|
||||||
|
// 0xffffffff. Local needs software update, reject.
|
||||||
|
{×tampedConfig, 15050000, 0, ID{Hash: checksumToBytes(checksumUpdate(0x71147644, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
|
// Local is mainnet Gray Glacier, far in the future. Remote announces Gopherium (non existing fork)
|
||||||
|
// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
|
||||||
|
//
|
||||||
|
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
|
||||||
|
{params.MainnetChainConfig, 888888888, 1660000000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1660000000}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
|
// Local is mainnet Gray Glacier. Remote is also in Gray Glacier, but announces Gopherium (non existing
|
||||||
|
// fork) at block 7279999, before Shanghai. Local is incompatible.
|
||||||
|
{×tampedConfig, 19999999, 1667999999, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1667999999}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
|
//----------------------
|
||||||
|
// Timestamp based tests
|
||||||
|
//----------------------
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, remote announces the same. No future fork is announced.
|
||||||
|
{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, remote announces the same. Remote also announces a next fork
|
||||||
|
// at time 0xffffffff, but that is uncertain.
|
||||||
|
{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
|
||||||
|
// also Shanghai, but it's not yet aware of Cancun (e.g. non updated node before the fork).
|
||||||
|
// In this case we don't know if Cancun passed yet or not.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun is specced
|
||||||
|
//{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
|
||||||
|
// also Shanghai, and it's also aware of Cancun (e.g. updated node before the fork). We
|
||||||
|
// don't know if Cancun passed yet (will pass) or not.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun is specced and update next timestamp
|
||||||
|
//{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
|
||||||
|
// also Shanghai, and it's also aware of some random fork (e.g. misconfigured Cancun). As
|
||||||
|
// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun is specced
|
||||||
|
//{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet exactly on Cancun, remote announces Shanghai + knowledge about Cancun. Remote
|
||||||
|
// is simply out of sync, accept.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
|
||||||
|
// {×tampedConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Cancun, remote announces Shanghai + knowledge about Cancun. Remote
|
||||||
|
// is simply out of sync, accept.
|
||||||
|
// TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
|
||||||
|
//{×tampedConfig, 21123456, 1678123456, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Osaka, remote announces Shanghai + knowledge about Cancun. Remote
|
||||||
|
// is definitely out of sync. It may or may not need the Osaka update, we don't know yet.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun **and** Osaka is specced, update all the numbers
|
||||||
|
//{×tampedConfig, 0, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, remote announces Cancun. Local is out of sync, accept.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
|
||||||
|
//{×tampedConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, remote announces Cancun, but is not aware of Osaka. Local
|
||||||
|
// out of sync. Local also knows about a future fork, but that is uncertain yet.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun **and** Osaka is specced, update remote checksum
|
||||||
|
//{×tampedConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
|
||||||
|
|
||||||
|
// Local is mainnet Cancun. remote announces Shanghai but is not aware of further forks.
|
||||||
|
// Remote needs software update.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun is specced, update local head and time
|
||||||
|
//{×tampedConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, ErrRemoteStale},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, and isn't aware of more forks. Remote announces Shanghai +
|
||||||
|
// 0xffffffff. Local needs software update, reject.
|
||||||
|
{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x71147644, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, and is aware of Cancun. Remote announces Cancun +
|
||||||
|
// 0xffffffff. Local needs software update, reject.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun is specced, update remote checksum
|
||||||
|
//{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x00000000, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, remote is random Shanghai.
|
||||||
|
{×tampedConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai, far in the future. Remote announces Gopherium (non existing fork)
|
||||||
|
// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
|
||||||
|
//
|
||||||
|
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
|
||||||
|
{×tampedConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x71147644), Next: 8888888888}, ErrLocalIncompatibleOrStale},
|
||||||
|
|
||||||
|
// Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
|
||||||
|
// fork) at timestamp 1668000000, before Cancun. Local is incompatible.
|
||||||
|
//
|
||||||
|
// TODO(karalabe): Enable this when Cancun is specced
|
||||||
|
//{params.MainnetChainConfig, 20999999, 1677999999, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, ErrLocalIncompatibleOrStale},
|
||||||
}
|
}
|
||||||
for i, tt := range tests {
|
for i, tt := range tests {
|
||||||
filter := newFilter(params.MainnetChainConfig, params.MainnetGenesisHash, func() uint64 { return tt.head })
|
filter := newFilter(tt.config, params.MainnetGenesisHash, func() (uint64, uint64) { return tt.head, tt.time })
|
||||||
if err := filter(tt.id); err != tt.err {
|
if err := filter(tt.id); err != tt.err {
|
||||||
t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
|
t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
|
||||||
}
|
}
|
||||||
|
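Every case in the updated table carries its own chain config plus a (head block, head time) pair, because a remote's announced "next fork" may now be either a block number or a Unix timestamp. As a reading aid, here is a self-contained sketch of the scheduling rule the filter relies on: triggers sort as block numbers below some cutoff and as timestamps above it. The cutoff constant and function names are illustrative, not the forkid package's actual API:

```go
package main

import "fmt"

// timestampThreshold separates block-number triggers from timestamp
// triggers. Any realistic fork timestamp exceeds any realistic block
// number; the concrete value here is illustrative only.
const timestampThreshold = 1_438_269_973

// forkPassed reports whether a fork trigger has been reached by the
// local head, picking the block or the time axis based on magnitude.
func forkPassed(trigger, headBlock, headTime uint64) bool {
	if trigger >= timestampThreshold {
		return headTime >= trigger // timestamp-scheduled fork
	}
	return headBlock >= trigger // block-scheduled fork
}

func main() {
	// Gray Glacier (block 15050000) has passed, Shanghai (time 1668000000) has not.
	fmt.Println(forkPassed(15050000, 20000000, 1667999999))   // true
	fmt.Println(forkPassed(1668000000, 20000000, 1667999999)) // false
}
```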
@ -269,8 +269,7 @@ func (e *GenesisMismatchError) Error() string {

// ChainOverrides contains the changes to chain config.
type ChainOverrides struct {
-	OverrideTerminalTotalDifficulty       *big.Int
-	OverrideTerminalTotalDifficultyPassed *bool
+	OverrideShanghai *big.Int
}

// SetupGenesisBlock writes or updates the genesis block in db.
@ -296,15 +295,11 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
	}
	applyOverrides := func(config *params.ChainConfig) {
		if config != nil {
-			if overrides != nil && overrides.OverrideTerminalTotalDifficulty != nil {
-				config.TerminalTotalDifficulty = overrides.OverrideTerminalTotalDifficulty
-			}
-			if overrides != nil && overrides.OverrideTerminalTotalDifficultyPassed != nil {
-				config.TerminalTotalDifficultyPassed = *overrides.OverrideTerminalTotalDifficultyPassed
+			if overrides != nil && overrides.OverrideShanghai != nil {
+				config.ShanghaiTime = overrides.OverrideShanghai
			}
		}
	}
	// Just commit the new block if there is no stored genesis block.
	stored := rawdb.ReadCanonicalHash(db, 0)
	if (stored == common.Hash{}) {
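For illustration, the override plumbing above reduces to copying one optional pointer into the stored config before genesis setup. A trimmed-down sketch with stub types (not the real params.ChainConfig):

```go
package main

import (
	"fmt"
	"math/big"
)

type chainConfig struct {
	ShanghaiTime *big.Int // nil = fork not scheduled
}

type chainOverrides struct {
	OverrideShanghai *big.Int
}

// applyOverrides mirrors the closure in the diff: only a non-nil
// override touches the stored configuration.
func applyOverrides(config *chainConfig, overrides *chainOverrides) {
	if config != nil && overrides != nil && overrides.OverrideShanghai != nil {
		config.ShanghaiTime = overrides.OverrideShanghai
	}
}

func main() {
	cfg := &chainConfig{}
	applyOverrides(cfg, &chainOverrides{OverrideShanghai: big.NewInt(1681338455)})
	fmt.Println(cfg.ShanghaiTime) // 1681338455
}
```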
@ -371,12 +366,12 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
	}
	// Check config compatibility and write the config. Compatibility errors
	// are returned to the caller unless we're already at block zero.
-	height := rawdb.ReadHeaderNumber(db, rawdb.ReadHeadHeaderHash(db))
-	if height == nil {
-		return newcfg, stored, fmt.Errorf("missing block number for head header hash")
+	head := rawdb.ReadHeadHeader(db)
+	if head == nil {
+		return newcfg, stored, fmt.Errorf("missing head header")
	}
-	compatErr := storedcfg.CheckCompatible(newcfg, *height)
-	if compatErr != nil && *height != 0 && compatErr.RewindTo != 0 {
+	compatErr := storedcfg.CheckCompatible(newcfg, head.Number.Uint64(), head.Time)
+	if compatErr != nil && ((head.Number.Uint64() != 0 && compatErr.RewindToBlock != 0) || (head.Time != 0 && compatErr.RewindToTime != 0)) {
		return newcfg, stored, compatErr
	}
	// Don't overwrite if the old is identical to the new
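The abort condition now checks the block and the timestamp axes independently, since a zero RewindToBlock is meaningful when the mismatch came from a timestamp fork and vice versa. A reduced sketch of that decision, using a stub error type that mirrors the RewindToBlock/RewindToTime fields from the diff:

```go
package main

import "fmt"

// configCompatError carries both rewind targets; only one is meaningful
// depending on whether a block- or a time-scheduled fork mismatched.
// This is a stub, not params.ConfigCompatError itself.
type configCompatError struct {
	RewindToBlock uint64
	RewindToTime  uint64
}

// shouldAbort mirrors the condition in SetupGenesisBlockWithOverride:
// the error is fatal unless the chain is still at genesis on the
// relevant axis.
func shouldAbort(err *configCompatError, headBlock, headTime uint64) bool {
	if err == nil {
		return false
	}
	return (headBlock != 0 && err.RewindToBlock != 0) || (headTime != 0 && err.RewindToTime != 0)
}

func main() {
	err := &configCompatError{RewindToTime: 1668000000}
	fmt.Println(shouldAbort(err, 0, 0))                 // false: fresh chain
	fmt.Println(shouldAbort(err, 16000000, 1670000000)) // true: must surface the error
}
```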
@ -132,10 +132,10 @@ func TestSetupGenesis(t *testing.T) {
			wantHash:   customghash,
			wantConfig: customg.Config,
			wantErr: &params.ConfigCompatError{
				What: "Homestead fork block",
-				StoredConfig: big.NewInt(2),
-				NewConfig:    big.NewInt(3),
-				RewindTo:     1,
+				StoredBlock:   big.NewInt(2),
+				NewBlock:      big.NewInt(3),
+				RewindToBlock: 1,
			},
		},
	}
@ -556,7 +556,7 @@ type (
	// before head header is updated. The method will return the actual block it
	// updated the head to (missing state) and a flag if setHead should continue
	// rewinding till that forcefully (exceeded ancient limits)
-	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)
+	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (*types.Header, bool)

	// DeleteBlockContentCallback is a callback function that is called by SetHead
	// before each header is deleted.
@ -566,15 +566,46 @@ type (
// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
+	hc.setHead(head, 0, updateFn, delFn)
+}
+
+// SetHeadWithTimestamp rewinds the local chain to a new head timestamp. Everything
+// above the new head will be deleted and the new one set.
+func (hc *HeaderChain) SetHeadWithTimestamp(time uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
+	hc.setHead(0, time, updateFn, delFn)
+}
+
+// setHead rewinds the local chain to a new head block or a head timestamp.
+// Everything above the new head will be deleted and the new one set.
+func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
+	// Sanity check that there's no attempt to undo the genesis block. This is
+	// a fairly synthetic case where someone enables a timestamp based fork
+	// below the genesis timestamp. It's nice to not allow that instead of the
+	// entire chain getting deleted.
+	if headTime > 0 && hc.genesisHeader.Time > headTime {
+		// Note, a critical error is quite brutal, but we should really not reach
+		// this point. Since pre-timestamp based forks it was impossible to have
+		// a fork before block 0, the setHead would always work. With timestamp
+		// forks it becomes possible to specify below the genesis. That said, the
+		// only time we setHead via timestamp is with chain config changes on the
+		// startup, so failing hard there is ok.
+		log.Crit("Rejecting genesis rewind via timestamp", "target", headTime, "genesis", hc.genesisHeader.Time)
+	}
	var (
		parentHash common.Hash
		batch      = hc.chainDb.NewBatch()
		origin     = true
	)
-	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
+	done := func(header *types.Header) bool {
+		if headTime > 0 {
+			return header.Time <= headTime
+		}
+		return header.Number.Uint64() <= headBlock
+	}
+	for hdr := hc.CurrentHeader(); hdr != nil && !done(hdr); hdr = hc.CurrentHeader() {
		num := hdr.Number.Uint64()

-		// Rewind block chain to new head.
+		// Rewind chain to new head
		parent := hc.GetHeader(hdr.ParentHash, num-1)
		if parent == nil {
			parent = hc.genesisHeader
@ -591,9 +622,9 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
	markerBatch := hc.chainDb.NewBatch()
	if updateFn != nil {
		newHead, force := updateFn(markerBatch, parent)
-		if force && newHead < head {
-			log.Warn("Force rewinding till ancient limit", "head", newHead)
-			head = newHead
+		if force && ((headTime > 0 && newHead.Time < headTime) || (headTime == 0 && newHead.Number.Uint64() < headBlock)) {
+			log.Warn("Force rewinding till ancient limit", "head", newHead.Number.Uint64())
+			headBlock, headTime = newHead.Number.Uint64(), 0 // Target timestamp passed, continue rewind in block mode (cleaner)
		}
	}
	// Update head header then.
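A standalone sketch of the new termination rule: a single predicate switches the rewind loop between block mode and timestamp mode, which is the essence of the done closure above. The header type is reduced to the two fields that matter, and the helper names are illustrative:

```go
package main

import "fmt"

type header struct {
	Number uint64
	Time   uint64
}

// rewindTarget returns the first header (walking backwards) satisfying
// the requested head block or head time, mimicking setHead's done closure.
func rewindTarget(chain []header, headBlock, headTime uint64) header {
	done := func(h header) bool {
		if headTime > 0 {
			return h.Time <= headTime // timestamp mode
		}
		return h.Number <= headBlock // block mode
	}
	for i := len(chain) - 1; i >= 0; i-- {
		if done(chain[i]) {
			return chain[i]
		}
	}
	return chain[0] // genesis
}

func main() {
	chain := []header{{0, 1000}, {1, 1010}, {2, 1020}, {3, 1030}}
	fmt.Println(rewindTarget(chain, 0, 1025)) // {2 1020}: timestamp mode
	fmt.Println(rewindTarget(chain, 1, 0))    // {1 1010}: block mode
}
```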
@ -300,7 +300,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
	var (
		msg              = st.msg
		sender           = vm.AccountRef(msg.From())
-		rules            = st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Random != nil)
+		rules            = st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Random != nil, st.evm.Context.Time)
		contractCreation = msg.To() == nil
	)

@ -131,7 +131,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig
		StateDB:     statedb,
		Config:      config,
		chainConfig: chainConfig,
-		chainRules:  chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil),
+		chainRules:  chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time),
	}
	evm.interpreter = NewEVMInterpreter(evm, config)
	return evm

@ -117,7 +117,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
		address = common.BytesToAddress([]byte("contract"))
		vmenv   = NewEnv(cfg)
		sender  = vm.AccountRef(cfg.Origin)
-		rules   = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil)
+		rules   = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil, vmenv.Context.Time)
	)
	// Execute the preparatory steps for state transition which includes:
	// - prepare accessList(post-berlin)
@ -151,7 +151,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) {
	var (
		vmenv  = NewEnv(cfg)
		sender = vm.AccountRef(cfg.Origin)
-		rules  = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil)
+		rules  = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil, vmenv.Context.Time)
	)
	// Execute the preparatory steps for state transition which includes:
	// - prepare accessList(post-berlin)
@ -180,7 +180,7 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
		vmenv   = NewEnv(cfg)
		sender  = cfg.State.GetOrNewStateObject(cfg.Origin)
		statedb = cfg.State
-		rules   = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil)
+		rules   = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Random != nil, vmenv.Context.Time)
	)
	// Execute the preparatory steps for state transition which includes:
	// - prepare accessList(post-berlin)
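All of these call sites change for the same reason: the active rule set can no longer be derived from the block number alone, so Rules must also see the block time. A minimal sketch of what such a dual-axis rules constructor looks like, using stub types rather than the params package:

```go
package main

import (
	"fmt"
	"math/big"
)

type chainConfig struct {
	LondonBlock  *big.Int // block-scheduled fork
	ShanghaiTime *big.Int // timestamp-scheduled fork
}

type rules struct {
	IsLondon, IsShanghai bool
}

// newRules derives the active rule set from both the block number and
// the block timestamp, the way ChainConfig.Rules does after this change.
func (c *chainConfig) newRules(num, time *big.Int) rules {
	activeBlock := func(fork *big.Int) bool { return fork != nil && fork.Cmp(num) <= 0 }
	activeTime := func(fork *big.Int) bool { return fork != nil && fork.Cmp(time) <= 0 }
	return rules{
		IsLondon:   activeBlock(c.LondonBlock),
		IsShanghai: activeTime(c.ShanghaiTime),
	}
}

func main() {
	cfg := &chainConfig{LondonBlock: big.NewInt(12965000), ShanghaiTime: big.NewInt(1681338455)}
	fmt.Println(cfg.newRules(big.NewInt(17000000), big.NewInt(1681338460))) // {true true}
}
```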
@ -195,11 +195,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
	)
	// Override the chain config with provided settings.
	var overrides core.ChainOverrides
-	if config.OverrideTerminalTotalDifficulty != nil {
-		overrides.OverrideTerminalTotalDifficulty = config.OverrideTerminalTotalDifficulty
-	}
-	if config.OverrideTerminalTotalDifficultyPassed != nil {
-		overrides.OverrideTerminalTotalDifficultyPassed = config.OverrideTerminalTotalDifficultyPassed
+	if config.OverrideShanghai != nil {
+		overrides.OverrideShanghai = config.OverrideShanghai
	}
	eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
	if err != nil {
@ -206,11 +206,8 @@ type Config struct {
	// CheckpointOracle is the configuration for checkpoint oracle.
	CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`

-	// OverrideTerminalTotalDifficulty (TODO: remove after the fork)
-	OverrideTerminalTotalDifficulty *big.Int `toml:",omitempty"`
-
-	// OverrideTerminalTotalDifficultyPassed (TODO: remove after the fork)
-	OverrideTerminalTotalDifficultyPassed *bool `toml:",omitempty"`
+	// OverrideShanghai (TODO: remove after the fork)
+	OverrideShanghai *big.Int `toml:",omitempty"`
}

// CreateConsensusEngine creates a consensus engine for the given chain configuration.
@ -19,50 +19,49 @@ import (
// MarshalTOML marshals as TOML.
func (c Config) MarshalTOML() (interface{}, error) {
	type Config struct {
		Genesis *core.Genesis `toml:",omitempty"`
		NetworkId uint64
		SyncMode downloader.SyncMode
		EthDiscoveryURLs []string
		SnapDiscoveryURLs []string
		NoPruning bool
		NoPrefetch bool
		TxLookupLimit uint64 `toml:",omitempty"`
		RequiredBlocks map[uint64]common.Hash `toml:"-"`
		LightServ int `toml:",omitempty"`
		LightIngress int `toml:",omitempty"`
		LightEgress int `toml:",omitempty"`
		LightPeers int `toml:",omitempty"`
		LightNoPrune bool `toml:",omitempty"`
		LightNoSyncServe bool `toml:",omitempty"`
		SyncFromCheckpoint bool `toml:",omitempty"`
		UltraLightServers []string `toml:",omitempty"`
		UltraLightFraction int `toml:",omitempty"`
		UltraLightOnlyAnnounce bool `toml:",omitempty"`
		SkipBcVersionCheck bool `toml:"-"`
		DatabaseHandles int `toml:"-"`
		DatabaseCache int
		DatabaseFreezer string
		TrieCleanCache int
		TrieCleanCacheJournal string `toml:",omitempty"`
		TrieCleanCacheRejournal time.Duration `toml:",omitempty"`
		TrieDirtyCache int
		TrieTimeout time.Duration
		SnapshotCache int
		Preimages bool
		FilterLogCacheSize int
		Miner miner.Config
		Ethash ethash.Config
		TxPool txpool.Config
		GPO gasprice.Config
		EnablePreimageRecording bool
		DocRoot string `toml:"-"`
		RPCGasCap uint64
		RPCEVMTimeout time.Duration
		RPCTxFeeCap float64
		Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
		CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
-		OverrideTerminalTotalDifficulty *big.Int `toml:",omitempty"`
-		OverrideTerminalTotalDifficultyPassed *bool `toml:",omitempty"`
+		OverrideShanghai *big.Int `toml:",omitempty"`
	}
	var enc Config
	enc.Genesis = c.Genesis
@ -107,58 +106,56 @@ func (c Config) MarshalTOML() (interface{}, error) {
	enc.RPCTxFeeCap = c.RPCTxFeeCap
	enc.Checkpoint = c.Checkpoint
	enc.CheckpointOracle = c.CheckpointOracle
-	enc.OverrideTerminalTotalDifficulty = c.OverrideTerminalTotalDifficulty
-	enc.OverrideTerminalTotalDifficultyPassed = c.OverrideTerminalTotalDifficultyPassed
+	enc.OverrideShanghai = c.OverrideShanghai
	return &enc, nil
}

// UnmarshalTOML unmarshals from TOML.
func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
	type Config struct {
		Genesis *core.Genesis `toml:",omitempty"`
		NetworkId *uint64
		SyncMode *downloader.SyncMode
		EthDiscoveryURLs []string
		SnapDiscoveryURLs []string
		NoPruning *bool
		NoPrefetch *bool
		TxLookupLimit *uint64 `toml:",omitempty"`
		RequiredBlocks map[uint64]common.Hash `toml:"-"`
		LightServ *int `toml:",omitempty"`
		LightIngress *int `toml:",omitempty"`
		LightEgress *int `toml:",omitempty"`
		LightPeers *int `toml:",omitempty"`
		LightNoPrune *bool `toml:",omitempty"`
		LightNoSyncServe *bool `toml:",omitempty"`
		SyncFromCheckpoint *bool `toml:",omitempty"`
		UltraLightServers []string `toml:",omitempty"`
		UltraLightFraction *int `toml:",omitempty"`
		UltraLightOnlyAnnounce *bool `toml:",omitempty"`
		SkipBcVersionCheck *bool `toml:"-"`
		DatabaseHandles *int `toml:"-"`
		DatabaseCache *int
		DatabaseFreezer *string
		TrieCleanCache *int
		TrieCleanCacheJournal *string `toml:",omitempty"`
		TrieCleanCacheRejournal *time.Duration `toml:",omitempty"`
		TrieDirtyCache *int
		TrieTimeout *time.Duration
		SnapshotCache *int
		Preimages *bool
		FilterLogCacheSize *int
		Miner *miner.Config
		Ethash *ethash.Config
		TxPool *txpool.Config
		GPO *gasprice.Config
		EnablePreimageRecording *bool
		DocRoot *string `toml:"-"`
		RPCGasCap *uint64
		RPCEVMTimeout *time.Duration
		RPCTxFeeCap *float64
		Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
		CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
-		OverrideTerminalTotalDifficulty *big.Int `toml:",omitempty"`
-		OverrideTerminalTotalDifficultyPassed *bool `toml:",omitempty"`
+		OverrideShanghai *big.Int `toml:",omitempty"`
	}
	var dec Config
	if err := unmarshal(&dec); err != nil {
@ -290,11 +287,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
	if dec.CheckpointOracle != nil {
		c.CheckpointOracle = dec.CheckpointOracle
	}
-	if dec.OverrideTerminalTotalDifficulty != nil {
-		c.OverrideTerminalTotalDifficulty = dec.OverrideTerminalTotalDifficulty
-	}
-	if dec.OverrideTerminalTotalDifficultyPassed != nil {
-		c.OverrideTerminalTotalDifficultyPassed = dec.OverrideTerminalTotalDifficultyPassed
+	if dec.OverrideShanghai != nil {
+		c.OverrideShanghai = dec.OverrideShanghai
	}
	return nil
}
@ -331,7 +331,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
		number = head.Number.Uint64()
		td     = h.chain.GetTd(hash, number)
	)
-	forkID := forkid.NewID(h.chain.Config(), h.chain.Genesis().Hash(), h.chain.CurrentHeader().Number.Uint64())
+	forkID := forkid.NewID(h.chain.Config(), genesis.Hash(), number, head.Time)
	if err := peer.Handshake(h.networkID, td, hash, genesis.Hash(), forkID, h.forkFilter); err != nil {
		peer.Log().Debug("Ethereum handshake failed", "err", err)
		return err
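The fork identifier advertised in the handshake remains a CRC32 checksum folded over the genesis hash and each fork trigger in order; the change is simply that timestamps now join the block numbers in that sequence. An illustrative recomputation in the same spirit (zero genesis hash and a made-up trigger list, not mainnet's actual values or the forkid package itself):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// checksum folds fork triggers (block numbers first, then timestamps)
// into a running CRC32 over the genesis hash, which is how fork ids
// are derived.
func checksum(genesis [32]byte, forks ...uint64) uint32 {
	hash := crc32.ChecksumIEEE(genesis[:])
	var blob [8]byte
	for _, fork := range forks {
		binary.BigEndian.PutUint64(blob[:], fork)
		hash = crc32.Update(hash, crc32.IEEETable, blob[:])
	}
	return hash
}

func main() {
	var genesis [32]byte // zero hash, for illustration only
	// Two block-scheduled forks followed by one timestamp-scheduled fork.
	fmt.Printf("%#x\n", checksum(genesis, 1150000, 1920000, 1668000000))
}
```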
@ -59,7 +59,8 @@ func StartENRUpdater(chain *core.BlockChain, ln *enode.LocalNode) {

// currentENREntry constructs an `eth` ENR entry based on the current state of the chain.
func currentENREntry(chain *core.BlockChain) *enrEntry {
+	head := chain.CurrentHeader()
	return &enrEntry{
-		ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), chain.CurrentHeader().Number.Uint64()),
+		ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), head.Number.Uint64(), head.Time),
	}
}
@ -40,7 +40,7 @@ func testHandshake(t *testing.T, protocol uint) {
		genesis = backend.chain.Genesis()
		head    = backend.chain.CurrentBlock()
		td      = backend.chain.GetTd(head.Hash(), head.NumberU64())
-		forkID  = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64())
+		forkID  = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time)
	)
	tests := []struct {
		code uint64
@ -242,7 +242,7 @@ func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr
	t.ctx["value"] = valueBig
	t.ctx["block"] = t.vm.ToValue(env.Context.BlockNumber.Uint64())
	// Update list of precompiles based on current block
-	rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil)
+	rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil, env.Context.Time)
	t.activePrecompiles = vm.ActivePrecompiles(rules)
}

@ -81,7 +81,7 @@ func (t *fourByteTracer) store(id []byte, size int) {
// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
	// Update list of precompiles based on current block
-	rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil)
+	rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Random != nil, env.Context.Time)
	t.activePrecompiles = vm.ActivePrecompiles(rules)

	// Save the outer calldata also
@ -1440,7 +1440,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
	}
	isPostMerge := header.Difficulty.Cmp(common.Big0) == 0
	// Retrieve the precompiles since they don't need to be added to the access list
-	precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, isPostMerge))
+	precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, isPostMerge, new(big.Int).SetUint64(header.Time)))

	// Create an initial tracer
	prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles)
@ -94,11 +94,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
		return nil, err
	}
	var overrides core.ChainOverrides
-	if config.OverrideTerminalTotalDifficulty != nil {
-		overrides.OverrideTerminalTotalDifficulty = config.OverrideTerminalTotalDifficulty
-	}
-	if config.OverrideTerminalTotalDifficultyPassed != nil {
-		overrides.OverrideTerminalTotalDifficultyPassed = config.OverrideTerminalTotalDifficultyPassed
+	if config.OverrideShanghai != nil {
+		overrides.OverrideShanghai = config.OverrideShanghai
	}
	chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, trie.NewDatabase(chainDb), config.Genesis, &overrides)
	if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
@ -179,7 +176,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
	// Rewind the chain in case of an incompatible config upgrade.
	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
		log.Warn("Rewinding chain to upgrade configuration", "err", compat)
-		leth.blockchain.SetHead(compat.RewindTo)
+		if compat.RewindToTime > 0 {
+			leth.blockchain.SetHeadWithTimestamp(compat.RewindToTime)
+		} else {
+			leth.blockchain.SetHead(compat.RewindToBlock)
+		}
		rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)
	}
@ -111,7 +111,7 @@ func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error {
	p.Log().Debug("Light Ethereum peer connected", "name", p.Name())

	// Execute the LES handshake
-	forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.genesis, h.backend.blockchain.CurrentHeader().Number.Uint64())
+	forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.genesis, h.backend.blockchain.CurrentHeader().Number.Uint64(), h.backend.blockchain.CurrentHeader().Time)
	if err := p.Handshake(h.backend.blockchain.Genesis().Hash(), forkid, h.forkFilter); err != nil {
		p.Log().Debug("Light Ethereum handshake failed", "err", err)
		return err

@ -124,8 +124,8 @@ func TestHandshake(t *testing.T) {
		genesis = common.HexToHash("cafebabe")

		chain1, chain2   = &fakeChain{}, &fakeChain{}
-		forkID1          = forkid.NewID(chain1.Config(), chain1.Genesis().Hash(), chain1.CurrentHeader().Number.Uint64())
-		forkID2          = forkid.NewID(chain2.Config(), chain2.Genesis().Hash(), chain2.CurrentHeader().Number.Uint64())
+		forkID1          = forkid.NewID(chain1.Config(), chain1.Genesis().Hash(), chain1.CurrentHeader().Number.Uint64(), chain1.CurrentHeader().Time)
+		forkID2          = forkid.NewID(chain2.Config(), chain2.Genesis().Hash(), chain2.CurrentHeader().Number.Uint64(), chain2.CurrentHeader().Time)
		filter1, filter2 = forkid.NewFilter(chain1), forkid.NewFilter(chain2)
	)

@ -117,7 +117,7 @@ func (h *serverHandler) handle(p *clientPeer) error {
		hash   = head.Hash()
		number = head.Number.Uint64()
		td     = h.blockchain.GetTd(hash, number)
-		forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), h.blockchain.CurrentBlock().NumberU64())
+		forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), number, head.Time)
	)
	if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), forkID, h.forkFilter, h.server); err != nil {
		p.Log().Debug("Light Ethereum handshake failed", "err", err)

@ -489,7 +489,7 @@ func (client *testClient) newRawPeer(t *testing.T, name string, version int, rec
		head = client.handler.backend.blockchain.CurrentHeader()
		td   = client.handler.backend.blockchain.GetTd(head.Hash(), head.Number.Uint64())
	)
-	forkID := forkid.NewID(client.handler.backend.blockchain.Config(), genesis.Hash(), head.Number.Uint64())
+	forkID := forkid.NewID(client.handler.backend.blockchain.Config(), genesis.Hash(), head.Number.Uint64(), head.Time)
	tp.handshakeWithClient(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID, testCostList(0), recentTxLookup) // disable flow control by default

	// Ensure the connection is established or exits when any error occurs
@ -553,7 +553,7 @@ func (server *testServer) newRawPeer(t *testing.T, name string, version int) (*t
		head = server.handler.blockchain.CurrentHeader()
		td   = server.handler.blockchain.GetTd(head.Hash(), head.Number.Uint64())
	)
-	forkID := forkid.NewID(server.handler.blockchain.Config(), genesis.Hash(), head.Number.Uint64())
+	forkID := forkid.NewID(server.handler.blockchain.Config(), genesis.Hash(), head.Number.Uint64(), head.Time)
	tp.handshakeWithServer(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID)

	// Ensure the connection is established or exits when any error occurs
@ -178,6 +178,17 @@ func (lc *LightChain) SetHead(head uint64) error {
	return lc.loadLastState()
}

+// SetHeadWithTimestamp rewinds the local chain to a new head that has at max
+// the given timestamp. Everything above the new head will be deleted and the
+// new one set.
+func (lc *LightChain) SetHeadWithTimestamp(timestamp uint64) error {
+	lc.chainmu.Lock()
+	defer lc.chainmu.Unlock()
+
+	lc.hc.SetHeadWithTimestamp(timestamp, nil, nil)
+	return lc.loadLastState()
+}
+
// GasLimit returns the gas limit of the current HEAD block.
func (lc *LightChain) GasLimit() uint64 {
	return lc.hc.CurrentHeader().GasLimit
params/config.go

@ -281,7 +281,7 @@ var (

	TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, false, new(EthashConfig), nil}
	NonActivatedConfig = &ChainConfig{big.NewInt(1), nil, nil, false, nil, common.Hash{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, new(EthashConfig), nil}
-	TestRules = TestChainConfig.Rules(new(big.Int), false)
+	TestRules = TestChainConfig.Rules(new(big.Int), false, new(big.Int))
)

// NetworkNames are user friendly names to use in the chain spec banner.
@ -371,9 +371,12 @@ type ChainConfig struct {
	ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // Eip-4345 (bomb delay) switch block (nil = no fork, 0 = already activated)
	GrayGlacierBlock *big.Int `json:"grayGlacierBlock,omitempty"` // Eip-5133 (bomb delay) switch block (nil = no fork, 0 = already activated)
	MergeNetsplitBlock *big.Int `json:"mergeNetsplitBlock,omitempty"` // Virtual fork after The Merge to use as a network splitter
-	ShanghaiBlock *big.Int `json:"shanghaiBlock,omitempty"` // Shanghai switch block (nil = no fork, 0 = already on shanghai)
	CancunBlock *big.Int `json:"cancunBlock,omitempty"` // Cancun switch block (nil = no fork, 0 = already on cancun)

+	// Fork scheduling was switched from blocks to timestamps here
+
+	ShanghaiTime *big.Int `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai)
+
	// TerminalTotalDifficulty is the amount of total difficulty reached by
	// the network that triggers the consensus upgrade.
	TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"`
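With ShanghaiTime held as a *big.Int Unix time, activation checks compare against the head timestamp rather than the head number. A sketch of the nil-safe predicate such a field needs, a hypothetical helper written in the style of the existing fork checks:

```go
package main

import (
	"fmt"
	"math/big"
)

// isTimestampForked reports whether a timestamp-scheduled fork is active
// at the given head time: nil means never scheduled, otherwise the fork
// is active once the head time reaches the configured value.
func isTimestampForked(s, head *big.Int) bool {
	if s == nil {
		return false
	}
	return s.Cmp(head) <= 0
}

func main() {
	shanghaiTime := big.NewInt(1681338455)
	fmt.Println(isTimestampForked(shanghaiTime, big.NewInt(1681338454))) // false
	fmt.Println(isTimestampForked(shanghaiTime, big.NewInt(1681338455))) // true
	fmt.Println(isTimestampForked(nil, big.NewInt(2000000000)))          // false
}
```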
@ -442,121 +445,127 @@ func (c *ChainConfig) Description() string {
	// Create a list of forks with a short description of them. Forks that only
	// makes sense for mainnet should be optional at printing to avoid bloating
	// the output for testnets and private networks.
-	banner += "Pre-Merge hard forks:\n"
+	banner += "Pre-Merge hard forks (block based):\n"
-	banner += fmt.Sprintf(" - Homestead: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/homestead.md)\n", c.HomesteadBlock)
+	banner += fmt.Sprintf(" - Homestead: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/homestead.md)\n", c.HomesteadBlock)
	if c.DAOForkBlock != nil {
-		banner += fmt.Sprintf(" - DAO Fork: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/dao-fork.md)\n", c.DAOForkBlock)
+		banner += fmt.Sprintf(" - DAO Fork: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/dao-fork.md)\n", c.DAOForkBlock)
	}
-	banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/tangerine-whistle.md)\n", c.EIP150Block)
+	banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/tangerine-whistle.md)\n", c.EIP150Block)
-	banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block)
+	banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block)
-	banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block)
+	banner += fmt.Sprintf(" - Spurious Dragon/2 (EIP 158): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block)
-	banner += fmt.Sprintf(" - Byzantium: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/byzantium.md)\n", c.ByzantiumBlock)
+	banner += fmt.Sprintf(" - Byzantium: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/byzantium.md)\n", c.ByzantiumBlock)
-	banner += fmt.Sprintf(" - Constantinople: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/constantinople.md)\n", c.ConstantinopleBlock)
+	banner += fmt.Sprintf(" - Constantinople: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/constantinople.md)\n", c.ConstantinopleBlock)
-	banner += fmt.Sprintf(" - Petersburg: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/petersburg.md)\n", c.PetersburgBlock)
+	banner += fmt.Sprintf(" - Petersburg: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/petersburg.md)\n", c.PetersburgBlock)
-	banner += fmt.Sprintf(" - Istanbul: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/istanbul.md)\n", c.IstanbulBlock)
+	banner += fmt.Sprintf(" - Istanbul: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/istanbul.md)\n", c.IstanbulBlock)
	if c.MuirGlacierBlock != nil {
-		banner += fmt.Sprintf(" - Muir Glacier: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/muir-glacier.md)\n", c.MuirGlacierBlock)
+		banner += fmt.Sprintf(" - Muir Glacier: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/muir-glacier.md)\n", c.MuirGlacierBlock)
	}
-	banner += fmt.Sprintf(" - Berlin: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/berlin.md)\n", c.BerlinBlock)
+	banner += fmt.Sprintf(" - Berlin: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/berlin.md)\n", c.BerlinBlock)
-	banner += fmt.Sprintf(" - London: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/london.md)\n", c.LondonBlock)
+	banner += fmt.Sprintf(" - London: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/london.md)\n", c.LondonBlock)
	if c.ArrowGlacierBlock != nil {
-		banner += fmt.Sprintf(" - Arrow Glacier: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/arrow-glacier.md)\n", c.ArrowGlacierBlock)
+		banner += fmt.Sprintf(" - Arrow Glacier: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/arrow-glacier.md)\n", c.ArrowGlacierBlock)
	}
	if c.GrayGlacierBlock != nil {
-		banner += fmt.Sprintf(" - Gray Glacier: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/gray-glacier.md)\n", c.GrayGlacierBlock)
-	}
-	if c.ShanghaiBlock != nil {
-		banner += fmt.Sprintf(" - Shanghai: %-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/shanghai.md)\n", c.ShanghaiBlock)
-	}
-	if c.CancunBlock != nil {
-		banner += fmt.Sprintf(" - Cancun: %-8v\n", c.CancunBlock)
+		banner += fmt.Sprintf(" - Gray Glacier: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/gray-glacier.md)\n", c.GrayGlacierBlock)
	}
	banner += "\n"

	// Add a special section for the merge as it's non-obvious
	if c.TerminalTotalDifficulty == nil {
		banner += "The Merge is not yet available for this network!\n"
-		banner += " - Hard-fork specification: https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/paris.md"
+		banner += " - Hard-fork specification: https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/paris.md\n"
	} else {
		banner += "Merge configured:\n"
		banner += " - Hard-fork specification: https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/paris.md\n"
|
||||||
banner += fmt.Sprintf(" - Network known to be merged: %v\n", c.TerminalTotalDifficultyPassed)
|
banner += fmt.Sprintf(" - Network known to be merged: %v\n", c.TerminalTotalDifficultyPassed)
|
||||||
banner += fmt.Sprintf(" - Total terminal difficulty: %v\n", c.TerminalTotalDifficulty)
|
banner += fmt.Sprintf(" - Total terminal difficulty: %v\n", c.TerminalTotalDifficulty)
|
||||||
banner += fmt.Sprintf(" - Merge netsplit block: %-8v", c.MergeNetsplitBlock)
|
if c.MergeNetsplitBlock != nil {
|
||||||
|
banner += fmt.Sprintf(" - Merge netsplit block: #%-8v\n", c.MergeNetsplitBlock)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
banner += "\n"
|
||||||
|
|
||||||
|
// Create a list of forks post-merge
|
||||||
|
banner += "Post-Merge hard forks (timestamp based):\n"
|
||||||
|
if c.ShanghaiTime != nil {
|
||||||
|
banner += fmt.Sprintf(" - Shanghai: @%-10v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/shanghai.md)\n", c.ShanghaiTime)
|
||||||
|
}
|
||||||
|
if c.CancunBlock != nil {
|
||||||
|
banner += fmt.Sprintf(" - Cancun: @%-10v\n", c.CancunBlock)
|
||||||
}
|
}
|
||||||
return banner
|
return banner
|
||||||
}
|
}
|
||||||
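A note on the prefixes introduced above: block-scheduled forks now print as #<number> padded to 8 columns, while timestamp-scheduled forks print as @<unix time> padded to 10 (timestamps are wider than block numbers). A standalone sketch, with invented fork values, of the rendered difference:

    package main

    import (
    	"fmt"
    	"math/big"
    )

    func main() {
    	// Invented numbers, purely to show the two banner formats.
    	block := big.NewInt(12965000)    // a block-scheduled fork
    	tstamp := big.NewInt(1681338455) // a timestamp-scheduled fork
    	fmt.Printf(" - London: #%-8v (block based)\n", block)
    	fmt.Printf(" - Shanghai: @%-10v (timestamp based)\n", tstamp)
    }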
 
 // IsHomestead returns whether num is either equal to the homestead block or greater.
 func (c *ChainConfig) IsHomestead(num *big.Int) bool {
-	return isForked(c.HomesteadBlock, num)
+	return isBlockForked(c.HomesteadBlock, num)
 }
 
 // IsDAOFork returns whether num is either equal to the DAO fork block or greater.
 func (c *ChainConfig) IsDAOFork(num *big.Int) bool {
-	return isForked(c.DAOForkBlock, num)
+	return isBlockForked(c.DAOForkBlock, num)
 }
 
 // IsEIP150 returns whether num is either equal to the EIP150 fork block or greater.
 func (c *ChainConfig) IsEIP150(num *big.Int) bool {
-	return isForked(c.EIP150Block, num)
+	return isBlockForked(c.EIP150Block, num)
 }
 
 // IsEIP155 returns whether num is either equal to the EIP155 fork block or greater.
 func (c *ChainConfig) IsEIP155(num *big.Int) bool {
-	return isForked(c.EIP155Block, num)
+	return isBlockForked(c.EIP155Block, num)
 }
 
 // IsEIP158 returns whether num is either equal to the EIP158 fork block or greater.
 func (c *ChainConfig) IsEIP158(num *big.Int) bool {
-	return isForked(c.EIP158Block, num)
+	return isBlockForked(c.EIP158Block, num)
 }
 
 // IsByzantium returns whether num is either equal to the Byzantium fork block or greater.
 func (c *ChainConfig) IsByzantium(num *big.Int) bool {
-	return isForked(c.ByzantiumBlock, num)
+	return isBlockForked(c.ByzantiumBlock, num)
 }
 
 // IsConstantinople returns whether num is either equal to the Constantinople fork block or greater.
 func (c *ChainConfig) IsConstantinople(num *big.Int) bool {
-	return isForked(c.ConstantinopleBlock, num)
+	return isBlockForked(c.ConstantinopleBlock, num)
 }
 
 // IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater.
 func (c *ChainConfig) IsMuirGlacier(num *big.Int) bool {
-	return isForked(c.MuirGlacierBlock, num)
+	return isBlockForked(c.MuirGlacierBlock, num)
 }
 
 // IsPetersburg returns whether num is either
 // - equal to or greater than the PetersburgBlock fork block,
 // - OR is nil, and Constantinople is active
 func (c *ChainConfig) IsPetersburg(num *big.Int) bool {
-	return isForked(c.PetersburgBlock, num) || c.PetersburgBlock == nil && isForked(c.ConstantinopleBlock, num)
+	return isBlockForked(c.PetersburgBlock, num) || c.PetersburgBlock == nil && isBlockForked(c.ConstantinopleBlock, num)
 }
 
 // IsIstanbul returns whether num is either equal to the Istanbul fork block or greater.
 func (c *ChainConfig) IsIstanbul(num *big.Int) bool {
-	return isForked(c.IstanbulBlock, num)
+	return isBlockForked(c.IstanbulBlock, num)
 }
 
 // IsBerlin returns whether num is either equal to the Berlin fork block or greater.
 func (c *ChainConfig) IsBerlin(num *big.Int) bool {
-	return isForked(c.BerlinBlock, num)
+	return isBlockForked(c.BerlinBlock, num)
 }
 
 // IsLondon returns whether num is either equal to the London fork block or greater.
 func (c *ChainConfig) IsLondon(num *big.Int) bool {
-	return isForked(c.LondonBlock, num)
+	return isBlockForked(c.LondonBlock, num)
 }
 
 // IsArrowGlacier returns whether num is either equal to the Arrow Glacier (EIP-4345) fork block or greater.
 func (c *ChainConfig) IsArrowGlacier(num *big.Int) bool {
-	return isForked(c.ArrowGlacierBlock, num)
+	return isBlockForked(c.ArrowGlacierBlock, num)
 }
 
 // IsGrayGlacier returns whether num is either equal to the Gray Glacier (EIP-5133) fork block or greater.
 func (c *ChainConfig) IsGrayGlacier(num *big.Int) bool {
-	return isForked(c.GrayGlacierBlock, num)
+	return isBlockForked(c.GrayGlacierBlock, num)
 }
 
 // IsTerminalPoWBlock returns whether the given block is the last block of PoW stage.
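Every block-based predicate above now delegates to the same nil-safe comparison, isBlockForked, which this patch introduces further down. A standalone sketch of its semantics (mirroring the helper as it appears in this diff):

    package main

    import (
    	"fmt"
    	"math/big"
    )

    // Mirrors the isBlockForked helper added by this patch: a nil schedule or an
    // unknown head means "not forked"; otherwise the fork is active from its block on.
    func isBlockForked(s, head *big.Int) bool {
    	if s == nil || head == nil {
    		return false
    	}
    	return s.Cmp(head) <= 0
    }

    func main() {
    	fmt.Println(isBlockForked(nil, big.NewInt(100)))             // false: fork not scheduled
    	fmt.Println(isBlockForked(big.NewInt(100), big.NewInt(99)))  // false: head before fork
    	fmt.Println(isBlockForked(big.NewInt(100), big.NewInt(100))) // true: active at the fork block
    }

A nil schedule thus reads as "fork never activates", which is what allows optional forks to stay unset.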
@@ -567,30 +576,37 @@ func (c *ChainConfig) IsTerminalPoWBlock(parentTotalDiff *big.Int, totalDiff *big.Int) bool {
 	return parentTotalDiff.Cmp(c.TerminalTotalDifficulty) < 0 && totalDiff.Cmp(c.TerminalTotalDifficulty) >= 0
 }
 
-// IsShanghai returns whether num is either equal to the Shanghai fork block or greater.
-func (c *ChainConfig) IsShanghai(num *big.Int) bool {
-	return isForked(c.ShanghaiBlock, num)
-}
-
 // IsCancun returns whether num is either equal to the Cancun fork block or greater.
 func (c *ChainConfig) IsCancun(num *big.Int) bool {
-	return isForked(c.CancunBlock, num)
+	return isBlockForked(c.CancunBlock, num)
+}
+
+// IsShanghai returns whether time is either equal to the Shanghai fork time or greater.
+func (c *ChainConfig) IsShanghai(time *big.Int) bool {
+	return isTimestampForked(c.ShanghaiTime, time)
 }
 
 // CheckCompatible checks whether scheduled fork transitions have been imported
 // with a mismatching chain configuration.
-func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64) *ConfigCompatError {
-	bhead := new(big.Int).SetUint64(height)
+func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError {
+	var (
+		bhead = new(big.Int).SetUint64(height)
+		btime = new(big.Int).SetUint64(time)
+	)
 	// Iterate checkCompatible to find the lowest conflict.
 	var lasterr *ConfigCompatError
 	for {
-		err := c.checkCompatible(newcfg, bhead)
-		if err == nil || (lasterr != nil && err.RewindTo == lasterr.RewindTo) {
+		err := c.checkCompatible(newcfg, bhead, btime)
+		if err == nil || (lasterr != nil && err.RewindToBlock == lasterr.RewindToBlock && err.RewindToTime == lasterr.RewindToTime) {
 			break
 		}
 		lasterr = err
-		bhead.SetUint64(err.RewindTo)
+
+		if err.RewindToTime > 0 {
+			btime.SetUint64(err.RewindToTime)
+		} else {
+			bhead.SetUint64(err.RewindToBlock)
+		}
 	}
 	return lasterr
 }
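A hypothetical call site for the widened signature (function name invented; assumes the params package context): the head timestamp now travels next to the head block number so that both block-scheduled and timestamp-scheduled forks are validated in one pass.

    // Sketch only: how a caller might invoke the new two-dimensional check.
    func verifyStoredConfig(stored, fresh *ChainConfig, headNumber, headTime uint64) error {
    	// A non-nil *ConfigCompatError carries RewindToBlock / RewindToTime hints.
    	if compatErr := stored.CheckCompatible(fresh, headNumber, headTime); compatErr != nil {
    		return compatErr
    	}
    	return nil
    }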
@@ -599,9 +615,10 @@ func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64) *ConfigCompatError {
 // to guarantee that forks can be implemented in a different order than on official networks
 func (c *ChainConfig) CheckConfigForkOrder() error {
 	type fork struct {
 		name string
-		block *big.Int
-		optional bool // if true, the fork may be nil and next fork is still allowed
+		block *big.Int // forks up to - and including the merge - were defined with block numbers
+		timestamp *big.Int // forks after the merge are scheduled using timestamps
+		optional bool // if true, the fork may be nil and next fork is still allowed
 	}
 	var lastFork fork
 	for _, cur := range []fork{
@@ -620,91 +637,107 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
 		{name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true},
 		{name: "grayGlacierBlock", block: c.GrayGlacierBlock, optional: true},
 		{name: "mergeNetsplitBlock", block: c.MergeNetsplitBlock, optional: true},
-		{name: "shanghaiBlock", block: c.ShanghaiBlock, optional: true},
 		{name: "cancunBlock", block: c.CancunBlock, optional: true},
+		{name: "shanghaiTime", timestamp: c.ShanghaiTime},
 	} {
 		if lastFork.name != "" {
-			// Next one must be higher number
-			if lastFork.block == nil && cur.block != nil {
-				return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at %v",
-					lastFork.name, cur.name, cur.block)
-			}
-			if lastFork.block != nil && cur.block != nil {
-				if lastFork.block.Cmp(cur.block) > 0 {
-					return fmt.Errorf("unsupported fork ordering: %v enabled at %v, but %v enabled at %v",
+			switch {
+			// Non-optional forks must all be present in the chain config up to the last defined fork
+			case lastFork.block == nil && lastFork.timestamp == nil && (cur.block != nil || cur.timestamp != nil):
+				if cur.block != nil {
+					return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at block %v",
+						lastFork.name, cur.name, cur.block)
+				} else {
+					return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at timestamp %v",
+						lastFork.name, cur.name, cur.timestamp)
+				}
+
+			// Fork (whether defined by block or timestamp) must follow the fork definition sequence
+			case (lastFork.block != nil && cur.block != nil) || (lastFork.timestamp != nil && cur.timestamp != nil):
+				if lastFork.block != nil && lastFork.block.Cmp(cur.block) > 0 {
+					return fmt.Errorf("unsupported fork ordering: %v enabled at block %v, but %v enabled at block %v",
 						lastFork.name, lastFork.block, cur.name, cur.block)
+				} else if lastFork.timestamp != nil && lastFork.timestamp.Cmp(cur.timestamp) > 0 {
+					return fmt.Errorf("unsupported fork ordering: %v enabled at timestamp %v, but %v enabled at timestamp %v",
+						lastFork.name, lastFork.timestamp, cur.name, cur.timestamp)
+				}
+
+				// Timestamp based forks can follow block based ones, but not the other way around
+				if lastFork.timestamp != nil && cur.block != nil {
+					return fmt.Errorf("unsupported fork ordering: %v used timestamp ordering, but %v reverted to block ordering",
+						lastFork.name, cur.name)
 				}
 			}
 		}
 		// If it was optional and not set, then ignore it
-		if !cur.optional || cur.block != nil {
+		if !cur.optional || (cur.block != nil || cur.timestamp != nil) {
 			lastFork = cur
 		}
 	}
 	return nil
 }
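To make the new third rule concrete, a small self-contained sketch (fork names and values invented) of the check that rejects a block-scheduled fork following a timestamp-scheduled one:

    package main

    import (
    	"fmt"
    	"math/big"
    )

    // Toy copy of the fork descriptor used by CheckConfigForkOrder.
    type fork struct {
    	name      string
    	block     *big.Int // block-scheduled fork, if any
    	timestamp *big.Int // timestamp-scheduled fork, if any
    }

    func main() {
    	// shanghaiTime is timestamp-scheduled; a later block-scheduled fork is invalid.
    	last := fork{name: "shanghaiTime", timestamp: big.NewInt(1_681_338_455)}
    	cur := fork{name: "exampleBlockFork", block: big.NewInt(20_000_000)}
    	if last.timestamp != nil && cur.block != nil {
    		fmt.Printf("unsupported fork ordering: %v used timestamp ordering, but %v reverted to block ordering\n",
    			last.name, cur.name)
    	}
    }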
 
-func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *ConfigCompatError {
-	if isForkIncompatible(c.HomesteadBlock, newcfg.HomesteadBlock, head) {
-		return newCompatError("Homestead fork block", c.HomesteadBlock, newcfg.HomesteadBlock)
+func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int, headTimestamp *big.Int) *ConfigCompatError {
+	if isForkBlockIncompatible(c.HomesteadBlock, newcfg.HomesteadBlock, headNumber) {
+		return newBlockCompatError("Homestead fork block", c.HomesteadBlock, newcfg.HomesteadBlock)
 	}
-	if isForkIncompatible(c.DAOForkBlock, newcfg.DAOForkBlock, head) {
-		return newCompatError("DAO fork block", c.DAOForkBlock, newcfg.DAOForkBlock)
+	if isForkBlockIncompatible(c.DAOForkBlock, newcfg.DAOForkBlock, headNumber) {
+		return newBlockCompatError("DAO fork block", c.DAOForkBlock, newcfg.DAOForkBlock)
 	}
-	if c.IsDAOFork(head) && c.DAOForkSupport != newcfg.DAOForkSupport {
-		return newCompatError("DAO fork support flag", c.DAOForkBlock, newcfg.DAOForkBlock)
+	if c.IsDAOFork(headNumber) && c.DAOForkSupport != newcfg.DAOForkSupport {
+		return newBlockCompatError("DAO fork support flag", c.DAOForkBlock, newcfg.DAOForkBlock)
 	}
-	if isForkIncompatible(c.EIP150Block, newcfg.EIP150Block, head) {
-		return newCompatError("EIP150 fork block", c.EIP150Block, newcfg.EIP150Block)
+	if isForkBlockIncompatible(c.EIP150Block, newcfg.EIP150Block, headNumber) {
+		return newBlockCompatError("EIP150 fork block", c.EIP150Block, newcfg.EIP150Block)
 	}
-	if isForkIncompatible(c.EIP155Block, newcfg.EIP155Block, head) {
-		return newCompatError("EIP155 fork block", c.EIP155Block, newcfg.EIP155Block)
+	if isForkBlockIncompatible(c.EIP155Block, newcfg.EIP155Block, headNumber) {
+		return newBlockCompatError("EIP155 fork block", c.EIP155Block, newcfg.EIP155Block)
 	}
-	if isForkIncompatible(c.EIP158Block, newcfg.EIP158Block, head) {
-		return newCompatError("EIP158 fork block", c.EIP158Block, newcfg.EIP158Block)
+	if isForkBlockIncompatible(c.EIP158Block, newcfg.EIP158Block, headNumber) {
+		return newBlockCompatError("EIP158 fork block", c.EIP158Block, newcfg.EIP158Block)
 	}
-	if c.IsEIP158(head) && !configNumEqual(c.ChainID, newcfg.ChainID) {
-		return newCompatError("EIP158 chain ID", c.EIP158Block, newcfg.EIP158Block)
+	if c.IsEIP158(headNumber) && !configBlockEqual(c.ChainID, newcfg.ChainID) {
+		return newBlockCompatError("EIP158 chain ID", c.EIP158Block, newcfg.EIP158Block)
 	}
-	if isForkIncompatible(c.ByzantiumBlock, newcfg.ByzantiumBlock, head) {
-		return newCompatError("Byzantium fork block", c.ByzantiumBlock, newcfg.ByzantiumBlock)
+	if isForkBlockIncompatible(c.ByzantiumBlock, newcfg.ByzantiumBlock, headNumber) {
+		return newBlockCompatError("Byzantium fork block", c.ByzantiumBlock, newcfg.ByzantiumBlock)
 	}
-	if isForkIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, head) {
-		return newCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock)
+	if isForkBlockIncompatible(c.ConstantinopleBlock, newcfg.ConstantinopleBlock, headNumber) {
+		return newBlockCompatError("Constantinople fork block", c.ConstantinopleBlock, newcfg.ConstantinopleBlock)
 	}
-	if isForkIncompatible(c.PetersburgBlock, newcfg.PetersburgBlock, head) {
+	if isForkBlockIncompatible(c.PetersburgBlock, newcfg.PetersburgBlock, headNumber) {
 		// the only case where we allow Petersburg to be set in the past is if it is equal to Constantinople
 		// mainly to satisfy fork ordering requirements which state that Petersburg fork be set if Constantinople fork is set
-		if isForkIncompatible(c.ConstantinopleBlock, newcfg.PetersburgBlock, head) {
-			return newCompatError("Petersburg fork block", c.PetersburgBlock, newcfg.PetersburgBlock)
+		if isForkBlockIncompatible(c.ConstantinopleBlock, newcfg.PetersburgBlock, headNumber) {
+			return newBlockCompatError("Petersburg fork block", c.PetersburgBlock, newcfg.PetersburgBlock)
 		}
 	}
-	if isForkIncompatible(c.IstanbulBlock, newcfg.IstanbulBlock, head) {
-		return newCompatError("Istanbul fork block", c.IstanbulBlock, newcfg.IstanbulBlock)
+	if isForkBlockIncompatible(c.IstanbulBlock, newcfg.IstanbulBlock, headNumber) {
+		return newBlockCompatError("Istanbul fork block", c.IstanbulBlock, newcfg.IstanbulBlock)
 	}
-	if isForkIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, head) {
-		return newCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock)
+	if isForkBlockIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, headNumber) {
+		return newBlockCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock)
 	}
-	if isForkIncompatible(c.BerlinBlock, newcfg.BerlinBlock, head) {
-		return newCompatError("Berlin fork block", c.BerlinBlock, newcfg.BerlinBlock)
+	if isForkBlockIncompatible(c.BerlinBlock, newcfg.BerlinBlock, headNumber) {
+		return newBlockCompatError("Berlin fork block", c.BerlinBlock, newcfg.BerlinBlock)
 	}
-	if isForkIncompatible(c.LondonBlock, newcfg.LondonBlock, head) {
-		return newCompatError("London fork block", c.LondonBlock, newcfg.LondonBlock)
+	if isForkBlockIncompatible(c.LondonBlock, newcfg.LondonBlock, headNumber) {
+		return newBlockCompatError("London fork block", c.LondonBlock, newcfg.LondonBlock)
 	}
-	if isForkIncompatible(c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock, head) {
-		return newCompatError("Arrow Glacier fork block", c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock)
+	if isForkBlockIncompatible(c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock, headNumber) {
+		return newBlockCompatError("Arrow Glacier fork block", c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock)
 	}
-	if isForkIncompatible(c.GrayGlacierBlock, newcfg.GrayGlacierBlock, head) {
-		return newCompatError("Gray Glacier fork block", c.GrayGlacierBlock, newcfg.GrayGlacierBlock)
+	if isForkBlockIncompatible(c.GrayGlacierBlock, newcfg.GrayGlacierBlock, headNumber) {
		return newBlockCompatError("Gray Glacier fork block", c.GrayGlacierBlock, newcfg.GrayGlacierBlock)
 	}
-	if isForkIncompatible(c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock, head) {
-		return newCompatError("Merge netsplit fork block", c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock)
+	if isForkBlockIncompatible(c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock, headNumber) {
+		return newBlockCompatError("Merge netsplit fork block", c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock)
 	}
-	if isForkIncompatible(c.ShanghaiBlock, newcfg.ShanghaiBlock, head) {
-		return newCompatError("Shanghai fork block", c.ShanghaiBlock, newcfg.ShanghaiBlock)
+	if isForkBlockIncompatible(c.CancunBlock, newcfg.CancunBlock, headNumber) {
+		return newBlockCompatError("Cancun fork block", c.CancunBlock, newcfg.CancunBlock)
 	}
-	if isForkIncompatible(c.CancunBlock, newcfg.CancunBlock, head) {
-		return newCompatError("Cancun fork block", c.CancunBlock, newcfg.CancunBlock)
+	if isForkTimestampIncompatible(c.ShanghaiTime, newcfg.ShanghaiTime, headTimestamp) {
+		return newTimestampCompatError("Shanghai fork timestamp", c.ShanghaiTime, newcfg.ShanghaiTime)
 	}
 	return nil
 }
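A test one might add in package params (values invented, not part of this patch) to exercise the new timestamp path end to end: rescheduling Shanghai after the node's head has passed the stored fork time must surface a timestamp incompatibility with the expected rewind hint.

    // Sketch of an in-package test; ChainConfig and big.Int come from the
    // surrounding params package and math/big.
    func TestShanghaiReschedule(t *testing.T) {
    	stored := &ChainConfig{ShanghaiTime: big.NewInt(1_600_000_000)}
    	fresh := &ChainConfig{ShanghaiTime: big.NewInt(1_700_000_000)}
    	// Head block 17M; head time already past both candidate timestamps.
    	err := stored.CheckCompatible(fresh, 17_000_000, 1_750_000_000)
    	if err == nil || err.RewindToTime != 1_599_999_999 {
    		t.Fatalf("want timestamp incompatibility rewinding to 1599999999, got %v", err)
    	}
    }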
@@ -719,21 +752,49 @@ func (c *ChainConfig) ElasticityMultiplier() uint64 {
 	return DefaultElasticityMultiplier
 }
 
-// isForkIncompatible returns true if a fork scheduled at s1 cannot be rescheduled to
-// block s2 because head is already past the fork.
-func isForkIncompatible(s1, s2, head *big.Int) bool {
-	return (isForked(s1, head) || isForked(s2, head)) && !configNumEqual(s1, s2)
+// isForkBlockIncompatible returns true if a fork scheduled at block s1 cannot be
+// rescheduled to block s2 because head is already past the fork.
+func isForkBlockIncompatible(s1, s2, head *big.Int) bool {
+	return (isBlockForked(s1, head) || isBlockForked(s2, head)) && !configBlockEqual(s1, s2)
 }
 
-// isForked returns whether a fork scheduled at block s is active at the given head block.
-func isForked(s, head *big.Int) bool {
+// isBlockForked returns whether a fork scheduled at block s is active at the
+// given head block. Whilst this method is the same as isTimestampForked, they
+// are explicitly separate for clearer reading.
+func isBlockForked(s, head *big.Int) bool {
 	if s == nil || head == nil {
 		return false
 	}
 	return s.Cmp(head) <= 0
 }
 
-func configNumEqual(x, y *big.Int) bool {
+func configBlockEqual(x, y *big.Int) bool {
+	if x == nil {
+		return y == nil
+	}
+	if y == nil {
+		return x == nil
+	}
+	return x.Cmp(y) == 0
+}
+
+// isForkTimestampIncompatible returns true if a fork scheduled at timestamp s1
+// cannot be rescheduled to timestamp s2 because head is already past the fork.
+func isForkTimestampIncompatible(s1, s2, head *big.Int) bool {
+	return (isTimestampForked(s1, head) || isTimestampForked(s2, head)) && !configTimestampEqual(s1, s2)
+}
+
+// isTimestampForked returns whether a fork scheduled at timestamp s is active
+// at the given head timestamp. Whilst this method is the same as isBlockForked,
+// they are explicitly separate for clearer reading.
+func isTimestampForked(s, head *big.Int) bool {
+	if s == nil || head == nil {
+		return false
+	}
+	return s.Cmp(head) <= 0
+}
+
+func configTimestampEqual(x, y *big.Int) bool {
 	if x == nil {
 		return y == nil
 	}
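The nil semantics of these helpers do the heavy lifting: an unset schedule never counts as forked, and two unset schedules compare equal. A standalone truth-table sketch (local copies of the two helpers above):

    package main

    import (
    	"fmt"
    	"math/big"
    )

    func isTimestampForked(s, head *big.Int) bool {
    	if s == nil || head == nil {
    		return false
    	}
    	return s.Cmp(head) <= 0
    }

    func configTimestampEqual(x, y *big.Int) bool {
    	if x == nil {
    		return y == nil
    	}
    	if y == nil {
    		return x == nil
    	}
    	return x.Cmp(y) == 0
    }

    func main() {
    	fmt.Println(isTimestampForked(nil, big.NewInt(1)))           // false: never scheduled
    	fmt.Println(isTimestampForked(big.NewInt(0), big.NewInt(0))) // true: active from genesis time
    	fmt.Println(configTimestampEqual(nil, nil))                  // true: both unscheduled
    	fmt.Println(configTimestampEqual(nil, big.NewInt(1)))        // false
    }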
@@ -747,13 +808,21 @@ func configNumEqual(x, y *big.Int) bool {
 // ChainConfig that would alter the past.
 type ConfigCompatError struct {
 	What string
-	// block numbers of the stored and new configurations
-	StoredConfig, NewConfig *big.Int
+	// block numbers of the stored and new configurations if block based forking
+	StoredBlock, NewBlock *big.Int
+
+	// timestamps of the stored and new configurations if time based forking
+	StoredTime, NewTime *big.Int
+
 	// the block number to which the local chain must be rewound to correct the error
-	RewindTo uint64
+	RewindToBlock uint64
+
+	// the timestamp to which the local chain must be rewound to correct the error
+	RewindToTime uint64
 }
 
-func newCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatError {
+func newBlockCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatError {
 	var rew *big.Int
 	switch {
 	case storedblock == nil:
@@ -763,15 +832,45 @@ func newCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatError {
 	default:
 		rew = newblock
 	}
-	err := &ConfigCompatError{what, storedblock, newblock, 0}
+	err := &ConfigCompatError{
+		What: what,
+		StoredBlock: storedblock,
+		NewBlock: newblock,
+		RewindToBlock: 0,
+	}
 	if rew != nil && rew.Sign() > 0 {
-		err.RewindTo = rew.Uint64() - 1
+		err.RewindToBlock = rew.Uint64() - 1
+	}
+	return err
+}
+
+func newTimestampCompatError(what string, storedtime, newtime *big.Int) *ConfigCompatError {
+	var rew *big.Int
+	switch {
+	case storedtime == nil:
+		rew = newtime
+	case newtime == nil || storedtime.Cmp(newtime) < 0:
+		rew = storedtime
+	default:
+		rew = newtime
+	}
+	err := &ConfigCompatError{
+		What: what,
+		StoredTime: storedtime,
+		NewTime: newtime,
+		RewindToTime: 0,
+	}
+	if rew != nil && rew.Sign() > 0 {
+		err.RewindToTime = rew.Uint64() - 1
 	}
 	return err
 }
 
 func (err *ConfigCompatError) Error() string {
-	return fmt.Sprintf("mismatching %s in database (have %d, want %d, rewindto %d)", err.What, err.StoredConfig, err.NewConfig, err.RewindTo)
+	if err.StoredBlock != nil {
+		return fmt.Sprintf("mismatching %s in database (have block %d, want block %d, rewindto block %d)", err.What, err.StoredBlock, err.NewBlock, err.RewindToBlock)
+	}
+	return fmt.Sprintf("mismatching %s in database (have timestamp %d, want timestamp %d, rewindto timestamp %d)", err.What, err.StoredTime, err.NewTime, err.RewindToTime)
 }
 
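Note that Error() uses StoredBlock != nil to decide which flavour of error it is printing, so a block-based error with a nil stored block falls through to the timestamp wording. An in-package test sketch (values invented) pinning down both string shapes:

    // Sketch of an in-package test; the constructors are unexported, so this
    // only compiles inside package params.
    func TestCompatErrorStrings(t *testing.T) {
    	blockErr := newBlockCompatError("Berlin fork block", big.NewInt(100), big.NewInt(200))
    	want := "mismatching Berlin fork block in database (have block 100, want block 200, rewindto block 99)"
    	if blockErr.Error() != want {
    		t.Fatalf("got %q", blockErr)
    	}
    	timeErr := newTimestampCompatError("Shanghai fork timestamp", big.NewInt(1000), big.NewInt(2000))
    	want = "mismatching Shanghai fork timestamp in database (have timestamp 1000, want timestamp 2000, rewindto timestamp 999)"
    	if timeErr.Error() != want {
    		t.Fatalf("got %q", timeErr)
    	}
    }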
 // Rules wraps ChainConfig and is merely syntactic sugar or can be used for functions
@@ -788,7 +887,7 @@ type Rules struct {
 }
 
 // Rules ensures c's ChainID is not nil.
-func (c *ChainConfig) Rules(num *big.Int, isMerge bool) Rules {
+func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp *big.Int) Rules {
 	chainID := c.ChainID
 	if chainID == nil {
 		chainID = new(big.Int)
@@ -806,7 +905,7 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool) Rules {
 		IsBerlin: c.IsBerlin(num),
 		IsLondon: c.IsLondon(num),
 		IsMerge: isMerge,
-		IsShanghai: c.IsShanghai(num),
+		IsShanghai: c.IsShanghai(timestamp),
 		isCancun: c.IsCancun(num),
 	}
 }
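A hypothetical call site for the widened Rules signature (header is assumed to be a *types.Header; config a *params.ChainConfig): callers now pass the block timestamp so IsShanghai is evaluated on time rather than height.

    // Sketch, not from this patch:
    rules := config.Rules(header.Number, true /* isMerge */, new(big.Int).SetUint64(header.Time))
    if rules.IsShanghai {
    	// timestamp-scheduled Shanghai rules apply from this block on
    }

Callers with no meaningful timestamp (as in the state-test benchmark below) can pass new(big.Int), which keeps pre-Shanghai behaviour whenever ShanghaiTime is unset or positive.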
@@ -20,79 +20,99 @@ import (
 	"math/big"
 	"reflect"
 	"testing"
+	"time"
 )
 
 func TestCheckCompatible(t *testing.T) {
 	type test struct {
 		stored, new *ChainConfig
-		head uint64
-		wantErr *ConfigCompatError
+		headBlock uint64
+		headTimestamp uint64
+		wantErr *ConfigCompatError
 	}
 	tests := []test{
-		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, head: 0, wantErr: nil},
-		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, head: 100, wantErr: nil},
+		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, headBlock: 0, headTimestamp: 0, wantErr: nil},
+		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, headBlock: 0, headTimestamp: uint64(time.Now().Unix()), wantErr: nil},
+		{stored: AllEthashProtocolChanges, new: AllEthashProtocolChanges, headBlock: 100, wantErr: nil},
 		{
 			stored: &ChainConfig{EIP150Block: big.NewInt(10)},
 			new: &ChainConfig{EIP150Block: big.NewInt(20)},
-			head: 9,
+			headBlock: 9,
 			wantErr: nil,
 		},
 		{
 			stored: AllEthashProtocolChanges,
 			new: &ChainConfig{HomesteadBlock: nil},
-			head: 3,
+			headBlock: 3,
 			wantErr: &ConfigCompatError{
 				What: "Homestead fork block",
-				StoredConfig: big.NewInt(0),
-				NewConfig: nil,
-				RewindTo: 0,
+				StoredBlock: big.NewInt(0),
+				NewBlock: nil,
+				RewindToBlock: 0,
 			},
 		},
 		{
 			stored: AllEthashProtocolChanges,
 			new: &ChainConfig{HomesteadBlock: big.NewInt(1)},
-			head: 3,
+			headBlock: 3,
 			wantErr: &ConfigCompatError{
 				What: "Homestead fork block",
-				StoredConfig: big.NewInt(0),
-				NewConfig: big.NewInt(1),
-				RewindTo: 0,
+				StoredBlock: big.NewInt(0),
+				NewBlock: big.NewInt(1),
+				RewindToBlock: 0,
 			},
 		},
 		{
 			stored: &ChainConfig{HomesteadBlock: big.NewInt(30), EIP150Block: big.NewInt(10)},
 			new: &ChainConfig{HomesteadBlock: big.NewInt(25), EIP150Block: big.NewInt(20)},
-			head: 25,
+			headBlock: 25,
 			wantErr: &ConfigCompatError{
 				What: "EIP150 fork block",
-				StoredConfig: big.NewInt(10),
-				NewConfig: big.NewInt(20),
-				RewindTo: 9,
+				StoredBlock: big.NewInt(10),
+				NewBlock: big.NewInt(20),
+				RewindToBlock: 9,
 			},
 		},
 		{
 			stored: &ChainConfig{ConstantinopleBlock: big.NewInt(30)},
 			new: &ChainConfig{ConstantinopleBlock: big.NewInt(30), PetersburgBlock: big.NewInt(30)},
-			head: 40,
+			headBlock: 40,
 			wantErr: nil,
 		},
 		{
 			stored: &ChainConfig{ConstantinopleBlock: big.NewInt(30)},
 			new: &ChainConfig{ConstantinopleBlock: big.NewInt(30), PetersburgBlock: big.NewInt(31)},
-			head: 40,
+			headBlock: 40,
 			wantErr: &ConfigCompatError{
 				What: "Petersburg fork block",
-				StoredConfig: nil,
-				NewConfig: big.NewInt(31),
-				RewindTo: 30,
+				StoredBlock: nil,
+				NewBlock: big.NewInt(31),
+				RewindToBlock: 30,
+			},
+		},
+		{
+			stored: &ChainConfig{ShanghaiTime: big.NewInt(10)},
+			new: &ChainConfig{ShanghaiTime: big.NewInt(20)},
+			headTimestamp: 9,
+			wantErr: nil,
+		},
+		{
+			stored: &ChainConfig{ShanghaiTime: big.NewInt(10)},
+			new: &ChainConfig{ShanghaiTime: big.NewInt(20)},
+			headTimestamp: 25,
+			wantErr: &ConfigCompatError{
+				What: "Shanghai fork timestamp",
+				StoredTime: big.NewInt(10),
+				NewTime: big.NewInt(20),
+				RewindToTime: 9,
 			},
 		},
 	}
 
 	for _, test := range tests {
-		err := test.stored.CheckCompatible(test.new, test.head)
+		err := test.stored.CheckCompatible(test.new, test.headBlock, test.headTimestamp)
 		if !reflect.DeepEqual(err, test.wantErr) {
-			t.Errorf("error mismatch:\nstored: %v\nnew: %v\nhead: %v\nerr: %v\nwant: %v", test.stored, test.new, test.head, err, test.wantErr)
+			t.Errorf("error mismatch:\nstored: %v\nnew: %v\nheadBlock: %v\nheadTimestamp: %v\nerr: %v\nwant: %v", test.stored, test.new, test.headBlock, test.headTimestamp, err, test.wantErr)
 		}
 	}
 }
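The table stops short of mixing the two dimensions. A case one might add (not part of this patch; values invented) to pin down the iteration order: the first pass finds the London block conflict and rewinds the head block, and the next pass then surfaces the Shanghai timestamp conflict, which is the error the loop ultimately returns as the lowest rewind point.

    {
    	stored:        &ChainConfig{LondonBlock: big.NewInt(10), ShanghaiTime: big.NewInt(100)},
    	new:           &ChainConfig{LondonBlock: big.NewInt(20), ShanghaiTime: big.NewInt(200)},
    	headBlock:     15,
    	headTimestamp: 150,
    	wantErr: &ConfigCompatError{
    		What:         "Shanghai fork timestamp",
    		StoredTime:   big.NewInt(100),
    		NewTime:      big.NewInt(200),
    		RewindToTime: 99,
    	},
    },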
@@ -183,7 +183,7 @@ func runBenchmark(b *testing.B, t *StateTest) {
 			b.Error(err)
 			return
 		}
-		var rules = config.Rules(new(big.Int), false)
+		var rules = config.Rules(new(big.Int), false, new(big.Int))
 
 		vmconfig.ExtraEips = eips
 		block := t.genesis(config).ToBlock()