cmd, eth: fix required blocks regression

Péter Szilágyi 2022-05-04 19:55:17 +03:00
parent 0a9e384cd5
commit ecae8e4f65
GPG Key ID: E9AE538CEDF8293D
7 changed files with 55 additions and 58 deletions

@@ -105,7 +105,7 @@ var (
 utils.UltraLightFractionFlag,
 utils.UltraLightOnlyAnnounceFlag,
 utils.LightNoSyncServeFlag,
-utils.EthPeerRequiredBlocksFlag,
+utils.EthRequiredBlocksFlag,
 utils.LegacyWhitelistFlag,
 utils.BloomFilterSizeFlag,
 utils.CacheFlag,

@@ -46,7 +46,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 utils.EthStatsURLFlag,
 utils.IdentityFlag,
 utils.LightKDFFlag,
-utils.EthPeerRequiredBlocksFlag,
+utils.EthRequiredBlocksFlag,
 }, utils.NetworkFlags, utils.DatabasePathFlags),
 },
 {

@@ -240,13 +240,13 @@ var (
 Name: "lightkdf",
 Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
 }
-EthPeerRequiredBlocksFlag = cli.StringFlag{
+EthRequiredBlocksFlag = cli.StringFlag{
 Name: "eth.requiredblocks",
 Usage: "Comma separated block number-to-hash mappings to require for peering (<number>=<hash>)",
 }
 LegacyWhitelistFlag = cli.StringFlag{
 Name: "whitelist",
-Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --peer.requiredblocks)",
+Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --eth.requiredblocks)",
 }
 BloomFilterSizeFlag = cli.Uint64Flag{
 Name: "bloomfilter.size",
@@ -1501,33 +1501,31 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
 }
 }
-func setPeerRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
-peerRequiredBlocks := ctx.GlobalString(EthPeerRequiredBlocksFlag.Name)
-if peerRequiredBlocks == "" {
+func setRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
+requiredBlocks := ctx.GlobalString(EthRequiredBlocksFlag.Name)
+if requiredBlocks == "" {
 if ctx.GlobalIsSet(LegacyWhitelistFlag.Name) {
-log.Warn("The flag --rpc is deprecated and will be removed, please use --peer.requiredblocks")
-peerRequiredBlocks = ctx.GlobalString(LegacyWhitelistFlag.Name)
+log.Warn("The flag --whitelist is deprecated and will be removed, please use --eth.requiredblocks")
+requiredBlocks = ctx.GlobalString(LegacyWhitelistFlag.Name)
 } else {
 return
 }
 }
-cfg.PeerRequiredBlocks = make(map[uint64]common.Hash)
-for _, entry := range strings.Split(peerRequiredBlocks, ",") {
+cfg.RequiredBlocks = make(map[uint64]common.Hash)
+for _, entry := range strings.Split(requiredBlocks, ",") {
 parts := strings.Split(entry, "=")
 if len(parts) != 2 {
-Fatalf("Invalid peer required block entry: %s", entry)
+Fatalf("Invalid required block entry: %s", entry)
 }
 number, err := strconv.ParseUint(parts[0], 0, 64)
 if err != nil {
-Fatalf("Invalid peer required block number %s: %v", parts[0], err)
+Fatalf("Invalid required block number %s: %v", parts[0], err)
 }
 var hash common.Hash
 if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
-Fatalf("Invalid peer required block hash %s: %v", parts[1], err)
+Fatalf("Invalid required block hash %s: %v", parts[1], err)
 }
-cfg.PeerRequiredBlocks[number] = hash
+cfg.RequiredBlocks[number] = hash
 }
 }
@@ -1594,7 +1592,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 setTxPool(ctx, &cfg.TxPool)
 setEthash(ctx, cfg)
 setMiner(ctx, &cfg.Miner)
-setPeerRequiredBlocks(ctx, cfg)
+setRequiredBlocks(ctx, cfg)
 setLes(ctx, cfg)
 // Cap the cache allowance and tune the garbage collector
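For reference, the renamed --eth.requiredblocks flag keeps the value syntax of the old --whitelist flag: a comma separated list of <number>=<hash> pairs. The following self-contained Go sketch mirrors the parsing done by setRequiredBlocks above; the block number and hash below are placeholders, not real chain data.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical flag value; number and hash are placeholders for illustration.
	requiredBlocks := "1920000=0x1111111111111111111111111111111111111111111111111111111111111111"

	blocks := make(map[uint64]common.Hash)
	for _, entry := range strings.Split(requiredBlocks, ",") {
		parts := strings.Split(entry, "=")
		if len(parts) != 2 {
			panic("invalid required block entry: " + entry)
		}
		// Block number may be given in decimal or 0x-prefixed hex (base 0).
		number, err := strconv.ParseUint(parts[0], 0, 64)
		if err != nil {
			panic(err)
		}
		// The hash is parsed the same way the flag handler does it.
		var hash common.Hash
		if err := hash.UnmarshalText([]byte(parts[1])); err != nil {
			panic(err)
		}
		blocks[number] = hash
	}
	fmt.Println(blocks) // prints the parsed number-to-hash mapping
}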

@@ -220,16 +220,16 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 checkpoint = params.TrustedCheckpoints[genesisHash]
 }
 if eth.handler, err = newHandler(&handlerConfig{
-Database: chainDb,
-Chain: eth.blockchain,
-TxPool: eth.txPool,
-Merger: merger,
-Network: config.NetworkId,
-Sync: config.SyncMode,
-BloomCache: uint64(cacheLimit),
-EventMux: eth.eventMux,
-Checkpoint: checkpoint,
-PeerRequiredBlocks: config.PeerRequiredBlocks,
+Database: chainDb,
+Chain: eth.blockchain,
+TxPool: eth.txPool,
+Merger: merger,
+Network: config.NetworkId,
+Sync: config.SyncMode,
+BloomCache: uint64(cacheLimit),
+EventMux: eth.eventMux,
+Checkpoint: checkpoint,
+RequiredBlocks: config.RequiredBlocks,
 }); err != nil {
 return nil, err
 }

@@ -138,10 +138,10 @@ type Config struct {
 TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
-// PeerRequiredBlocks is a set of block number -> hash mappings which must be in the
+// RequiredBlocks is a set of block number -> hash mappings which must be in the
 // canonical chain of all remote peers. Setting the option makes geth verify the
 // presence of these blocks for every new peer connection.
-PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
+RequiredBlocks map[uint64]common.Hash `toml:"-"`
 // Light client options
 LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests
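Since the field carries the toml:"-" tag, RequiredBlocks is not read from or written to the on-disk TOML configuration; the mapping comes in through --eth.requiredblocks or is set directly when embedding the eth service. A minimal sketch of the programmatic route, assuming the ethconfig.Defaults value exported by the package; the block number and hash are placeholders.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

// requiredBlocksConfig is a hypothetical helper that pins one required block
// on top of the default eth configuration. Values are illustrative only.
func requiredBlocksConfig() ethconfig.Config {
	cfg := ethconfig.Defaults
	cfg.RequiredBlocks = map[uint64]common.Hash{
		14_000_000: common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"),
	}
	return cfg
}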

@@ -26,7 +26,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
 NoPruning bool
 NoPrefetch bool
 TxLookupLimit uint64 `toml:",omitempty"`
-PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
+RequiredBlocks map[uint64]common.Hash `toml:"-"`
 LightServ int `toml:",omitempty"`
 LightIngress int `toml:",omitempty"`
 LightEgress int `toml:",omitempty"`
@@ -71,7 +71,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
 enc.NoPruning = c.NoPruning
 enc.NoPrefetch = c.NoPrefetch
 enc.TxLookupLimit = c.TxLookupLimit
-enc.PeerRequiredBlocks = c.PeerRequiredBlocks
+enc.RequiredBlocks = c.RequiredBlocks
 enc.LightServ = c.LightServ
 enc.LightIngress = c.LightIngress
 enc.LightEgress = c.LightEgress
@@ -120,7 +120,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 NoPruning *bool
 NoPrefetch *bool
 TxLookupLimit *uint64 `toml:",omitempty"`
-PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
+RequiredBlocks map[uint64]common.Hash `toml:"-"`
 LightServ *int `toml:",omitempty"`
 LightIngress *int `toml:",omitempty"`
 LightEgress *int `toml:",omitempty"`
@@ -184,8 +184,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
 if dec.TxLookupLimit != nil {
 c.TxLookupLimit = *dec.TxLookupLimit
 }
-if dec.PeerRequiredBlocks != nil {
-c.PeerRequiredBlocks = dec.PeerRequiredBlocks
+if dec.RequiredBlocks != nil {
+c.RequiredBlocks = dec.RequiredBlocks
 }
 if dec.LightServ != nil {
 c.LightServ = *dec.LightServ

@@ -77,17 +77,16 @@ type txPool interface {
 // handlerConfig is the collection of initialization parameters to create a full
 // node network handler.
 type handlerConfig struct {
-Database ethdb.Database // Database for direct sync insertions
-Chain *core.BlockChain // Blockchain to serve data from
-TxPool txPool // Transaction pool to propagate from
-Merger *consensus.Merger // The manager for eth1/2 transition
-Network uint64 // Network identifier to advertise
-Sync downloader.SyncMode // Whether to snap or full sync
-BloomCache uint64 // Megabytes to alloc for snap sync bloom
-EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
-Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
-PeerRequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
+Database ethdb.Database // Database for direct sync insertions
+Chain *core.BlockChain // Blockchain to serve data from
+TxPool txPool // Transaction pool to propagate from
+Merger *consensus.Merger // The manager for eth1/2 transition
+Network uint64 // Network identifier to advertise
+Sync downloader.SyncMode // Whether to snap or full sync
+BloomCache uint64 // Megabytes to alloc for snap sync bloom
+EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
+Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
+RequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
 }
type handler struct {
@@ -116,7 +115,7 @@ type handler struct {
 txsSub event.Subscription
 minedBlockSub *event.TypeMuxSubscription
-peerRequiredBlocks map[uint64]common.Hash
+requiredBlocks map[uint64]common.Hash
 // channels for fetcher, syncer, txsyncLoop
 quitSync chan struct{}
@@ -133,16 +132,16 @@ func newHandler(config *handlerConfig) (*handler, error) {
 config.EventMux = new(event.TypeMux) // Nicety initialization for tests
 }
 h := &handler{
-networkID: config.Network,
-forkFilter: forkid.NewFilter(config.Chain),
-eventMux: config.EventMux,
-database: config.Database,
-txpool: config.TxPool,
-chain: config.Chain,
-peers: newPeerSet(),
-merger: config.Merger,
-peerRequiredBlocks: config.PeerRequiredBlocks,
-quitSync: make(chan struct{}),
+networkID: config.Network,
+forkFilter: forkid.NewFilter(config.Chain),
+eventMux: config.EventMux,
+database: config.Database,
+txpool: config.TxPool,
+chain: config.Chain,
+peers: newPeerSet(),
+merger: config.Merger,
+requiredBlocks: config.RequiredBlocks,
+quitSync: make(chan struct{}),
 }
 if config.Sync == downloader.FullSync {
 // The database seems empty as the current block is the genesis. Yet the snap
@@ -425,7 +424,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
 }()
 }
 // If we have any explicit peer required block hashes, request them
-for number := range h.peerRequiredBlocks {
+for number, hash := range h.requiredBlocks {
 resCh := make(chan *eth.Response)
 if _, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh); err != nil {
 return err
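The hunk ends inside the required-blocks loop; the handler presumably waits on resCh and drops the peer if the returned header does not match the pinned hash for that block number. A hedged sketch of that check is given below, with verifyRequiredBlock as a hypothetical helper rather than the literal geth code.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// verifyRequiredBlock is a hypothetical helper sketching the sync-challenge
// check: the peer must return exactly the pinned header, or no header at all
// if it has not synced that far yet.
func verifyRequiredBlock(headers []*types.Header, number uint64, want common.Hash) error {
	if len(headers) == 0 {
		// A peer that has not reached the required block yet is not penalised.
		return nil
	}
	if len(headers) > 1 {
		return fmt.Errorf("too many headers in required block response: %d", len(headers))
	}
	if headers[0].Number.Uint64() != number || headers[0].Hash() != want {
		return fmt.Errorf("required block #%d mismatch: have %s, want %s", number, headers[0].Hash(), want)
	}
	return nil
}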