cmd/geth: rename --whitelist to --eth.requiredblocks (#24505)
* cmd, eth: Rename whitelist argument to peer.requiredblocks
* eth/ethconfig: document PeerRequiredBlocks better
* cmd/utils: rename new flag to --eth.requiredblocks

Co-authored-by: Felix Lange <fjl@twurst.com>
commit dbfd397262
parent 6cd72660d0
@@ -107,7 +107,8 @@ var (
         utils.UltraLightFractionFlag,
         utils.UltraLightOnlyAnnounceFlag,
         utils.LightNoSyncServeFlag,
-        utils.WhitelistFlag,
+        utils.EthPeerRequiredBlocksFlag,
+        utils.LegacyWhitelistFlag,
         utils.BloomFilterSizeFlag,
         utils.CacheFlag,
         utils.CacheDatabaseFlag,
@@ -53,7 +53,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
             utils.EthStatsURLFlag,
             utils.IdentityFlag,
             utils.LightKDFFlag,
-            utils.WhitelistFlag,
+            utils.EthPeerRequiredBlocksFlag,
         },
     },
     {
@@ -225,6 +225,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
        Name: "ALIASED (deprecated)",
        Flags: []cli.Flag{
            utils.NoUSBFlag,
+           utils.LegacyWhitelistFlag,
        },
    },
    {
@@ -237,9 +237,13 @@ var (
        Name:  "lightkdf",
        Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
    }
-   WhitelistFlag = cli.StringFlag{
+   EthPeerRequiredBlocksFlag = cli.StringFlag{
+       Name:  "eth.requiredblocks",
+       Usage: "Comma separated block number-to-hash mappings to require for peering (<number>=<hash>)",
+   }
+   LegacyWhitelistFlag = cli.StringFlag{
        Name:  "whitelist",
-       Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)",
+       Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --eth.requiredblocks)",
    }
    BloomFilterSizeFlag = cli.Uint64Flag{
        Name:  "bloomfilter.size",
@@ -1447,26 +1451,33 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
    }
 }
 
-func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) {
-   whitelist := ctx.GlobalString(WhitelistFlag.Name)
-   if whitelist == "" {
+func setPeerRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
+   peerRequiredBlocks := ctx.GlobalString(EthPeerRequiredBlocksFlag.Name)
+
+   if peerRequiredBlocks == "" {
+       if ctx.GlobalIsSet(LegacyWhitelistFlag.Name) {
+           log.Warn("The flag --whitelist is deprecated and will be removed, please use --eth.requiredblocks")
+           peerRequiredBlocks = ctx.GlobalString(LegacyWhitelistFlag.Name)
+       } else {
            return
        }
-   cfg.Whitelist = make(map[uint64]common.Hash)
-   for _, entry := range strings.Split(whitelist, ",") {
+   }
+
+   cfg.PeerRequiredBlocks = make(map[uint64]common.Hash)
+   for _, entry := range strings.Split(peerRequiredBlocks, ",") {
        parts := strings.Split(entry, "=")
        if len(parts) != 2 {
-           Fatalf("Invalid whitelist entry: %s", entry)
+           Fatalf("Invalid peer required block entry: %s", entry)
        }
        number, err := strconv.ParseUint(parts[0], 0, 64)
        if err != nil {
-           Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
+           Fatalf("Invalid peer required block number %s: %v", parts[0], err)
        }
        var hash common.Hash
        if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
-           Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
+           Fatalf("Invalid peer required block hash %s: %v", parts[1], err)
        }
-       cfg.Whitelist[number] = hash
+       cfg.PeerRequiredBlocks[number] = hash
    }
 }
 
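For illustration only (this is not part of the diff): the flag value is a comma-separated list of <number>=<hash> pairs, and the sketch below mirrors the parsing that setPeerRequiredBlocks performs, using a made-up block number and a placeholder hash.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

// parseRequiredBlocks mirrors the parsing in setPeerRequiredBlocks:
// every comma-separated entry must have the form "<number>=<hash>".
func parseRequiredBlocks(value string) (map[uint64]common.Hash, error) {
	required := make(map[uint64]common.Hash)
	for _, entry := range strings.Split(value, ",") {
		parts := strings.Split(entry, "=")
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid entry: %s", entry)
		}
		number, err := strconv.ParseUint(parts[0], 0, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid block number %s: %v", parts[0], err)
		}
		var hash common.Hash
		if err := hash.UnmarshalText([]byte(parts[1])); err != nil {
			return nil, fmt.Errorf("invalid hash %s: %v", parts[1], err)
		}
		required[number] = hash
	}
	return required, nil
}

func main() {
	// Placeholder hash (32 zero-padded bytes ending in "deadbeef"), not a real block.
	hash := "0x" + strings.Repeat("00", 28) + "deadbeef"
	blocks, err := parseRequiredBlocks("1000000=" + hash)
	if err != nil {
		panic(err)
	}
	fmt.Println(blocks)
}
```

In real use the pairs would name canonical block hashes that the node operator wants every peer to agree on.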
@@ -1533,7 +1544,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
    setTxPool(ctx, &cfg.TxPool)
    setEthash(ctx, cfg)
    setMiner(ctx, &cfg.Miner)
-   setWhitelist(ctx, cfg)
+   setPeerRequiredBlocks(ctx, cfg)
    setLes(ctx, cfg)
 
    // Cap the cache allowance and tune the garbage collector
@@ -229,7 +229,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
        BloomCache: uint64(cacheLimit),
        EventMux: eth.eventMux,
        Checkpoint: checkpoint,
-       Whitelist: config.Whitelist,
+       PeerRequiredBlocks: config.PeerRequiredBlocks,
    }); err != nil {
        return nil, err
    }
@@ -138,8 +138,10 @@ type Config struct {
 
    TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
 
-   // Whitelist of required block number -> hash values to accept
-   Whitelist map[uint64]common.Hash `toml:"-"`
+   // PeerRequiredBlocks is a set of block number -> hash mappings which must be in the
+   // canonical chain of all remote peers. Setting the option makes geth verify the
+   // presence of these blocks for every new peer connection.
+   PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
 
    // Light client options
    LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests
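As a usage sketch (again not part of the commit; the block number and hash below are placeholders), the renamed field can be populated directly on an ethconfig.Config before the Ethereum service is constructed:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

func main() {
	cfg := ethconfig.Defaults // start from the stock eth configuration

	// Require every peer to have this (placeholder) block in its canonical chain.
	placeholder := common.HexToHash("0x" + strings.Repeat("00", 28) + "deadbeef")
	cfg.PeerRequiredBlocks = map[uint64]common.Hash{
		1_000_000: placeholder,
	}

	fmt.Println("required blocks configured:", len(cfg.PeerRequiredBlocks))
}
```

Because the field is tagged toml:"-", it is not read from the TOML config file; it comes from the --eth.requiredblocks flag or from code such as the above.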
@@ -26,7 +26,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
        NoPruning bool
        NoPrefetch bool
        TxLookupLimit uint64 `toml:",omitempty"`
-       Whitelist map[uint64]common.Hash `toml:"-"`
+       PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
        LightServ int `toml:",omitempty"`
        LightIngress int `toml:",omitempty"`
        LightEgress int `toml:",omitempty"`
@@ -71,7 +71,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
    enc.NoPruning = c.NoPruning
    enc.NoPrefetch = c.NoPrefetch
    enc.TxLookupLimit = c.TxLookupLimit
-   enc.Whitelist = c.Whitelist
+   enc.PeerRequiredBlocks = c.PeerRequiredBlocks
    enc.LightServ = c.LightServ
    enc.LightIngress = c.LightIngress
    enc.LightEgress = c.LightEgress
@@ -120,7 +120,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
        NoPruning *bool
        NoPrefetch *bool
        TxLookupLimit *uint64 `toml:",omitempty"`
-       Whitelist map[uint64]common.Hash `toml:"-"`
+       PeerRequiredBlocks map[uint64]common.Hash `toml:"-"`
        LightServ *int `toml:",omitempty"`
        LightIngress *int `toml:",omitempty"`
        LightEgress *int `toml:",omitempty"`
@@ -184,8 +184,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
    if dec.TxLookupLimit != nil {
        c.TxLookupLimit = *dec.TxLookupLimit
    }
-   if dec.Whitelist != nil {
-       c.Whitelist = dec.Whitelist
+   if dec.PeerRequiredBlocks != nil {
+       c.PeerRequiredBlocks = dec.PeerRequiredBlocks
    }
    if dec.LightServ != nil {
        c.LightServ = *dec.LightServ
@@ -86,7 +86,8 @@ type handlerConfig struct {
    BloomCache uint64 // Megabytes to alloc for snap sync bloom
    EventMux *event.TypeMux // Legacy event mux, deprecate for `feed`
    Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges
-   Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenged
+
+   PeerRequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges
 }
 
 type handler struct {
@@ -115,7 +116,7 @@ type handler struct {
    txsSub event.Subscription
    minedBlockSub *event.TypeMuxSubscription
 
-   whitelist map[uint64]common.Hash
+   peerRequiredBlocks map[uint64]common.Hash
 
    // channels for fetcher, syncer, txsyncLoop
    quitSync chan struct{}
@@ -140,7 +141,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
        chain: config.Chain,
        peers: newPeerSet(),
        merger: config.Merger,
-       whitelist: config.Whitelist,
+       peerRequiredBlocks: config.PeerRequiredBlocks,
        quitSync: make(chan struct{}),
    }
    if config.Sync == downloader.FullSync {
@@ -423,8 +424,8 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
            }
        }()
    }
-   // If we have any explicit whitelist block hashes, request them
-   for number, hash := range h.whitelist {
+   // If we have any explicit peer required block hashes, request them
+   for number, hash := range h.peerRequiredBlocks {
        resCh := make(chan *eth.Response)
        if _, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh); err != nil {
            return err
@@ -437,25 +438,25 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
            case res := <-resCh:
                headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
                if len(headers) == 0 {
-                   // Whitelisted blocks are allowed to be missing if the remote
+                   // Required blocks are allowed to be missing if the remote
                    // node is not yet synced
                    res.Done <- nil
                    return
                }
                // Validate the header and either drop the peer or continue
                if len(headers) > 1 {
-                   res.Done <- errors.New("too many headers in whitelist response")
+                   res.Done <- errors.New("too many headers in required block response")
                    return
                }
                if headers[0].Number.Uint64() != number || headers[0].Hash() != hash {
-                   peer.Log().Info("Whitelist mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
-                   res.Done <- errors.New("whitelist block mismatch")
+                   peer.Log().Info("Required block mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash)
+                   res.Done <- errors.New("required block mismatch")
                    return
                }
-               peer.Log().Debug("Whitelist block verified", "number", number, "hash", hash)
+               peer.Log().Debug("Peer required block verified", "number", number, "hash", hash)
                res.Done <- nil
            case <-timeout.C:
-               peer.Log().Warn("Whitelist challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
+               peer.Log().Warn("Required block challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
                h.removePeer(peer.ID())
            }
        }(number, hash)
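To spell out the acceptance rule applied to the response above, here is a small hypothetical helper (not code from the commit) that captures the same checks: an empty answer is tolerated because the remote peer may still be syncing, more than one header is rejected, and a single header must match the required number and hash exactly.

```go
package main

import (
	"errors"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// checkRequiredBlock mirrors the rule enforced in runEthPeer for a single
// required-block challenge.
func checkRequiredBlock(headers []*types.Header, number uint64, want common.Hash) error {
	if len(headers) == 0 {
		return nil // remote node may not be synced yet; do not penalize it
	}
	if len(headers) > 1 {
		return errors.New("too many headers in required block response")
	}
	if headers[0].Number.Uint64() != number || headers[0].Hash() != want {
		return errors.New("required block mismatch")
	}
	return nil
}

func main() {
	// Build a throwaway header purely to exercise the check.
	header := &types.Header{Number: big.NewInt(1_000_000)}
	fmt.Println(checkRequiredBlock([]*types.Header{header}, 1_000_000, header.Hash()))
}
```

A mismatch or an over-long response fails the challenge and the peer is dropped, while a timeout (handled in the surrounding select) also removes it.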