les: remove obsolete code related to PoW header syncing (#27737)
This change removes the PoW header-syncing code from LES and also deletes the duplicated packages les/catalyst, les/downloader and les/fetcher. These package copies were created because people wanted to make changes in their eth/ counterparts, but were unable to adapt the LES code to the API changes.
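For code that embeds the light client, the visible effect is twofold: the UltraLight* configuration fields are gone, and there is no les/catalyst Engine API to register. Below is a minimal sketch in Go of post-change wiring, modeled on the startLesService helper in the deleted tests; the main-function scaffolding and panic-based error handling are illustrative, while les.New, node.New and downloader.LightSync follow usages visible in this diff.

package main

import (
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/node"
)

func main() {
	// Create a bare node stack to host the light client.
	stack, err := node.New(&node.Config{})
	if err != nil {
		panic(err)
	}
	cfg := ethconfig.Defaults
	cfg.SyncMode = downloader.LightSync // eth/downloader's SyncMode is used directly now
	// cfg.UltraLightServers, cfg.UltraLightFraction and cfg.UltraLightOnlyAnnounce
	// no longer exist, and there is no lescatalyst.Register call to make: after
	// this change the light client exposes no "engine" API namespace at all.
	if _, err := les.New(stack, &cfg); err != nil {
		panic(err)
	}
	if err := stack.Start(); err != nil {
		panic(err)
	}
	defer stack.Close()
}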
This commit is contained in:
parent 988d84aa7c
commit d4d88f9bce
@@ -93,9 +93,6 @@ var (
		utils.LightMaxPeersFlag,
		utils.LightNoPruneFlag,
		utils.LightKDFFlag,
		utils.UltraLightServersFlag,
		utils.UltraLightFractionFlag,
		utils.UltraLightOnlyAnnounceFlag,
		utils.LightNoSyncServeFlag,
		utils.EthRequiredBlocksFlag,
		utils.LegacyWhitelistFlag,

@@ -61,7 +61,6 @@ import (
	"github.com/ethereum/go-ethereum/internal/ethapi"
	"github.com/ethereum/go-ethereum/internal/flags"
	"github.com/ethereum/go-ethereum/les"
	lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/metrics/exp"
@@ -294,23 +293,6 @@ var (
		Value:    ethconfig.Defaults.LightPeers,
		Category: flags.LightCategory,
	}
	UltraLightServersFlag = &cli.StringFlag{
		Name:     "ulc.servers",
		Usage:    "List of trusted ultra-light servers",
		Value:    strings.Join(ethconfig.Defaults.UltraLightServers, ","),
		Category: flags.LightCategory,
	}
	UltraLightFractionFlag = &cli.IntFlag{
		Name:     "ulc.fraction",
		Usage:    "Minimum % of trusted ultra-light servers required to announce a new head",
		Value:    ethconfig.Defaults.UltraLightFraction,
		Category: flags.LightCategory,
	}
	UltraLightOnlyAnnounceFlag = &cli.BoolFlag{
		Name:     "ulc.onlyannounce",
		Usage:    "Ultra light server sends announcements only",
		Category: flags.LightCategory,
	}
	LightNoPruneFlag = &cli.BoolFlag{
		Name:     "light.nopruning",
		Usage:    "Disable ancient light chain data pruning",
@@ -1211,19 +1193,6 @@ func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
	if ctx.IsSet(LightMaxPeersFlag.Name) {
		cfg.LightPeers = ctx.Int(LightMaxPeersFlag.Name)
	}
	if ctx.IsSet(UltraLightServersFlag.Name) {
		cfg.UltraLightServers = strings.Split(ctx.String(UltraLightServersFlag.Name), ",")
	}
	if ctx.IsSet(UltraLightFractionFlag.Name) {
		cfg.UltraLightFraction = ctx.Int(UltraLightFractionFlag.Name)
	}
	if cfg.UltraLightFraction <= 0 && cfg.UltraLightFraction > 100 {
		log.Error("Ultra light fraction is invalid", "had", cfg.UltraLightFraction, "updated", ethconfig.Defaults.UltraLightFraction)
		cfg.UltraLightFraction = ethconfig.Defaults.UltraLightFraction
	}
	if ctx.IsSet(UltraLightOnlyAnnounceFlag.Name) {
		cfg.UltraLightOnlyAnnounce = ctx.Bool(UltraLightOnlyAnnounceFlag.Name)
	}
	if ctx.IsSet(LightNoPruneFlag.Name) {
		cfg.LightNoPrune = ctx.Bool(LightNoPruneFlag.Name)
	}
@@ -1884,9 +1853,6 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend
			Fatalf("Failed to register the Ethereum service: %v", err)
		}
		stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
		if err := lescatalyst.Register(stack, backend); err != nil {
			Fatalf("Failed to register the Engine API service: %v", err)
		}
		return backend.ApiBackend, nil
	}
	backend, err := eth.New(stack, cfg)

@@ -61,7 +61,6 @@ var Defaults = Config{
	NetworkId:          1,
	TxLookupLimit:      2350000,
	LightPeers:         100,
	UltraLightFraction: 75,
	DatabaseCache:      512,
	TrieCleanCache:     154,
	TrieDirtyCache:     256,
@@ -111,11 +110,6 @@ type Config struct {
	LightNoPrune     bool `toml:",omitempty"` // Whether to disable light chain pruning
	LightNoSyncServe bool `toml:",omitempty"` // Whether to serve light clients before syncing

	// Ultra Light client options
	UltraLightServers      []string `toml:",omitempty"` // List of trusted ultra light servers
	UltraLightFraction     int      `toml:",omitempty"` // Percentage of trusted servers to accept an announcement
	UltraLightOnlyAnnounce bool     `toml:",omitempty"` // Whether to only announce headers, or also serve them

	// Database options
	SkipBcVersionCheck bool `toml:"-"`
	DatabaseHandles    int  `toml:"-"`

@@ -31,9 +31,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
		LightPeers              int                    `toml:",omitempty"`
		LightNoPrune            bool                   `toml:",omitempty"`
		LightNoSyncServe        bool                   `toml:",omitempty"`
		UltraLightServers       []string               `toml:",omitempty"`
		UltraLightFraction      int                    `toml:",omitempty"`
		UltraLightOnlyAnnounce  bool                   `toml:",omitempty"`
		SkipBcVersionCheck      bool                   `toml:"-"`
		DatabaseHandles         int                    `toml:"-"`
		DatabaseCache           int
@@ -71,9 +68,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
	enc.LightPeers = c.LightPeers
	enc.LightNoPrune = c.LightNoPrune
	enc.LightNoSyncServe = c.LightNoSyncServe
	enc.UltraLightServers = c.UltraLightServers
	enc.UltraLightFraction = c.UltraLightFraction
	enc.UltraLightOnlyAnnounce = c.UltraLightOnlyAnnounce
	enc.SkipBcVersionCheck = c.SkipBcVersionCheck
	enc.DatabaseHandles = c.DatabaseHandles
	enc.DatabaseCache = c.DatabaseCache
@@ -115,9 +109,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
		LightPeers              *int                   `toml:",omitempty"`
		LightNoPrune            *bool                  `toml:",omitempty"`
		LightNoSyncServe        *bool                  `toml:",omitempty"`
		UltraLightServers       []string               `toml:",omitempty"`
		UltraLightFraction      *int                   `toml:",omitempty"`
		UltraLightOnlyAnnounce  *bool                  `toml:",omitempty"`
		SkipBcVersionCheck      *bool                  `toml:"-"`
		DatabaseHandles         *int                   `toml:"-"`
		DatabaseCache           *int
@@ -188,15 +179,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
	if dec.LightNoSyncServe != nil {
		c.LightNoSyncServe = *dec.LightNoSyncServe
	}
	if dec.UltraLightServers != nil {
		c.UltraLightServers = dec.UltraLightServers
	}
	if dec.UltraLightFraction != nil {
		c.UltraLightFraction = *dec.UltraLightFraction
	}
	if dec.UltraLightOnlyAnnounce != nil {
		c.UltraLightOnlyAnnounce = *dec.UltraLightOnlyAnnounce
	}
	if dec.SkipBcVersionCheck != nil {
		c.SkipBcVersionCheck = *dec.SkipBcVersionCheck
	}

@@ -57,7 +57,6 @@ func (b *LesApiBackend) CurrentBlock() *types.Header {
}

func (b *LesApiBackend) SetHead(number uint64) {
	b.eth.handler.downloader.Cancel()
	b.eth.blockchain.SetHead(number)
}

@@ -264,7 +263,7 @@ func (b *LesApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEven
}

func (b *LesApiBackend) SyncProgress() ethereum.SyncProgress {
	return b.eth.Downloader().Progress()
	return ethereum.SyncProgress{}
}

func (b *LesApiBackend) ProtocolVersion() int {

@@ -31,9 +31,8 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/eth"
	ethdownloader "github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/les/downloader"
	"github.com/ethereum/go-ethereum/les/flowcontrol"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
@@ -493,13 +492,13 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []

func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
	config := ethconfig.Defaults
	config.SyncMode = (ethdownloader.SyncMode)(downloader.LightSync)
	config.SyncMode = downloader.LightSync
	return New(stack, &config)
}

func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
	config := ethconfig.Defaults
	config.SyncMode = (ethdownloader.SyncMode)(downloader.FullSync)
	config.SyncMode = downloader.FullSync
	config.LightServ = testServerCapacity
	config.LightPeers = testMaxClients
	ethereum, err := eth.New(stack, &config)

@@ -1,220 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package catalyst implements the temporary eth1/eth2 RPC integration.
package catalyst

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rpc"
)

// Register adds catalyst APIs to the light client.
func Register(stack *node.Node, backend *les.LightEthereum) error {
	log.Warn("Catalyst mode enabled", "protocol", "les")
	stack.RegisterAPIs([]rpc.API{
		{
			Namespace:     "engine",
			Service:       NewConsensusAPI(backend),
			Authenticated: true,
		},
	})
	return nil
}

type ConsensusAPI struct {
	les *les.LightEthereum
}

// NewConsensusAPI creates a new consensus api for the given backend.
// The underlying blockchain needs to have a valid terminal total difficulty set.
func NewConsensusAPI(les *les.LightEthereum) *ConsensusAPI {
	if les.BlockChain().Config().TerminalTotalDifficulty == nil {
		log.Warn("Catalyst started without valid total difficulty")
	}
	return &ConsensusAPI{les: les}
}

// ForkchoiceUpdatedV1 has several responsibilities:
//
// We try to set our blockchain to the headBlock.
//
// If the method is called with an empty head block: we return success, which can be used
// to check if the catalyst mode is enabled.
//
// If the total difficulty was not reached: we return INVALID.
//
// If the finalizedBlockHash is set: we check if we have the finalizedBlockHash in our db,
// if not we start a sync.
//
// If there are payloadAttributes: we return an error since block creation is not
// supported in les mode.
func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
	if heads.HeadBlockHash == (common.Hash{}) {
		log.Warn("Forkchoice requested update to zero hash")
		return engine.STATUS_INVALID, nil // TODO(karalabe): Why does someone send us this?
	}
	if err := api.checkTerminalTotalDifficulty(heads.HeadBlockHash); err != nil {
		if header := api.les.BlockChain().GetHeaderByHash(heads.HeadBlockHash); header == nil {
			// TODO (MariusVanDerWijden) trigger sync
			return engine.STATUS_SYNCING, nil
		}
		return engine.STATUS_INVALID, err
	}
	// If the finalized block is set, check if it is in our blockchain
	if heads.FinalizedBlockHash != (common.Hash{}) {
		if header := api.les.BlockChain().GetHeaderByHash(heads.FinalizedBlockHash); header == nil {
			// TODO (MariusVanDerWijden) trigger sync
			return engine.STATUS_SYNCING, nil
		}
	}
	// SetHead
	if err := api.setCanonical(heads.HeadBlockHash); err != nil {
		return engine.STATUS_INVALID, err
	}
	if payloadAttributes != nil {
		return engine.STATUS_INVALID, errors.New("not supported")
	}
	return api.validForkChoiceResponse(), nil
}

// GetPayloadV1 returns a cached payload by id. It's not supported in les mode.
func (api *ConsensusAPI) GetPayloadV1(payloadID engine.PayloadID) (*engine.ExecutableData, error) {
	return nil, engine.GenericServerError.With(errors.New("not supported in light client mode"))
}

// ExecutePayloadV1 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
func (api *ConsensusAPI) ExecutePayloadV1(params engine.ExecutableData) (engine.PayloadStatusV1, error) {
	block, err := engine.ExecutableDataToBlock(params)
	if err != nil {
		return api.invalid(), err
	}
	if !api.les.BlockChain().HasHeader(block.ParentHash(), block.NumberU64()-1) {
		/*
			TODO (MariusVanDerWijden) reenable once sync is merged
			if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), block.Header()); err != nil {
				return SYNCING, err
			}
		*/
		// TODO (MariusVanDerWijden) we should return nil here not empty hash
		return engine.PayloadStatusV1{Status: engine.SYNCING, LatestValidHash: nil}, nil
	}
	parent := api.les.BlockChain().GetHeaderByHash(params.ParentHash)
	if parent == nil {
		return api.invalid(), fmt.Errorf("could not find parent %x", params.ParentHash)
	}
	td := api.les.BlockChain().GetTd(parent.Hash(), block.NumberU64()-1)
	ttd := api.les.BlockChain().Config().TerminalTotalDifficulty
	if td.Cmp(ttd) < 0 {
		return api.invalid(), fmt.Errorf("can not execute payload on top of block with low td got: %v threshold %v", td, ttd)
	}
	if err = api.les.BlockChain().InsertHeader(block.Header()); err != nil {
		return api.invalid(), err
	}
	if merger := api.les.Merger(); !merger.TDDReached() {
		merger.ReachTTD()
	}
	hash := block.Hash()
	return engine.PayloadStatusV1{Status: engine.VALID, LatestValidHash: &hash}, nil
}

func (api *ConsensusAPI) validForkChoiceResponse() engine.ForkChoiceResponse {
	currentHash := api.les.BlockChain().CurrentHeader().Hash()
	return engine.ForkChoiceResponse{
		PayloadStatus: engine.PayloadStatusV1{Status: engine.VALID, LatestValidHash: &currentHash},
	}
}

// invalid returns a response "INVALID" with the latest valid hash set to the current head.
func (api *ConsensusAPI) invalid() engine.PayloadStatusV1 {
	currentHash := api.les.BlockChain().CurrentHeader().Hash()
	return engine.PayloadStatusV1{Status: engine.INVALID, LatestValidHash: &currentHash}
}

func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error {
	// shortcut if we entered PoS already
	if api.les.Merger().PoSFinalized() {
		return nil
	}
	// make sure the parent has enough terminal total difficulty
	header := api.les.BlockChain().GetHeaderByHash(head)
	if header == nil {
		return errors.New("unknown header")
	}
	td := api.les.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
	if td != nil && td.Cmp(api.les.BlockChain().Config().TerminalTotalDifficulty) < 0 {
		return errors.New("invalid ttd")
	}
	return nil
}

// setCanonical is called to perform a force choice.
func (api *ConsensusAPI) setCanonical(newHead common.Hash) error {
	log.Info("Setting head", "head", newHead)

	headHeader := api.les.BlockChain().CurrentHeader()
	if headHeader.Hash() == newHead {
		return nil
	}
	newHeadHeader := api.les.BlockChain().GetHeaderByHash(newHead)
	if newHeadHeader == nil {
		return errors.New("unknown header")
	}
	if err := api.les.BlockChain().SetCanonical(newHeadHeader); err != nil {
		return err
	}
	// Trigger the transition if it's the first `NewHead` event.
	if merger := api.les.Merger(); !merger.PoSFinalized() {
		merger.FinalizePoS()
	}
	return nil
}

// ExchangeTransitionConfigurationV1 checks the given configuration against
// the configuration of the node.
func (api *ConsensusAPI) ExchangeTransitionConfigurationV1(config engine.TransitionConfigurationV1) (*engine.TransitionConfigurationV1, error) {
	log.Trace("Engine API request received", "method", "ExchangeTransitionConfiguration", "ttd", config.TerminalTotalDifficulty)
	if config.TerminalTotalDifficulty == nil {
		return nil, errors.New("invalid terminal total difficulty")
	}

	ttd := api.les.BlockChain().Config().TerminalTotalDifficulty
	if ttd == nil || ttd.Cmp(config.TerminalTotalDifficulty.ToInt()) != 0 {
		log.Warn("Invalid TTD configured", "geth", ttd, "beacon", config.TerminalTotalDifficulty)
		return nil, fmt.Errorf("invalid ttd: execution %v consensus %v", ttd, config.TerminalTotalDifficulty)
	}

	if config.TerminalBlockHash != (common.Hash{}) {
		if hash := api.les.BlockChain().GetCanonicalHash(uint64(config.TerminalBlockNumber)); hash == config.TerminalBlockHash {
			return &engine.TransitionConfigurationV1{
				TerminalTotalDifficulty: (*hexutil.Big)(ttd),
				TerminalBlockHash:       config.TerminalBlockHash,
				TerminalBlockNumber:     config.TerminalBlockNumber,
			}, nil
		}
		return nil, errors.New("invalid terminal block hash")
	}

	return &engine.TransitionConfigurationV1{TerminalTotalDifficulty: (*hexutil.Big)(ttd)}, nil
}

@@ -1,251 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package catalyst

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

var (
	// testKey is a private key to use for funding a tester account.
	testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")

	// testAddr is the Ethereum address of the tester account.
	testAddr = crypto.PubkeyToAddress(testKey.PublicKey)

	testBalance = big.NewInt(2e18)
)

func generatePreMergeChain(pre, post int) (*core.Genesis, []*types.Header, []*types.Block, []*types.Header, []*types.Block) {
	config := *params.AllEthashProtocolChanges
	genesis := &core.Genesis{
		Config:    &config,
		Alloc:     core.GenesisAlloc{testAddr: {Balance: testBalance}},
		ExtraData: []byte("test genesis"),
		Timestamp: 9000,
		BaseFee:   big.NewInt(params.InitialBaseFee),
	}
	// Pre-merge blocks
	db, preBLocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), pre, nil)
	totalDifficulty := new(big.Int).Set(params.GenesisDifficulty)

	var preHeaders []*types.Header
	for _, b := range preBLocks {
		totalDifficulty.Add(totalDifficulty, b.Difficulty())
		preHeaders = append(preHeaders, b.Header())
	}
	config.TerminalTotalDifficulty = totalDifficulty
	// Post-merge blocks
	postBlocks, _ := core.GenerateChain(genesis.Config,
		preBLocks[len(preBLocks)-1], ethash.NewFaker(), db, post,
		func(i int, b *core.BlockGen) {
			b.SetPoS()
		})

	var postHeaders []*types.Header
	for _, b := range postBlocks {
		postHeaders = append(postHeaders, b.Header())
	}

	return genesis, preHeaders, preBLocks, postHeaders, postBlocks
}

func TestSetHeadBeforeTotalDifficulty(t *testing.T) {
	genesis, headers, blocks, _, _ := generatePreMergeChain(10, 0)
	n, lesService := startLesService(t, genesis, headers)
	defer n.Close()

	api := NewConsensusAPI(lesService)
	fcState := engine.ForkchoiceStateV1{
		HeadBlockHash:      blocks[5].Hash(),
		SafeBlockHash:      common.Hash{},
		FinalizedBlockHash: common.Hash{},
	}
	if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err == nil {
		t.Errorf("fork choice updated before total terminal difficulty should fail")
	}
}

func TestExecutePayloadV1(t *testing.T) {
	genesis, headers, _, _, postBlocks := generatePreMergeChain(10, 2)
	n, lesService := startLesService(t, genesis, headers)
	lesService.Merger().ReachTTD()
	defer n.Close()

	api := NewConsensusAPI(lesService)
	fcState := engine.ForkchoiceStateV1{
		HeadBlockHash:      postBlocks[0].Hash(),
		SafeBlockHash:      common.Hash{},
		FinalizedBlockHash: common.Hash{},
	}
	if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
		t.Errorf("Failed to update head %v", err)
	}
	block := postBlocks[0]

	fakeBlock := types.NewBlock(&types.Header{
		ParentHash:  block.ParentHash(),
		UncleHash:   crypto.Keccak256Hash(nil),
		Coinbase:    block.Coinbase(),
		Root:        block.Root(),
		TxHash:      crypto.Keccak256Hash(nil),
		ReceiptHash: crypto.Keccak256Hash(nil),
		Bloom:       block.Bloom(),
		Difficulty:  big.NewInt(0),
		Number:      block.Number(),
		GasLimit:    block.GasLimit(),
		GasUsed:     block.GasUsed(),
		Time:        block.Time(),
		Extra:       block.Extra(),
		MixDigest:   block.MixDigest(),
		Nonce:       types.BlockNonce{},
		BaseFee:     block.BaseFee(),
	}, nil, nil, nil, trie.NewStackTrie(nil))

	_, err := api.ExecutePayloadV1(engine.ExecutableData{
		ParentHash:    fakeBlock.ParentHash(),
		FeeRecipient:  fakeBlock.Coinbase(),
		StateRoot:     fakeBlock.Root(),
		ReceiptsRoot:  fakeBlock.ReceiptHash(),
		LogsBloom:     fakeBlock.Bloom().Bytes(),
		Random:        fakeBlock.MixDigest(),
		Number:        fakeBlock.NumberU64(),
		GasLimit:      fakeBlock.GasLimit(),
		GasUsed:       fakeBlock.GasUsed(),
		Timestamp:     fakeBlock.Time(),
		ExtraData:     fakeBlock.Extra(),
		BaseFeePerGas: fakeBlock.BaseFee(),
		BlockHash:     fakeBlock.Hash(),
		Transactions:  encodeTransactions(fakeBlock.Transactions()),
	})
	if err != nil {
		t.Errorf("Failed to execute payload %v", err)
	}
	headHeader := api.les.BlockChain().CurrentHeader()
	if headHeader.Number.Uint64() != fakeBlock.NumberU64()-1 {
		t.Fatal("Unexpected chain head update")
	}
	fcState = engine.ForkchoiceStateV1{
		HeadBlockHash:      fakeBlock.Hash(),
		SafeBlockHash:      common.Hash{},
		FinalizedBlockHash: common.Hash{},
	}
	if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil {
		t.Fatal("Failed to update head")
	}
	headHeader = api.les.BlockChain().CurrentHeader()
	if headHeader.Number.Uint64() != fakeBlock.NumberU64() {
		t.Fatal("Failed to update chain head")
	}
}

func TestEth2DeepReorg(t *testing.T) {
	// TODO (MariusVanDerWijden) TestEth2DeepReorg is currently broken, because it tries to reorg
	// before the totalTerminalDifficulty threshold
	/*
		genesis, preMergeBlocks := generatePreMergeChain(core.TriesInMemory * 2)
		n, ethservice := startEthService(t, genesis, preMergeBlocks)
		defer n.Close()

		var (
			api    = NewConsensusAPI(ethservice, nil)
			parent = preMergeBlocks[len(preMergeBlocks)-core.TriesInMemory-1]
			head   = ethservice.BlockChain().CurrentBlock().NumberU64()
		)
		if ethservice.BlockChain().HasBlockAndState(parent.Hash(), parent.NumberU64()) {
			t.Errorf("Block %d not pruned", parent.NumberU64())
		}
		for i := 0; i < 10; i++ {
			execData, err := api.assembleBlock(AssembleBlockParams{
				ParentHash: parent.Hash(),
				Timestamp:  parent.Time() + 5,
			})
			if err != nil {
				t.Fatalf("Failed to create the executable data %v", err)
			}
			block, err := ExecutableDataToBlock(ethservice.BlockChain().Config(), parent.Header(), *execData)
			if err != nil {
				t.Fatalf("Failed to convert executable data to block %v", err)
			}
			newResp, err := api.ExecutePayload(*execData)
			if err != nil || newResp.Status != "VALID" {
				t.Fatalf("Failed to insert block: %v", err)
			}
			if ethservice.BlockChain().CurrentBlock().NumberU64() != head {
				t.Fatalf("Chain head shouldn't be updated")
			}
			if err := api.setCanonical(block.Hash()); err != nil {
				t.Fatalf("Failed to set head: %v", err)
			}
			if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() {
				t.Fatalf("Chain head should be updated")
			}
			parent, head = block, block.NumberU64()
		}
	*/
}

// startEthService creates a full node instance for testing.
func startLesService(t *testing.T, genesis *core.Genesis, headers []*types.Header) (*node.Node, *les.LightEthereum) {
	t.Helper()

	n, err := node.New(&node.Config{})
	if err != nil {
		t.Fatal("can't create node:", err)
	}
	ethcfg := &ethconfig.Config{
		Genesis:        genesis,
		SyncMode:       downloader.LightSync,
		TrieDirtyCache: 256,
		TrieCleanCache: 256,
		LightPeers:     10,
	}
	lesService, err := les.New(n, ethcfg)
	if err != nil {
		t.Fatal("can't create eth service:", err)
	}
	if err := n.Start(); err != nil {
		t.Fatal("can't start node:", err)
	}
	if _, err := lesService.BlockChain().InsertHeaderChain(headers); err != nil {
		n.Close()
		t.Fatal("can't import test headers:", err)
	}
	return n, lesService
}

func encodeTransactions(txs []*types.Transaction) [][]byte {
	var enc = make([][]byte, len(txs))
	for i, tx := range txs {
		enc[i], _ = tx.MarshalBinary()
	}
	return enc
}

@@ -36,7 +36,6 @@ import (
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/internal/ethapi"
	"github.com/ethereum/go-ethereum/internal/shutdowncheck"
	"github.com/ethereum/go-ethereum/les/downloader"
	"github.com/ethereum/go-ethereum/les/vflux"
	vfc "github.com/ethereum/go-ethereum/les/vflux/client"
	"github.com/ethereum/go-ethereum/light"
@@ -64,7 +63,6 @@ type LightEthereum struct {
	blockchain         *light.LightChain
	serverPool         *vfc.ServerPool
	serverPoolIterator enode.Iterator
	pruner             *pruner
	merger             *consensus.Merger

	bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
@@ -146,7 +144,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
	if leth.udpEnabled {
		prenegQuery = leth.prenegQuery
	}
	leth.serverPool, leth.serverPoolIterator = vfc.NewServerPool(lesDb, []byte("serverpool:"), time.Second, prenegQuery, &mclock.System{}, config.UltraLightServers, requestList)
	leth.serverPool, leth.serverPoolIterator = vfc.NewServerPool(lesDb, []byte("serverpool:"), time.Second, prenegQuery, &mclock.System{}, nil, requestList)
	leth.serverPool.AddMetrics(suggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge, sessionValueMeter, serverDialedMeter)

	leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool.GetTimeout)
@@ -170,9 +168,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
	leth.chtIndexer.Start(leth.blockchain)
	leth.bloomIndexer.Start(leth.blockchain)

	// Start a light chain pruner to delete useless historical data.
	leth.pruner = newPruner(chainDb, leth.chtIndexer, leth.bloomTrieIndexer)

	// Rewind the chain in case of an incompatible config upgrade.
	if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
		log.Warn("Rewinding chain to upgrade configuration", "err", compat)
@@ -191,7 +186,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
	}
	leth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams)

	leth.handler = newClientHandler(config.UltraLightServers, config.UltraLightFraction, leth)
	leth.handler = newClientHandler(leth)
	leth.netRPCService = ethapi.NewNetAPI(leth.p2pServer, leth.config.NetworkId)

	// Register the backend on the node
@@ -298,9 +293,6 @@ func (s *LightEthereum) APIs() []rpc.API {
		{
			Namespace: "eth",
			Service:   &LightDummyAPI{},
		}, {
			Namespace: "eth",
			Service:   downloader.NewDownloaderAPI(s.handler.downloader, s.eventMux),
		}, {
			Namespace: "net",
			Service:   s.netRPCService,
@@ -315,13 +307,12 @@ func (s *LightEthereum) ResetWithGenesisBlock(gb *types.Block) {
	s.blockchain.ResetWithGenesisBlock(gb)
}

func (s *LightEthereum) BlockChain() *light.LightChain      { return s.blockchain }
func (s *LightEthereum) TxPool() *light.TxPool              { return s.txPool }
func (s *LightEthereum) Engine() consensus.Engine           { return s.engine }
func (s *LightEthereum) LesVersion() int                    { return int(ClientProtocolVersions[0]) }
func (s *LightEthereum) Downloader() *downloader.Downloader { return s.handler.downloader }
func (s *LightEthereum) EventMux() *event.TypeMux           { return s.eventMux }
func (s *LightEthereum) Merger() *consensus.Merger          { return s.merger }
func (s *LightEthereum) BlockChain() *light.LightChain { return s.blockchain }
func (s *LightEthereum) TxPool() *light.TxPool         { return s.txPool }
func (s *LightEthereum) Engine() consensus.Engine      { return s.engine }
func (s *LightEthereum) LesVersion() int               { return int(ClientProtocolVersions[0]) }
func (s *LightEthereum) EventMux() *event.TypeMux      { return s.eventMux }
func (s *LightEthereum) Merger() *consensus.Merger     { return s.merger }

// Protocols returns all the currently configured network protocols to start.
func (s *LightEthereum) Protocols() []p2p.Protocol {
@@ -354,7 +345,6 @@ func (s *LightEthereum) Start() error {
	// Start bloom request workers.
	s.wg.Add(bloomServiceThreads)
	s.startBloomHandlers(params.BloomBitsBlocksClient)
	s.handler.start()

	return nil
}
@@ -374,7 +364,6 @@ func (s *LightEthereum) Stop() error {
	s.handler.stop()
	s.txPool.Stop()
	s.engine.Close()
	s.pruner.close()
	s.eventMux.Stop()
	// Clean shutdown marker as the last thing before closing db
	s.shutdownTracker.Stop()

@@ -17,9 +17,6 @@
package les

import (
	"context"
	"math/big"
	"math/rand"
	"sync"
	"time"

@@ -27,68 +24,37 @@ import (
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/les/downloader"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
)

// clientHandler is responsible for receiving and processing all incoming server
// responses.
type clientHandler struct {
	ulc        *ulc
	forkFilter forkid.Filter
	fetcher    *lightFetcher
	downloader *downloader.Downloader
	backend    *LightEthereum

	closeCh chan struct{}
	wg      sync.WaitGroup // WaitGroup used to track all connected peers.

	// Hooks used in the testing
	syncStart func(header *types.Header) // Hook called when the syncing is started
	syncEnd   func(header *types.Header) // Hook called when the syncing is done
}

func newClientHandler(ulcServers []string, ulcFraction int, backend *LightEthereum) *clientHandler {
func newClientHandler(backend *LightEthereum) *clientHandler {
	handler := &clientHandler{
		forkFilter: forkid.NewFilter(backend.blockchain),
		backend:    backend,
		closeCh:    make(chan struct{}),
	}
	if ulcServers != nil {
		ulc, err := newULC(ulcServers, ulcFraction)
		if err != nil {
			log.Error("Failed to initialize ultra light client")
		}
		handler.ulc = ulc
		log.Info("Enable ultra light client mode")
	}
	handler.fetcher = newLightFetcher(backend.blockchain, backend.engine, backend.peers, handler.ulc, backend.chainDb, backend.reqDist, handler.synchronise)
	handler.downloader = downloader.New(0, backend.chainDb, backend.eventMux, nil, backend.blockchain, handler.removePeer)
	handler.backend.peers.subscribe((*downloaderPeerNotify)(handler))
	return handler
}

func (h *clientHandler) start() {
	h.fetcher.start()
}

func (h *clientHandler) stop() {
	close(h.closeCh)
	h.downloader.Terminate()
	h.fetcher.stop()
	h.wg.Wait()
}

// runPeer is the p2p protocol run function for the given version.
func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
	trusted := false
	if h.ulc != nil {
		trusted = h.ulc.trusted(p.ID())
	}
	peer := newServerPeer(int(version), h.backend.config.NetworkId, trusted, p, newMeteredMsgWriter(rw, int(version)))
	peer := newServerPeer(int(version), h.backend.config.NetworkId, false, p, newMeteredMsgWriter(rw, int(version)))
	defer peer.close()
	h.wg.Add(1)
	defer h.wg.Done()
@@ -136,12 +102,6 @@ func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error {
		serverConnectionGauge.Update(int64(h.backend.peers.len()))
	}()

	// Discard all the announces after the transition
	// Also discarding initial signal to prevent syncing during testing.
	if !(noInitAnnounce || h.backend.merger.TDDReached()) {
		h.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td})
	}

	// Mark the peer starts to be served.
	p.serving.Store(true)
	defer p.serving.Store(false)
@@ -206,11 +166,6 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {

			// Update peer head information first and then notify the announcement
			p.updateHead(req.Hash, req.Number, req.Td)

			// Discard all the announces after the transition
			if !h.backend.merger.TDDReached() {
				h.fetcher.announce(p, &req)
			}
		}
	case msg.Code == BlockHeadersMsg:
		p.Log().Trace("Received block header response message")
@@ -221,28 +176,13 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
		if err := msg.Decode(&resp); err != nil {
			return errResp(ErrDecode, "msg %v: %v", msg, err)
		}
		headers := resp.Headers
		p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
		p.answeredRequest(resp.ReqID)

		// Filter out the explicitly requested header by the retriever
		if h.backend.retriever.requested(resp.ReqID) {
			deliverMsg = &Msg{
				MsgType: MsgBlockHeaders,
				ReqID:   resp.ReqID,
				Obj:     resp.Headers,
			}
		} else {
			// Filter out any explicitly requested headers, deliver the rest to the downloader
			filter := len(headers) == 1
			if filter {
				headers = h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)
			}
			if len(headers) != 0 || !filter {
				if err := h.downloader.DeliverHeaders(p.id, headers); err != nil {
					log.Debug("Failed to deliver headers", "err", err)
				}
			}
		deliverMsg = &Msg{
			MsgType: MsgBlockHeaders,
			ReqID:   resp.ReqID,
			Obj:     resp.Headers,
		}
	case msg.Code == BlockBodiesMsg:
		p.Log().Trace("Received block bodies response")
@@ -366,117 +306,3 @@ func (h *clientHandler) handleMsg(p *serverPeer) error {
	}
	return nil
}

func (h *clientHandler) removePeer(id string) {
	h.backend.peers.unregister(id)
}

type peerConnection struct {
	handler *clientHandler
	peer    *serverPeer
}

func (pc *peerConnection) Head() (common.Hash, *big.Int) {
	return pc.peer.HeadAndTd()
}

func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
	rq := &distReq{
		getCost: func(dp distPeer) uint64 {
			peer := dp.(*serverPeer)
			return peer.getRequestCost(GetBlockHeadersMsg, amount)
		},
		canSend: func(dp distPeer) bool {
			return dp.(*serverPeer) == pc.peer
		},
		request: func(dp distPeer) func() {
			reqID := rand.Uint64()
			peer := dp.(*serverPeer)
			cost := peer.getRequestCost(GetBlockHeadersMsg, amount)
			peer.fcServer.QueuedRequest(reqID, cost)
			return func() { peer.requestHeadersByHash(reqID, origin, amount, skip, reverse) }
		},
	}
	_, ok := <-pc.handler.backend.reqDist.queue(rq)
	if !ok {
		return light.ErrNoPeers
	}
	return nil
}

func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
	rq := &distReq{
		getCost: func(dp distPeer) uint64 {
			peer := dp.(*serverPeer)
			return peer.getRequestCost(GetBlockHeadersMsg, amount)
		},
		canSend: func(dp distPeer) bool {
			return dp.(*serverPeer) == pc.peer
		},
		request: func(dp distPeer) func() {
			reqID := rand.Uint64()
			peer := dp.(*serverPeer)
			cost := peer.getRequestCost(GetBlockHeadersMsg, amount)
			peer.fcServer.QueuedRequest(reqID, cost)
			return func() { peer.requestHeadersByNumber(reqID, origin, amount, skip, reverse) }
		},
	}
	_, ok := <-pc.handler.backend.reqDist.queue(rq)
	if !ok {
		return light.ErrNoPeers
	}
	return nil
}

// RetrieveSingleHeaderByNumber requests a single header by the specified block
// number. This function will wait the response until it's timeout or delivered.
func (pc *peerConnection) RetrieveSingleHeaderByNumber(context context.Context, number uint64) (*types.Header, error) {
	reqID := rand.Uint64()
	rq := &distReq{
		getCost: func(dp distPeer) uint64 {
			peer := dp.(*serverPeer)
			return peer.getRequestCost(GetBlockHeadersMsg, 1)
		},
		canSend: func(dp distPeer) bool {
			return dp.(*serverPeer) == pc.peer
		},
		request: func(dp distPeer) func() {
			peer := dp.(*serverPeer)
			cost := peer.getRequestCost(GetBlockHeadersMsg, 1)
			peer.fcServer.QueuedRequest(reqID, cost)
			return func() { peer.requestHeadersByNumber(reqID, number, 1, 0, false) }
		},
	}
	var header *types.Header
	if err := pc.handler.backend.retriever.retrieve(context, reqID, rq, func(peer distPeer, msg *Msg) error {
		if msg.MsgType != MsgBlockHeaders {
			return errInvalidMessageType
		}
		headers := msg.Obj.([]*types.Header)
		if len(headers) != 1 {
			return errInvalidEntryCount
		}
		header = headers[0]
		return nil
	}, nil); err != nil {
		return nil, err
	}
	return header, nil
}

// downloaderPeerNotify implements peerSetNotify
type downloaderPeerNotify clientHandler

func (d *downloaderPeerNotify) registerPeer(p *serverPeer) {
	h := (*clientHandler)(d)
	pc := &peerConnection{
		handler: h,
		peer:    p,
	}
	h.downloader.RegisterLightPeer(p.id, eth.ETH66, pc)
}

func (d *downloaderPeerNotify) unregisterPeer(p *serverPeer) {
	h := (*clientHandler)(d)
	h.downloader.UnregisterPeer(p.id)
}

@@ -1,166 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"context"
	"sync"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/rpc"
)

// DownloaderAPI provides an API which gives information about the current synchronisation status.
// It offers only methods that operates on data that can be available to anyone without security risks.
type DownloaderAPI struct {
	d                         *Downloader
	mux                       *event.TypeMux
	installSyncSubscription   chan chan interface{}
	uninstallSyncSubscription chan *uninstallSyncSubscriptionRequest
}

// NewDownloaderAPI create a new PublicDownloaderAPI. The API has an internal event loop that
// listens for events from the downloader through the global event mux. In case it receives one of
// these events it broadcasts it to all syncing subscriptions that are installed through the
// installSyncSubscription channel.
func NewDownloaderAPI(d *Downloader, m *event.TypeMux) *DownloaderAPI {
	api := &DownloaderAPI{
		d:                         d,
		mux:                       m,
		installSyncSubscription:   make(chan chan interface{}),
		uninstallSyncSubscription: make(chan *uninstallSyncSubscriptionRequest),
	}

	go api.eventLoop()

	return api
}

// eventLoop runs a loop until the event mux closes. It will install and uninstall new
// sync subscriptions and broadcasts sync status updates to the installed sync subscriptions.
func (api *DownloaderAPI) eventLoop() {
	var (
		sub               = api.mux.Subscribe(StartEvent{}, DoneEvent{}, FailedEvent{})
		syncSubscriptions = make(map[chan interface{}]struct{})
	)

	for {
		select {
		case i := <-api.installSyncSubscription:
			syncSubscriptions[i] = struct{}{}
		case u := <-api.uninstallSyncSubscription:
			delete(syncSubscriptions, u.c)
			close(u.uninstalled)
		case event := <-sub.Chan():
			if event == nil {
				return
			}

			var notification interface{}
			switch event.Data.(type) {
			case StartEvent:
				notification = &SyncingResult{
					Syncing: true,
					Status:  api.d.Progress(),
				}
			case DoneEvent, FailedEvent:
				notification = false
			}
			// broadcast
			for c := range syncSubscriptions {
				c <- notification
			}
		}
	}
}

// Syncing provides information when this nodes starts synchronising with the Ethereum network and when it's finished.
func (api *DownloaderAPI) Syncing(ctx context.Context) (*rpc.Subscription, error) {
	notifier, supported := rpc.NotifierFromContext(ctx)
	if !supported {
		return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
	}

	rpcSub := notifier.CreateSubscription()

	go func() {
		statuses := make(chan interface{})
		sub := api.SubscribeSyncStatus(statuses)

		for {
			select {
			case status := <-statuses:
				notifier.Notify(rpcSub.ID, status)
			case <-rpcSub.Err():
				sub.Unsubscribe()
				return
			case <-notifier.Closed():
				sub.Unsubscribe()
				return
			}
		}
	}()

	return rpcSub, nil
}

// SyncingResult provides information about the current synchronisation status for this node.
type SyncingResult struct {
	Syncing bool                  `json:"syncing"`
	Status  ethereum.SyncProgress `json:"status"`
}

// uninstallSyncSubscriptionRequest uninstalls a syncing subscription in the API event loop.
type uninstallSyncSubscriptionRequest struct {
	c           chan interface{}
	uninstalled chan interface{}
}

// SyncStatusSubscription represents a syncing subscription.
type SyncStatusSubscription struct {
	api       *DownloaderAPI   // register subscription in event loop of this api instance
	c         chan interface{} // channel where events are broadcasted to
	unsubOnce sync.Once        // make sure unsubscribe logic is executed once
}

// Unsubscribe uninstalls the subscription from the DownloadAPI event loop.
// The status channel that was passed to subscribeSyncStatus isn't used anymore
// after this method returns.
func (s *SyncStatusSubscription) Unsubscribe() {
	s.unsubOnce.Do(func() {
		req := uninstallSyncSubscriptionRequest{s.c, make(chan interface{})}
		s.api.uninstallSyncSubscription <- &req

		for {
			select {
			case <-s.c:
				// drop new status events until uninstall confirmation
				continue
			case <-req.uninstalled:
				return
			}
		}
	})
}

// SubscribeSyncStatus creates a subscription that will broadcast new synchronisation updates.
// The given channel must receive interface values, the result can either
func (api *DownloaderAPI) SubscribeSyncStatus(status chan interface{}) *SyncStatusSubscription {
	api.installSyncSubscription <- status
	return &SyncStatusSubscription{api: api, c: status}
}

[File diff suppressed because it is too large]

[File diff suppressed because it is too large]

@@ -1,25 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import "github.com/ethereum/go-ethereum/core/types"

type DoneEvent struct {
	Latest *types.Header
}
type StartEvent struct{}
type FailedEvent struct{ Err error }
@ -1,45 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the metrics collected by the downloader.

package downloader

import (
	"github.com/ethereum/go-ethereum/metrics"
)

var (
	headerInMeter      = metrics.NewRegisteredMeter("eth/downloader/headers/in", nil)
	headerReqTimer     = metrics.NewRegisteredTimer("eth/downloader/headers/req", nil)
	headerDropMeter    = metrics.NewRegisteredMeter("eth/downloader/headers/drop", nil)
	headerTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/headers/timeout", nil)

	bodyInMeter      = metrics.NewRegisteredMeter("eth/downloader/bodies/in", nil)
	bodyReqTimer     = metrics.NewRegisteredTimer("eth/downloader/bodies/req", nil)
	bodyDropMeter    = metrics.NewRegisteredMeter("eth/downloader/bodies/drop", nil)
	bodyTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/timeout", nil)

	receiptInMeter      = metrics.NewRegisteredMeter("eth/downloader/receipts/in", nil)
	receiptReqTimer     = metrics.NewRegisteredTimer("eth/downloader/receipts/req", nil)
	receiptDropMeter    = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil)
	receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil)

	stateInMeter   = metrics.NewRegisteredMeter("eth/downloader/states/in", nil)
	stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil)

	throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil)
)
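Each of these registers in the default metrics registry under its dotted name and is a no-op unless metrics collection is enabled. A small usage sketch, assuming the package-level metrics.Enabled switch of this codebase's metrics library:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	metrics.Enabled = true // normally flipped by the --metrics flag before meters are built

	headerIn := metrics.NewRegisteredMeter("eth/downloader/headers/in", nil) // nil = default registry
	headerIn.Mark(192)                                                       // record one 192-header delivery

	fmt.Println("headers delivered:", headerIn.Count())
}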
@ -1,81 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import "fmt"

// SyncMode represents the synchronisation mode of the downloader.
// It is a uint32 as it is used with atomic operations.
type SyncMode uint32

const (
	FullSync  SyncMode = iota // Synchronise the entire blockchain history from full blocks
	FastSync                  // Quickly download the headers, full sync only at the chain head
	SnapSync                  // Download the chain and the state via compact snapshots
	LightSync                 // Download only the headers and terminate afterwards
)

func (mode SyncMode) IsValid() bool {
	return mode >= FullSync && mode <= LightSync
}

// String implements the stringer interface.
func (mode SyncMode) String() string {
	switch mode {
	case FullSync:
		return "full"
	case FastSync:
		return "fast"
	case SnapSync:
		return "snap"
	case LightSync:
		return "light"
	default:
		return "unknown"
	}
}

func (mode SyncMode) MarshalText() ([]byte, error) {
	switch mode {
	case FullSync:
		return []byte("full"), nil
	case FastSync:
		return []byte("fast"), nil
	case SnapSync:
		return []byte("snap"), nil
	case LightSync:
		return []byte("light"), nil
	default:
		return nil, fmt.Errorf("unknown sync mode %d", mode)
	}
}

func (mode *SyncMode) UnmarshalText(text []byte) error {
	switch string(text) {
	case "full":
		*mode = FullSync
	case "fast":
		*mode = FastSync
	case "snap":
		*mode = SnapSync
	case "light":
		*mode = LightSync
	default:
		return fmt.Errorf(`unknown sync mode %q, want "full", "fast", "snap" or "light"`, text)
	}
	return nil
}
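Because SyncMode implements encoding.TextMarshaler and TextUnmarshaler, encoding/json serialises it as a string automatically. A round-trip sketch, written against the package as it existed before this removal:

package downloader_test

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/les/downloader" // deleted by this change
)

type config struct {
	Mode downloader.SyncMode `json:"mode"`
}

func Example_syncModeText() {
	out, _ := json.Marshal(config{Mode: downloader.SnapSync})
	fmt.Println(string(out))

	var c config
	_ = json.Unmarshal([]byte(`{"mode":"light"}`), &c)
	fmt.Println(c.Mode)
	// Output:
	// {"mode":"snap"}
	// light
}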
@ -1,502 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the active peer-set of the downloader, maintaining both failures
// as well as reputation metrics to prioritize the block retrievals.

package downloader

import (
	"errors"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/msgrate"
)

const (
	maxLackingHashes = 4096 // Maximum number of entries allowed on the list of lacking items
)

var (
	errAlreadyFetching   = errors.New("already fetching blocks from peer")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

// peerConnection represents an active peer from which hashes and blocks are retrieved.
type peerConnection struct {
	id string // Unique identifier of the peer

	headerIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)
	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
	receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
	stateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)

	headerStarted  time.Time // Time instance when the last header fetch was started
	blockStarted   time.Time // Time instance when the last block (body) fetch was started
	receiptStarted time.Time // Time instance when the last receipt fetch was started
	stateStarted   time.Time // Time instance when the last node data fetch was started

	rates   *msgrate.Tracker         // Tracker to hone in on the number of items retrievable per second
	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

	peer Peer

	version uint       // Eth protocol version number to switch strategies
	log     log.Logger // Contextual logger to add extra infos to peer logs
	lock    sync.RWMutex
}

// LightPeer encapsulates the methods required to synchronise with a remote light peer.
type LightPeer interface {
	Head() (common.Hash, *big.Int)
	RequestHeadersByHash(common.Hash, int, int, bool) error
	RequestHeadersByNumber(uint64, int, int, bool) error
}

// Peer encapsulates the methods required to synchronise with a remote full peer.
type Peer interface {
	LightPeer
	RequestBodies([]common.Hash) error
	RequestReceipts([]common.Hash) error
	RequestNodeData([]common.Hash) error
}

// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.
type lightPeerWrapper struct {
	peer LightPeer
}

func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() }
func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error {
	return w.peer.RequestHeadersByHash(h, amount, skip, reverse)
}
func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error {
	return w.peer.RequestHeadersByNumber(i, amount, skip, reverse)
}
func (w *lightPeerWrapper) RequestBodies([]common.Hash) error {
	panic("RequestBodies not supported in light client mode sync")
}
func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error {
	panic("RequestReceipts not supported in light client mode sync")
}
func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error {
	panic("RequestNodeData not supported in light client mode sync")
}

// newPeerConnection creates a new downloader peer.
func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection {
	return &peerConnection{
		id:      id,
		lacking: make(map[common.Hash]struct{}),
		peer:    peer,
		version: version,
		log:     logger,
	}
}

// Reset clears the internal state of a peer entity.
func (p *peerConnection) Reset() {
	p.lock.Lock()
	defer p.lock.Unlock()

	atomic.StoreInt32(&p.headerIdle, 0)
	atomic.StoreInt32(&p.blockIdle, 0)
	atomic.StoreInt32(&p.receiptIdle, 0)
	atomic.StoreInt32(&p.stateIdle, 0)

	p.lacking = make(map[common.Hash]struct{})
}

// FetchHeaders sends a header retrieval request to the remote peer.
func (p *peerConnection) FetchHeaders(from uint64, count int) error {
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.headerStarted = time.Now()

	// Issue the header retrieval request (absolute upwards without gaps)
	go p.peer.RequestHeadersByNumber(from, count, 0, false)

	return nil
}

// FetchBodies sends a block body retrieval request to the remote peer.
func (p *peerConnection) FetchBodies(request *fetchRequest) error {
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.blockStarted = time.Now()

	go func() {
		// Convert the header set to a retrievable slice
		hashes := make([]common.Hash, 0, len(request.Headers))
		for _, header := range request.Headers {
			hashes = append(hashes, header.Hash())
		}
		p.peer.RequestBodies(hashes)
	}()

	return nil
}

// FetchReceipts sends a receipt retrieval request to the remote peer.
func (p *peerConnection) FetchReceipts(request *fetchRequest) error {
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.receiptStarted = time.Now()

	go func() {
		// Convert the header set to a retrievable slice
		hashes := make([]common.Hash, 0, len(request.Headers))
		for _, header := range request.Headers {
			hashes = append(hashes, header.Hash())
		}
		p.peer.RequestReceipts(hashes)
	}()

	return nil
}

// FetchNodeData sends a node state data retrieval request to the remote peer.
func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.stateStarted = time.Now()

	go p.peer.RequestNodeData(hashes)

	return nil
}
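All four fetchers share one concurrency idiom: a compare-and-swap from 0 to 1 marks the peer busy, and the corresponding Set*Idle call later stores 0 to release it. The guard in isolation:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var idle int32 // 0 = idle, 1 = active, as in peerConnection

	start := func() bool { return atomic.CompareAndSwapInt32(&idle, 0, 1) }

	fmt.Println(start()) // true: this caller won the fetch slot
	fmt.Println(start()) // false: a second fetch would get errAlreadyFetching

	atomic.StoreInt32(&idle, 0) // what SetHeadersIdle and friends do on delivery
	fmt.Println(start())        // true again
}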

// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
// requests. Its estimated header retrieval throughput is updated with that measured
// just now.
func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) {
	p.rates.Update(eth.BlockHeadersMsg, deliveryTime.Sub(p.headerStarted), delivered)
	atomic.StoreInt32(&p.headerIdle, 0)
}

// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
// requests. Its estimated body retrieval throughput is updated with that measured
// just now.
func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) {
	p.rates.Update(eth.BlockBodiesMsg, deliveryTime.Sub(p.blockStarted), delivered)
	atomic.StoreInt32(&p.blockIdle, 0)
}

// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
// retrieval requests. Its estimated receipt retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) {
	p.rates.Update(eth.ReceiptsMsg, deliveryTime.Sub(p.receiptStarted), delivered)
	atomic.StoreInt32(&p.receiptIdle, 0)
}

// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
// data retrieval requests. Its estimated state retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) {
	p.rates.Update(eth.NodeDataMsg, deliveryTime.Sub(p.stateStarted), delivered)
	atomic.StoreInt32(&p.stateIdle, 0)
}

// HeaderCapacity retrieves the peer's header download allowance based on its
// previously discovered throughput.
func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
	cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT)
	if cap > MaxHeaderFetch {
		cap = MaxHeaderFetch
	}
	return cap
}

// BlockCapacity retrieves the peer's block download allowance based on its
// previously discovered throughput.
func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int {
	cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT)
	if cap > MaxBlockFetch {
		cap = MaxBlockFetch
	}
	return cap
}

// ReceiptCapacity retrieves the peer's receipt download allowance based on its
// previously discovered throughput.
func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {
	cap := p.rates.Capacity(eth.ReceiptsMsg, targetRTT)
	if cap > MaxReceiptFetch {
		cap = MaxReceiptFetch
	}
	return cap
}

// NodeDataCapacity retrieves the peer's state download allowance based on its
// previously discovered throughput.
func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int {
	cap := p.rates.Capacity(eth.NodeDataMsg, targetRTT)
	if cap > MaxStateFetch {
		cap = MaxStateFetch
	}
	return cap
}

// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
// that a peer is known not to have (i.e. have been requested before). If the
// set reaches its maximum allowed capacity, items are randomly dropped off.
func (p *peerConnection) MarkLacking(hash common.Hash) {
	p.lock.Lock()
	defer p.lock.Unlock()

	for len(p.lacking) >= maxLackingHashes {
		for drop := range p.lacking {
			delete(p.lacking, drop)
			break
		}
	}
	p.lacking[hash] = struct{}{}
}

// Lacks retrieves whether the hash of a blockchain item is on the peer's lacking
// list (i.e. whether we know that the peer does not have it).
func (p *peerConnection) Lacks(hash common.Hash) bool {
	p.lock.RLock()
	defer p.lock.RUnlock()

	_, ok := p.lacking[hash]
	return ok
}

// peerSet represents the collection of active peers participating in the chain
// download procedure.
type peerSet struct {
	peers map[string]*peerConnection
	rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat

	newPeerFeed  event.Feed
	peerDropFeed event.Feed

	lock sync.RWMutex
}

// newPeerSet creates a new peer set to track the active download sources.
func newPeerSet() *peerSet {
	return &peerSet{
		peers: make(map[string]*peerConnection),
		rates: msgrate.NewTrackers(log.New("proto", "eth")),
	}
}

// SubscribeNewPeers subscribes to peer arrival events.
func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription {
	return ps.newPeerFeed.Subscribe(ch)
}

// SubscribePeerDrops subscribes to peer departure events.
func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription {
	return ps.peerDropFeed.Subscribe(ch)
}

// Reset iterates over the current peer set, and resets each of the known peers
// to prepare for a next batch of block retrieval.
func (ps *peerSet) Reset() {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	for _, peer := range ps.peers {
		peer.Reset()
	}
}

// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
//
// The method also sets the starting throughput values of the new peer to the
// average of all existing peers, to give it a realistic chance of being used
// for data retrievals.
func (ps *peerSet) Register(p *peerConnection) error {
	// Register the new peer with some meaningful defaults
	ps.lock.Lock()
	if _, ok := ps.peers[p.id]; ok {
		ps.lock.Unlock()
		return errAlreadyRegistered
	}
	p.rates = msgrate.NewTracker(ps.rates.MeanCapacities(), ps.rates.MedianRoundTrip())
	if err := ps.rates.Track(p.id, p.rates); err != nil {
		ps.lock.Unlock()
		return err
	}
	ps.peers[p.id] = p
	ps.lock.Unlock()

	ps.newPeerFeed.Send(p)
	return nil
}

// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity.
func (ps *peerSet) Unregister(id string) error {
	ps.lock.Lock()
	p, ok := ps.peers[id]
	if !ok {
		ps.lock.Unlock()
		return errNotRegistered
	}
	delete(ps.peers, id)
	ps.rates.Untrack(id)
	ps.lock.Unlock()

	ps.peerDropFeed.Send(p)
	return nil
}

// Peer retrieves the registered peer with the given id.
func (ps *peerSet) Peer(id string) *peerConnection {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return ps.peers[id]
}

// Len returns the current number of peers in the set.
func (ps *peerSet) Len() int {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return len(ps.peers)
}

// AllPeers retrieves a flat list of all the peers within the set.
func (ps *peerSet) AllPeers() []*peerConnection {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	list := make([]*peerConnection, 0, len(ps.peers))
	for _, p := range ps.peers {
		list = append(list, p)
	}
	return list
}

// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
// within the active peer set, ordered by their reputation.
func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
	idle := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.headerIdle) == 0
	}
	throughput := func(p *peerConnection) int {
		return p.rates.Capacity(eth.BlockHeadersMsg, time.Second)
	}
	return ps.idlePeers(eth.ETH66, eth.ETH67, idle, throughput)
}

// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
// the active peer set, ordered by their reputation.
func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
	idle := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.blockIdle) == 0
	}
	throughput := func(p *peerConnection) int {
		return p.rates.Capacity(eth.BlockBodiesMsg, time.Second)
	}
	return ps.idlePeers(eth.ETH66, eth.ETH67, idle, throughput)
}

// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
// within the active peer set, ordered by their reputation.
func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
	idle := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.receiptIdle) == 0
	}
	throughput := func(p *peerConnection) int {
		return p.rates.Capacity(eth.ReceiptsMsg, time.Second)
	}
	return ps.idlePeers(eth.ETH66, eth.ETH67, idle, throughput)
}

// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
// peers within the active peer set, ordered by their reputation.
func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
	idle := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.stateIdle) == 0
	}
	throughput := func(p *peerConnection) int {
		return p.rates.Capacity(eth.NodeDataMsg, time.Second)
	}
	return ps.idlePeers(eth.ETH66, eth.ETH67, idle, throughput)
}

// idlePeers retrieves a flat list of all currently idle peers satisfying the
// protocol version constraints, using the provided function to check idleness.
// The resulting set of peers is sorted by their capacity.
func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, capacity func(*peerConnection) int) ([]*peerConnection, int) {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	var (
		total = 0
		idle  = make([]*peerConnection, 0, len(ps.peers))
		tps   = make([]int, 0, len(ps.peers))
	)
	for _, p := range ps.peers {
		if p.version >= minProtocol && p.version <= maxProtocol {
			if idleCheck(p) {
				idle = append(idle, p)
				tps = append(tps, capacity(p))
			}
			total++
		}
	}

	// And sort them
	sortPeers := &peerCapacitySort{idle, tps}
	sort.Sort(sortPeers)
	return sortPeers.p, total
}

// peerCapacitySort implements sort.Interface.
// It sorts peer connections by capacity (descending).
type peerCapacitySort struct {
	p  []*peerConnection
	tp []int
}

func (ps *peerCapacitySort) Len() int {
	return len(ps.p)
}

func (ps *peerCapacitySort) Less(i, j int) bool {
	return ps.tp[i] > ps.tp[j]
}

func (ps *peerCapacitySort) Swap(i, j int) {
	ps.p[i], ps.p[j] = ps.p[j], ps.p[i]
	ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i]
}
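The capacity sort keeps the peer slice and the throughput slice in lockstep so the fastest peer surfaces first. A self-contained replica with illustrative names (not from the package):

package main

import (
	"fmt"
	"sort"
)

type byCapacity struct {
	ids []string
	tps []int
}

func (s *byCapacity) Len() int           { return len(s.ids) }
func (s *byCapacity) Less(i, j int) bool { return s.tps[i] > s.tps[j] } // descending
func (s *byCapacity) Swap(i, j int) {
	s.ids[i], s.ids[j] = s.ids[j], s.ids[i]
	s.tps[i], s.tps[j] = s.tps[j], s.tps[i]
}

func main() {
	s := &byCapacity{ids: []string{"a", "b", "c"}, tps: []int{10, 40, 25}}
	sort.Sort(s)
	fmt.Println(s.ids, s.tps) // [b c a] [40 25 10]: fastest peer first
}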
@ -1,913 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/trie"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)

var (
	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // [eth/62] Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	pending int32 // Flag telling what deliveries are outstanding

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
	item := &fetchResult{
		Header: header,
	}
	if !header.EmptyBody() {
		item.pending |= (1 << bodyType)
	}
	if fastSync && !header.EmptyReceipts() {
		item.pending |= (1 << receiptType)
	}
	return item
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
		atomic.AddInt32(&f.pending, -1)
	}
}

// AllDone checks if item is done.
func (f *fetchResult) AllDone() bool {
	return atomic.LoadInt32(&f.pending) == 0
}

// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
		atomic.AddInt32(&f.pending, -2)
	}
}

// Done checks if the given type is done already.
func (f *fetchResult) Done(kind uint) bool {
	v := atomic.LoadInt32(&f.pending)
	return v&(1<<kind) == 0
}
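The pending field packs one bit per outstanding part: bit 0 (bodyType) and bit 1 (receiptType). Since each Set*Done first checks that its bit is set, the fixed subtraction clears exactly that bit. The arithmetic worked out:

package main

import "fmt"

func main() {
	const bodyType, receiptType = 0, 1

	pending := int32(0)
	pending |= 1 << bodyType    // body outstanding
	pending |= 1 << receiptType // receipt outstanding (fast sync only); pending == 3

	pending -= 1 // SetBodyDone: clears bit 0; pending == 2
	pending -= 2 // SetReceiptsDone: clears bit 1; pending == 0

	fmt.Println(pending == 0) // true, i.e. AllDone
}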

// queue represents hashes that either need fetching or are being fetched
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque[int64, uint64]    // Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
	headerResults   []*types.Header                // Result cache accumulating the completed headers
	headerProced    int                            // Number of headers already processed from the results
	headerOffset    uint64                         // Number of the first header in the result cache
	headerContCh    chan bool                      // Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header      // Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest           // Currently pending block (body) retrieval operations

	receiptTaskPool  map[common.Hash]*types.Header      // Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque[int64, *types.Header] // Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest           // Currently pending receipt retrieval operations

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	lastStatLog time.Time
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
	lock := new(sync.RWMutex)
	q := &queue{
		headerContCh:     make(chan bool),
		blockTaskQueue:   prque.New[int64, *types.Header](nil),
		receiptTaskQueue: prque.New[int64, *types.Header](nil),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
	q.Reset(blockCacheLimit, thresholdInitialSize)
	return q
}

// Reset clears out the queue contents.
func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)

	q.resultCache = newResultStore(blockCacheLimit)
	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.active.Signal()
	q.lock.Unlock()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns if the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New[int64, uint64](nil)
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -int64(index))
	}
}
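Each skeleton header anchors one fill batch of MaxHeaderFetch headers (192 elsewhere in this codebase), so the task indexes advance in fixed strides from the starting block:

package main

import "fmt"

func main() {
	const maxHeaderFetch = 192 // mirrors MaxHeaderFetch

	from := uint64(1000)
	for i := 0; i < 3; i++ { // three skeleton headers scheduled
		fmt.Println(from + uint64(i*maxHeaderFetch)) // 1000, 1192, 1384
	}
}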

// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}

// Schedule adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == FastSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}

// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule,
// but assumes that there are not two simultaneous callers to Results.
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, between asking for the lock and obtaining it, someone
		// can have closed the queue. In that case, we should return the
		// available results and stop blocking.
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless of whether the queue is closed or not, we can still
	// deliver whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		for _, receipt := range result.Receipts {
			size += receipt.Size()
		}
		for _, tx := range result.Transactions {
			size += common.StorageSize(tx.Size())
		}
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated result size, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

	// Log some info at certain times
	if time.Since(q.lastStatLog) > 60*time.Second {
		q.lastStatLog = time.Now()
		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Info("Downloader queue stats", info...)
	}
	return results
}
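The throttle threshold is the cache memory budget divided by the moving-average block size (the code rounds up); the 0.1 weight makes the average react slowly to outliers. Plugging in illustrative numbers, noting the result store may cap the value further:

package main

import "fmt"

func main() {
	const weight = 0.1                 // blockCacheSizeWeight
	const budget = 256 * 1024 * 1024.0 // blockCacheMemory

	avg := 20 * 1024.0    // running average block size: 20 KiB
	block := 100 * 1024.0 // one unusually large 100 KiB block arrives

	avg = weight*block + (1-weight)*avg    // new average: 28 KiB
	fmt.Println(int(avg), int(budget/avg)) // 28672 bytes, ~9362 result slots
}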
| 
 | ||||
| func (q *queue) Stats() []interface{} { | ||||
| 	q.lock.RLock() | ||||
| 	defer q.lock.RUnlock() | ||||
| 
 | ||||
| 	return q.stats() | ||||
| } | ||||
| 
 | ||||
| func (q *queue) stats() []interface{} { | ||||
| 	return []interface{}{ | ||||
| 		"receiptTasks", q.receiptTaskQueue.Size(), | ||||
| 		"blockTasks", q.blockTaskQueue.Size(), | ||||
| 		"itemSize", q.resultSize, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // ReserveHeaders reserves a set of headers for the given peer, skipping any
 | ||||
| // previously failed batches.
 | ||||
| func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 
 | ||||
| 	// Short circuit if the peer's already downloading something (sanity check to
 | ||||
| 	// not corrupt state)
 | ||||
| 	if _, ok := q.headerPendPool[p.id]; ok { | ||||
| 		return nil | ||||
| 	} | ||||
| 	// Retrieve a batch of hashes, skipping previously failed ones
 | ||||
| 	send, skip := uint64(0), []uint64{} | ||||
| 	for send == 0 && !q.headerTaskQueue.Empty() { | ||||
| 		from, _ := q.headerTaskQueue.Pop() | ||||
| 		if q.headerPeerMiss[p.id] != nil { | ||||
| 			if _, ok := q.headerPeerMiss[p.id][from]; ok { | ||||
| 				skip = append(skip, from) | ||||
| 				continue | ||||
| 			} | ||||
| 		} | ||||
| 		send = from | ||||
| 	} | ||||
| 	// Merge all the skipped batches back
 | ||||
| 	for _, from := range skip { | ||||
| 		q.headerTaskQueue.Push(from, -int64(from)) | ||||
| 	} | ||||
| 	// Assemble and return the block download request
 | ||||
| 	if send == 0 { | ||||
| 		return nil | ||||
| 	} | ||||
| 	request := &fetchRequest{ | ||||
| 		Peer: p, | ||||
| 		From: send, | ||||
| 		Time: time.Now(), | ||||
| 	} | ||||
| 	q.headerPendPool[p.id] = request | ||||
| 	return request | ||||
| } | ||||
| 
 | ||||
| // ReserveBodies reserves a set of body fetches for the given peer, skipping any
 | ||||
| // previously failed downloads. Beside the next batch of needed fetches, it also
 | ||||
| // returns a flag whether empty blocks were queued requiring processing.
 | ||||
| func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 
 | ||||
| 	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType) | ||||
| } | ||||
| 
 | ||||
| // ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
 | ||||
| // any previously failed downloads. Beside the next batch of needed fetches, it
 | ||||
| // also returns a flag whether empty receipts were queued requiring importing.
 | ||||
| func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 
 | ||||
| 	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType) | ||||
| } | ||||
| 
 | ||||
| // reserveHeaders reserves a set of data download operations for a given peer,
 | ||||
| // skipping any previously failed ones. This method is a generic version used
 | ||||
| // by the individual special reservation functions.
 | ||||
| //
 | ||||
| // Note, this method expects the queue lock to be already held for writing. The
 | ||||
| // reason the lock is not obtained in here is because the parameters already need
 | ||||
| // to access the queue, so they already need a lock anyway.
 | ||||
| //
 | ||||
| // Returns:
 | ||||
| //
 | ||||
| //	item     - the fetchRequest
 | ||||
| //	progress - whether any progress was made
 | ||||
| //	throttle - if the caller should throttle for a while
 | ||||
| func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque[int64, *types.Header], | ||||
| 	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) { | ||||
| 	// Short circuit if the pool has been depleted, or if the peer's already
 | ||||
| 	// downloading something (sanity check not to corrupt state)
 | ||||
| 	if taskQueue.Empty() { | ||||
| 		return nil, false, true | ||||
| 	} | ||||
| 	if _, ok := pendPool[p.id]; ok { | ||||
| 		return nil, false, false | ||||
| 	} | ||||
| 	// Retrieve a batch of tasks, skipping previously failed ones
 | ||||
| 	send := make([]*types.Header, 0, count) | ||||
| 	skip := make([]*types.Header, 0) | ||||
| 	progress := false | ||||
| 	throttled := false | ||||
| 	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ { | ||||
| 		// the task queue will pop items in order, so the highest prio block
 | ||||
| 		// is also the lowest block number.
 | ||||
| 		header, _ := taskQueue.Peek() | ||||
| 
 | ||||
| 		// we can ask the resultcache if this header is within the
 | ||||
| 		// "prioritized" segment of blocks. If it is not, we need to throttle
 | ||||
| 
 | ||||
| 		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync) | ||||
| 		if stale { | ||||
| 			// Don't put back in the task queue, this item has already been
 | ||||
| 			// delivered upstream
 | ||||
| 			taskQueue.PopItem() | ||||
| 			progress = true | ||||
| 			delete(taskPool, header.Hash()) | ||||
| 			proc = proc - 1 | ||||
| 			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64()) | ||||
| 			continue | ||||
| 		} | ||||
| 		if throttle { | ||||
| 			// There are no resultslots available. Leave it in the task queue
 | ||||
| 			// However, if there are any left as 'skipped', we should not tell
 | ||||
| 			// the caller to throttle, since we still want some other
 | ||||
| 			// peer to fetch those for us
 | ||||
| 			throttled = len(skip) == 0 | ||||
| 			break | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			// this most definitely should _not_ happen
 | ||||
| 			log.Warn("Failed to reserve headers", "err", err) | ||||
| 			// There are no resultslots available. Leave it in the task queue
 | ||||
| 			break | ||||
| 		} | ||||
| 		if item.Done(kind) { | ||||
| 			// If it's a noop, we can skip this task
 | ||||
| 			delete(taskPool, header.Hash()) | ||||
| 			taskQueue.PopItem() | ||||
| 			proc = proc - 1 | ||||
| 			progress = true | ||||
| 			continue | ||||
| 		} | ||||
| 		// Remove it from the task queue
 | ||||
| 		taskQueue.PopItem() | ||||
| 		// Otherwise unless the peer is known not to have the data, add to the retrieve list
 | ||||
| 		if p.Lacks(header.Hash()) { | ||||
| 			skip = append(skip, header) | ||||
| 		} else { | ||||
| 			send = append(send, header) | ||||
| 		} | ||||
| 	} | ||||
| 	// Merge all the skipped headers back
 | ||||
| 	for _, header := range skip { | ||||
| 		taskQueue.Push(header, -int64(header.Number.Uint64())) | ||||
| 	} | ||||
| 	if q.resultCache.HasCompletedItems() { | ||||
| 		// Wake Results, resultCache was modified
 | ||||
| 		q.active.Signal() | ||||
| 	} | ||||
| 	// Assemble and return the block download request
 | ||||
| 	if len(send) == 0 { | ||||
| 		return nil, progress, throttled | ||||
| 	} | ||||
| 	request := &fetchRequest{ | ||||
| 		Peer:    p, | ||||
| 		Headers: send, | ||||
| 		Time:    time.Now(), | ||||
| 	} | ||||
| 	pendPool[p.id] = request | ||||
| 	return request, progress, throttled | ||||
| } | ||||
| 
 | ||||
| // CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
 | ||||
| func (q *queue) CancelHeaders(request *fetchRequest) { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 	q.cancel(request, q.headerTaskQueue, q.headerPendPool) | ||||
| } | ||||
| 
 | ||||
| // CancelBodies aborts a body fetch request, returning all pending headers to the
 | ||||
| // task queue.
 | ||||
| func (q *queue) CancelBodies(request *fetchRequest) { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 	q.cancel(request, q.blockTaskQueue, q.blockPendPool) | ||||
| } | ||||
| 
 | ||||
| // CancelReceipts aborts a body fetch request, returning all pending headers to
 | ||||
| // the task queue.
 | ||||
| func (q *queue) CancelReceipts(request *fetchRequest) { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool) | ||||
| } | ||||
| 
 | ||||
| // Cancel aborts a fetch request, returning all pending hashes to the task queue.
 | ||||
| func (q *queue) cancel(request *fetchRequest, taskQueue interface{}, pendPool map[string]*fetchRequest) { | ||||
| 	if request.From > 0 { | ||||
| 		taskQueue.(*prque.Prque[int64, uint64]).Push(request.From, -int64(request.From)) | ||||
| 	} | ||||
| 	for _, header := range request.Headers { | ||||
| 		taskQueue.(*prque.Prque[int64, *types.Header]).Push(header, -int64(header.Number.Uint64())) | ||||
| 	} | ||||
| 	delete(pendPool, request.Peer.id) | ||||
| } | ||||
| 
 | ||||
| // Revoke cancels all pending requests belonging to a given peer. This method is
 | ||||
| // meant to be called during a peer drop to quickly reassign owned data fetches
 | ||||
| // to remaining nodes.
 | ||||
| func (q *queue) Revoke(peerID string) { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 
 | ||||
| 	if request, ok := q.blockPendPool[peerID]; ok { | ||||
| 		for _, header := range request.Headers { | ||||
| 			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) | ||||
| 		} | ||||
| 		delete(q.blockPendPool, peerID) | ||||
| 	} | ||||
| 	if request, ok := q.receiptPendPool[peerID]; ok { | ||||
| 		for _, header := range request.Headers { | ||||
| 			q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64())) | ||||
| 		} | ||||
| 		delete(q.receiptPendPool, peerID) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
 | ||||
| // canceling them and returning the responsible peers for penalisation.
 | ||||
| func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 
 | ||||
| 	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter) | ||||
| } | ||||
| 
 | ||||
| // ExpireBodies checks for in flight block body requests that exceeded a timeout
 | ||||
| // allowance, canceling them and returning the responsible peers for penalisation.
 | ||||
| func (q *queue) ExpireBodies(timeout time.Duration) map[string]int { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 
 | ||||
| 	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter) | ||||
| } | ||||
| 
 | ||||
| // ExpireReceipts checks for in flight receipt requests that exceeded a timeout
 | ||||
| // allowance, canceling them and returning the responsible peers for penalisation.
 | ||||
| func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int { | ||||
| 	q.lock.Lock() | ||||
| 	defer q.lock.Unlock() | ||||
| 
 | ||||
| 	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter) | ||||
| } | ||||
| 
 | ||||
| // expire is the generic check that move expired tasks from a pending pool back
 | ||||
| // into a task pool, returning all entities caught with expired tasks.
 | ||||
| //
 | ||||
| // Note, this method expects the queue lock to be already held. The
 | ||||
| // reason the lock is not obtained in here is because the parameters already need
 | ||||
| // to access the queue, so they already need a lock anyway.
 | ||||
| func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue interface{}, timeoutMeter metrics.Meter) map[string]int { | ||||
| 	// Iterate over the expired requests and return each to the queue
 | ||||
| 	expiries := make(map[string]int) | ||||
| 	for id, request := range pendPool { | ||||
| 		if time.Since(request.Time) > timeout { | ||||
| 			// Update the metrics with the timeout
 | ||||
| 			timeoutMeter.Mark(1) | ||||
| 
 | ||||
| 			// Return any non satisfied requests to the pool
 | ||||
| 			if request.From > 0 { | ||||
| 				taskQueue.(*prque.Prque[int64, uint64]).Push(request.From, -int64(request.From)) | ||||
| 			} | ||||
| 			for _, header := range request.Headers { | ||||
| 				taskQueue.(*prque.Prque[int64, *types.Header]).Push(header, -int64(header.Number.Uint64())) | ||||
| 			} | ||||
| 			// Add the peer to the expiry report along the number of failed requests
 | ||||
| 			expiries[id] = len(request.Headers) | ||||
| 
 | ||||
| 			// Remove the expired requests from the pending pool directly
 | ||||
| 			delete(pendPool, id) | ||||
| 		} | ||||
| 	} | ||||
| 	return expiries | ||||
| } | ||||

// DeliverHeaders injects a header retrieval response into the header results
// cache. This method either accepts all headers it received, or none of them
// if they do not map correctly to the skeleton.
//
// If the headers are accepted, the method makes an attempt to deliver the set
// of ready headers to the processor to keep the pipeline full. However, it will
// not block to prevent stalling other pending deliveries.
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:16])
	}
	// Short circuit if the data was never requested
	request := q.headerPendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	headerReqTimer.UpdateSince(request.Time)
	delete(q.headerPendPool, id)

	// Ensure headers can be mapped onto the skeleton chain
	target := q.headerTaskPool[request.From].Hash()

	accepted := len(headers) == MaxHeaderFetch
	if accepted {
		if headers[0].Number.Uint64() != request.From {
			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
			accepted = false
		} else if headers[len(headers)-1].Hash() != target {
			logger.Trace("Last header broke skeleton structure", "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
			accepted = false
		}
	}
	if accepted {
		parentHash := headers[0].Hash()
		for i, header := range headers[1:] {
			hash := header.Hash()
			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
				logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
				accepted = false
				break
			}
			if parentHash != header.ParentHash {
				logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
				accepted = false
				break
			}
			// Set-up parent hash for next round
			parentHash = hash
		}
	}
	// If the batch of headers wasn't accepted, mark as unavailable
	if !accepted {
		logger.Trace("Skeleton filling not accepted", "from", request.From)

		miss := q.headerPeerMiss[id]
		if miss == nil {
			q.headerPeerMiss[id] = make(map[uint64]struct{})
			miss = q.headerPeerMiss[id]
		}
		miss[request.From] = struct{}{}

		q.headerTaskQueue.Push(request.From, -int64(request.From))
		return 0, errors.New("delivery not accepted")
	}
	// Clean up a successful fetch and try to deliver any sub-results
	copy(q.headerResults[request.From-q.headerOffset:], headers)
	delete(q.headerTaskPool, request.From)

	ready := 0
	for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {
		ready += MaxHeaderFetch
	}
	if ready > 0 {
		// Headers are ready for delivery, gather them and push forward (non blocking)
		process := make([]*types.Header, ready)
		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])

		select {
		case headerProcCh <- process:
			logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number)
			q.headerProced += len(process)
		default:
		}
	}
	// Check for termination and return
	if len(q.headerTaskPool) == 0 {
		q.headerContCh <- false
	}
	return len(headers), nil
}
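
The acceptance rules above boil down to three checks: the batch is exactly MaxHeaderFetch long, it starts at the requested origin, and it ends on the skeleton anchor while forming an unbroken parent-hash chain. A minimal standalone sketch of the same validation; validBatch is a hypothetical helper, with the batch size and skeleton target passed in as parameters:

// Sketch: validate a skeleton-fill batch, mirroring the checks above.
func validBatch(headers []*types.Header, from uint64, target common.Hash, batchSize int) bool {
	if len(headers) != batchSize ||
		headers[0].Number.Uint64() != from ||
		headers[len(headers)-1].Hash() != target {
		return false
	}
	parent := headers[0].Hash()
	for i, h := range headers[1:] {
		if h.Number.Uint64() != from+1+uint64(i) || h.ParentHash != parent {
			return false
		}
		parent = h.Hash()
	}
	return true
}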

// DeliverBodies injects a block body retrieval response into the results queue.
// The method returns the number of block bodies accepted from the delivery and
// also wakes any threads waiting for data delivery.
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	trieHasher := trie.NewStackTrie(nil)
	validate := func(index int, header *types.Header) error {
		if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash {
			return errInvalidBody
		}
		if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
			return errInvalidBody
		}
		return nil
	}

	reconstruct := func(index int, result *fetchResult) {
		result.Transactions = txLists[index]
		result.Uncles = uncleLists[index]
		result.SetBodyDone()
	}
	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
		bodyReqTimer, len(txLists), validate, reconstruct)
}

// DeliverReceipts injects a receipt retrieval response into the results queue.
// The method returns the number of transaction receipts accepted from the delivery
// and also wakes any threads waiting for data delivery.
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	trieHasher := trie.NewStackTrie(nil)
	validate := func(index int, header *types.Header) error {
		if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash {
			return errInvalidReceipt
		}
		return nil
	}
	reconstruct := func(index int, result *fetchResult) {
		result.Receipts = receiptList[index]
		result.SetReceiptsDone()
	}
	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
		receiptReqTimer, len(receiptList), validate, reconstruct)
}
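
Both delivery paths authenticate data purely against the owning header: transactions must re-derive header.TxHash, uncles header.UncleHash, and receipts header.ReceiptHash. A minimal sketch of the same checks for a single block, using only the types and trie helpers already imported by this file (verifyBlockData itself is hypothetical):

// Sketch: verify one body and its receipts against the header commitments.
func verifyBlockData(header *types.Header, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) error {
	if types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)) != header.TxHash {
		return errInvalidBody
	}
	if types.CalcUncleHash(uncles) != header.UncleHash {
		return errInvalidBody
	}
	if types.DeriveSha(types.Receipts(receipts), trie.NewStackTrie(nil)) != header.ReceiptHash {
		return errInvalidReceipt
	}
	return nil
}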

// deliver injects a data retrieval response into the results queue.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
	taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
	results int, validate func(index int, header *types.Header) error,
	reconstruct func(index int, result *fetchResult)) (int, error) {
	// Short circuit if the data was never requested
	request := pendPool[id]
	if request == nil {
		return 0, errNoFetchesPending
	}
	reqTimer.UpdateSince(request.Time)
	delete(pendPool, id)

	// If no data items were retrieved, mark them as unavailable for the origin peer
	if results == 0 {
		for _, header := range request.Headers {
			request.Peer.MarkLacking(header.Hash())
		}
	}
	// Assemble each of the results with their headers and retrieved data parts
	var (
		accepted int
		failure  error
		i        int
		hashes   []common.Hash
	)
	for _, header := range request.Headers {
		// Short circuit assembly if no more fetch results are found
		if i >= results {
			break
		}
		// Validate the fields
		if err := validate(i, header); err != nil {
			failure = err
			break
		}
		hashes = append(hashes, header.Hash())
		i++
	}

	for _, header := range request.Headers[:i] {
		if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil && !stale {
			reconstruct(accepted, res)
		} else {
			// else: between here and above, some other peer filled this result,
			// or it was indeed a no-op. This should not happen, but if it does it's
			// not something to panic about
			log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err)
			failure = errStaleDelivery
		}
		// Clean up a successful fetch
		delete(taskPool, hashes[accepted])
		accepted++
	}
	// Return all failed or missing fetches to the queue
	for _, header := range request.Headers[accepted:] {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	// Wake up Results
	if accepted > 0 {
		q.active.Signal()
	}
	if failure == nil {
		return accepted, nil
	}
	// If none of the data was good, it's a stale delivery
	if accepted > 0 {
		return accepted, fmt.Errorf("partial failure: %v", failure)
	}
	return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery)
}

// Prepare configures the result cache to allow accepting and caching inbound
// fetch results.
func (q *queue) Prepare(offset uint64, mode SyncMode) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Prepare the queue for sync results
	q.resultCache.Prepare(offset)
	q.mode = mode
}
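
Taken together, the queue is driven in a simple loop: Prepare sets the first expected block number, Schedule feeds in headers, Reserve*/Deliver* hand tasks to peers and collect responses, and Results drains completed blocks. A compressed sketch of that lifecycle, mirroring how TestBasics further below exercises it (error handling elided; drainQueue is a hypothetical driver):

// Sketch: the queue lifecycle as driven by a caller (cf. TestBasics below).
func drainQueue(q *queue, peer *peerConnection, headers []*types.Header,
	txs [][]*types.Transaction, uncles [][]*types.Header) []*fetchResult {
	q.Prepare(1, FastSync) // set the first expected block number
	q.Schedule(headers, 1) // queue header-anchored fetch tasks
	req, _, _ := q.ReserveBodies(peer, len(headers)) // assign tasks to a peer
	if req != nil {
		q.DeliverBodies(peer.id, txs, uncles) // feed the peer's response back
	}
	return q.Results(false) // collect whatever became complete (non-blocking)
}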
@ -1,438 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"
	"math/big"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
)

// makeChain creates a chain of n blocks starting at and including parent.
// In addition, every other block contains a transaction to allow testing
// correct block reassembly.
func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) {
	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})
		// Add one tx to every second block
		if !empty && i%2 == 0 {
			signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp())
			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey)
			if err != nil {
				panic(err)
			}
			block.AddTx(tx)
		}
	})
	return blocks, receipts
}

type chainData struct {
	blocks []*types.Block
	offset int
}

var chain *chainData
var emptyChain *chainData

func init() {
	// Create a chain of blocks to import
	targetBlocks := 128
	blocks, _ := makeChain(targetBlocks, 0, testGenesis, false)
	chain = &chainData{blocks, 0}

	blocks, _ = makeChain(targetBlocks, 0, testGenesis, true)
	emptyChain = &chainData{blocks, 0}
}

func (chain *chainData) headers() []*types.Header {
	hdrs := make([]*types.Header, len(chain.blocks))
	for i, b := range chain.blocks {
		hdrs[i] = b.Header()
	}
	return hdrs
}

func (chain *chainData) Len() int {
	return len(chain.blocks)
}

func dummyPeer(id string) *peerConnection {
	p := &peerConnection{
		id:      id,
		lacking: make(map[common.Hash]struct{}),
	}
	return p
}

func TestBasics(t *testing.T) {
	numOfBlocks := len(emptyChain.blocks)
	numOfReceipts := len(emptyChain.blocks) / 2

	q := newQueue(10, 10)
	if !q.Idle() {
		t.Errorf("new queue should be idle")
	}
	q.Prepare(1, FastSync)
	if res := q.Results(false); len(res) != 0 {
		t.Fatal("new queue should have 0 results")
	}

	// Schedule a batch of headers
	q.Schedule(chain.headers(), 1)
	if q.Idle() {
		t.Errorf("queue should not be idle")
	}
	if got, exp := q.PendingBlocks(), chain.Len(); got != exp {
		t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
	}
	// Only non-empty receipts get added to the task-queue
	if got, exp := q.PendingReceipts(), 64; got != exp {
		t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
	}
	// Items are now queued for downloading, next step is that we tell the
	// queue that a certain peer will deliver them for us
	{
		peer := dummyPeer("peer-1")
		fetchReq, _, throttle := q.ReserveBodies(peer, 50)
		if !throttle {
			// queue size is only 10, so throttling should occur
			t.Fatal("should throttle")
		}
		// But we should still get the first things to fetch
		if got, exp := len(fetchReq.Headers), 5; got != exp {
			t.Fatalf("expected %d requests, got %d", exp, got)
		}
		if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
			t.Fatalf("expected header %d, got %d", exp, got)
		}
	}
	if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got {
		t.Errorf("expected block task queue to be %d, got %d", exp, got)
	}
	if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got {
		t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
	}
	{
		peer := dummyPeer("peer-2")
		fetchReq, _, throttle := q.ReserveBodies(peer, 50)

		// The second peer should hit throttling
		if !throttle {
			t.Fatalf("should throttle")
		}
		// And not get any fetches at all, since it was throttled to begin with
		if fetchReq != nil {
			t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers))
		}
	}
	if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got {
		t.Errorf("expected block task queue to be %d, got %d", exp, got)
	}
	if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got {
		t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
	}
	{
		// The receipt delivering peer should not be affected
		// by the throttling of body deliveries
		peer := dummyPeer("peer-3")
		fetchReq, _, throttle := q.ReserveReceipts(peer, 50)
		if !throttle {
			// queue size is only 10, so throttling should occur
			t.Fatal("should throttle")
		}
		// But we should still get the first things to fetch
		if got, exp := len(fetchReq.Headers), 5; got != exp {
			t.Fatalf("expected %d requests, got %d", exp, got)
		}
		if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {
			t.Fatalf("expected header %d, got %d", exp, got)
		}
	}
	if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got {
		t.Errorf("expected block task queue to be %d, got %d", exp, got)
	}
	if exp, got := q.receiptTaskQueue.Size(), numOfReceipts-5; exp != got {
		t.Errorf("expected receipt task queue to be %d, got %d", exp, got)
	}
	if got, exp := q.resultCache.countCompleted(), 0; got != exp {
		t.Errorf("wrong processable count, got %d, exp %d", got, exp)
	}
}

func TestEmptyBlocks(t *testing.T) {
	numOfBlocks := len(emptyChain.blocks)

	q := newQueue(10, 10)

	q.Prepare(1, FastSync)
	// Schedule a batch of headers
	q.Schedule(emptyChain.headers(), 1)
	if q.Idle() {
		t.Errorf("queue should not be idle")
	}
	if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp {
		t.Errorf("wrong pending block count, got %d, exp %d", got, exp)
	}
	if got, exp := q.PendingReceipts(), 0; got != exp {
		t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp)
	}
	// They won't be processable, because the fetchresults haven't been
	// created yet
	if got, exp := q.resultCache.countCompleted(), 0; got != exp {
		t.Errorf("wrong processable count, got %d, exp %d", got, exp)
	}

	// Items are now queued for downloading, next step is that we tell the
	// queue that a certain peer will deliver them for us.
	// That should trigger all of them to suddenly become 'done'
	{
		// Reserve blocks
		peer := dummyPeer("peer-1")
		fetchReq, _, _ := q.ReserveBodies(peer, 50)

		// there should be nothing to fetch, blocks are empty
		if fetchReq != nil {
			t.Fatal("there should be no body fetch tasks remaining")
		}
	}
	if q.blockTaskQueue.Size() != numOfBlocks-10 {
		t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size())
	}
	if q.receiptTaskQueue.Size() != 0 {
		t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size())
	}
	{
		peer := dummyPeer("peer-3")
		fetchReq, _, _ := q.ReserveReceipts(peer, 50)

		// there should be nothing to fetch, blocks are empty
		if fetchReq != nil {
			t.Fatal("there should be no receipt fetch tasks remaining")
		}
	}
	if q.blockTaskQueue.Size() != numOfBlocks-10 {
		t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size())
	}
	if q.receiptTaskQueue.Size() != 0 {
		t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size())
	}
	if got, exp := q.resultCache.countCompleted(), 10; got != exp {
		t.Errorf("wrong processable count, got %d, exp %d", got, exp)
	}
}

// XTestDelivery does some more extensive testing of events that happen,
// blocks that become known and peers that make reservations and deliveries.
// disabled since it's not really a unit-test, but can be executed to test
// some more advanced scenarios
func XTestDelivery(t *testing.T) {
	// the outside network, holding blocks
	blo, rec := makeChain(128, 0, testGenesis, false)
	world := newNetwork()
	world.receipts = rec
	world.chain = blo
	world.progress(10)
	if false {
		log.Root().SetHandler(log.StdoutHandler)
	}
	q := newQueue(10, 10)
	var wg sync.WaitGroup
	q.Prepare(1, FastSync)
	wg.Add(1)
	go func() {
		// deliver headers
		defer wg.Done()
		c := 1
		for {
			//fmt.Printf("getting headers from %d\n", c)
			hdrs := world.headers(c)
			l := len(hdrs)
			//fmt.Printf("scheduling %d headers, first %d last %d\n",
			//	l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64())
			q.Schedule(hdrs, uint64(c))
			c += l
		}
	}()
	wg.Add(1)
	go func() {
		// collect results
		defer wg.Done()
		tot := 0
		for {
			res := q.Results(true)
			tot += len(res)
			fmt.Printf("got %d results, %d tot\n", len(res), tot)
			// Now we can forget about these
			world.forget(res[len(res)-1].Header.Number.Uint64())
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		// reserve body fetch
		i := 4
		for {
			peer := dummyPeer(fmt.Sprintf("peer-%d", i))
			f, _, _ := q.ReserveBodies(peer, rand.Intn(30))
			if f != nil {
				var emptyList []*types.Header
				var txs [][]*types.Transaction
				var uncles [][]*types.Header
				numToSkip := rand.Intn(len(f.Headers))
				for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] {
					txs = append(txs, world.getTransactions(hdr.Number.Uint64()))
					uncles = append(uncles, emptyList)
				}
				time.Sleep(100 * time.Millisecond)
				_, err := q.DeliverBodies(peer.id, txs, uncles)
				if err != nil {
					fmt.Printf("delivered %d bodies %v\n", len(txs), err)
				}
			} else {
				i++
				time.Sleep(200 * time.Millisecond)
			}
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		// reserve receipt fetch
		peer := dummyPeer("peer-3")
		for {
			f, _, _ := q.ReserveReceipts(peer, rand.Intn(50))
			if f != nil {
				var rcs [][]*types.Receipt
				for _, hdr := range f.Headers {
					rcs = append(rcs, world.getReceipts(hdr.Number.Uint64()))
				}
				_, err := q.DeliverReceipts(peer.id, rcs)
				if err != nil {
					fmt.Printf("delivered %d receipts %v\n", len(rcs), err)
				}
				time.Sleep(100 * time.Millisecond)
			} else {
				time.Sleep(200 * time.Millisecond)
			}
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 50; i++ {
			time.Sleep(300 * time.Millisecond)
			//world.tick()
			//fmt.Printf("trying to progress\n")
			world.progress(rand.Intn(100))
		}
		for i := 0; i < 50; i++ {
			time.Sleep(2990 * time.Millisecond)
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			time.Sleep(990 * time.Millisecond)
			fmt.Printf("world block tip is %d\n",
				world.chain[len(world.chain)-1].Header().Number.Uint64())
			fmt.Println(q.Stats())
		}
	}()
	wg.Wait()
}

func newNetwork() *network {
	var l sync.RWMutex
	return &network{
		cond:   sync.NewCond(&l),
		offset: 1, // block 1 is at blocks[0]
	}
}

// network represents the outside network, holding blocks and receipts
type network struct {
	offset   int
	chain    []*types.Block
	receipts []types.Receipts
	lock     sync.RWMutex
	cond     *sync.Cond
}

func (n *network) getTransactions(blocknum uint64) types.Transactions {
	index := blocknum - uint64(n.offset)
	return n.chain[index].Transactions()
}

func (n *network) getReceipts(blocknum uint64) types.Receipts {
	index := blocknum - uint64(n.offset)
	if got := n.chain[index].Header().Number.Uint64(); got != blocknum {
		fmt.Printf("Err, got %d exp %d\n", got, blocknum)
		panic("sd")
	}
	return n.receipts[index]
}

func (n *network) forget(blocknum uint64) {
	index := blocknum - uint64(n.offset)
	n.chain = n.chain[index:]
	n.receipts = n.receipts[index:]
	n.offset = int(blocknum)
}

func (n *network) progress(numBlocks int) {
	n.lock.Lock()
	defer n.lock.Unlock()
	//fmt.Printf("progressing...\n")
	newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false)
	n.chain = append(n.chain, newBlocks...)
	n.receipts = append(n.receipts, newR...)
	n.cond.Broadcast()
}

func (n *network) headers(from int) []*types.Header {
	numHeaders := 128
	var hdrs []*types.Header
	index := from - n.offset

	for index >= len(n.chain) {
		// wait for progress
		n.cond.L.Lock()
		//fmt.Printf("header going into wait\n")
		n.cond.Wait()
		index = from - n.offset
		n.cond.L.Unlock()
	}
	n.lock.RLock()
	defer n.lock.RUnlock()
	for i, b := range n.chain[index:] {
		hdrs = append(hdrs, b.Header())
		if i >= numHeaders {
			break
		}
	}
	return hdrs
}
@ -1,195 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/core/types"
)

// resultStore implements a structure for maintaining fetchResults, tracking their
// download-progress and delivering (finished) results.
type resultStore struct {
	items        []*fetchResult // Downloaded but not yet delivered fetch results
	resultOffset uint64         // Offset of the first cached fetch result in the block chain

	// Internal index of the first non-completed entry, updated atomically when needed.
	// If all items are complete, this will equal len(items), so
	// *important*: it is not safe to use for indexing without checking against the length.
	indexIncomplete int32 // atomic access

	// throttleThreshold is the limit up to which we _want_ to fill the
	// results. If blocks are large, we want to limit the results to less
	// than the number of available slots, and maybe only fill 1024 out of
	// 8192 possible places. The queue will, at certain times, recalibrate
	// this index.
	throttleThreshold uint64

	lock sync.RWMutex
}

func newResultStore(size int) *resultStore {
	return &resultStore{
		resultOffset:      0,
		items:             make([]*fetchResult, size),
		throttleThreshold: uint64(size),
	}
}

// SetThrottleThreshold updates the throttling threshold based on the requested
// limit and the total queue capacity. It returns the (possibly capped) threshold.
func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
	r.lock.Lock()
	defer r.lock.Unlock()

	limit := uint64(len(r.items))
	if threshold >= limit {
		threshold = limit
	}
	r.throttleThreshold = threshold
	return r.throttleThreshold
}

// AddFetch adds a header for body/receipt fetching. This is used when the queue
// wants to reserve headers for fetching.
//
// It returns the following:
//
//	stale     - if true, this item has already passed, and should not be requested again
//	throttled - if true, the store is at capacity, this particular header is not prio now
//	item      - the result to store data into
//	err       - any error that occurred
func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {
	r.lock.Lock()
	defer r.lock.Unlock()

	var index int
	item, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64())
	if err != nil || stale || throttled {
		return stale, throttled, item, err
	}
	if item == nil {
		item = newFetchResult(header, fastSync)
		r.items[index] = item
	}
	return stale, throttled, item, err
}

// GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag
// is true, that means the header has already been delivered 'upstream'. This method
// does not bubble up the 'throttle' flag, since it's moot at the point in time when
// the item is downloaded and ready for delivery.
func (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) {
	r.lock.RLock()
	defer r.lock.RUnlock()

	res, _, stale, _, err := r.getFetchResult(headerNumber)
	return res, stale, err
}

// getFetchResult returns the fetchResult corresponding to the given item, and
// the index where the result is stored.
func (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) {
	index = int(int64(headerNumber) - int64(r.resultOffset))
	throttle = index >= int(r.throttleThreshold)
	stale = index < 0

	if index >= len(r.items) {
		err = fmt.Errorf("%w: index allocation went beyond available resultStore space "+
			"(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d)", errInvalidChain,
			index, headerNumber, r.resultOffset, len(r.items))
		return nil, index, stale, throttle, err
	}
	if stale {
		return nil, index, stale, throttle, nil
	}
	item = r.items[index]
	return item, index, stale, throttle, nil
}
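
For concreteness, a worked example of the slot arithmetic above (numbers chosen for illustration only):

	resultOffset = 100, throttleThreshold = 1024, len(items) = 8192

	header   99 -> index   -1 -> stale (already delivered upstream)
	header  100 -> index    0 -> first live slot
	header 1124 -> index 1024 -> throttled (past the calibrated threshold)
	header 8292 -> index 8192 -> beyond the store: errInvalidChain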

// HasCompletedItems returns true if there are processable items available.
// This method is cheaper than countCompleted.
func (r *resultStore) HasCompletedItems() bool {
	r.lock.RLock()
	defer r.lock.RUnlock()

	if len(r.items) == 0 {
		return false
	}
	if item := r.items[0]; item != nil && item.AllDone() {
		return true
	}
	return false
}

// countCompleted returns the number of items ready for delivery, stopping at
// the first non-complete item.
//
// The method assumes (at least) rlock is held.
func (r *resultStore) countCompleted() int {
	// We iterate from the already known complete point, and see
	// if any more has completed since last count
	index := atomic.LoadInt32(&r.indexIncomplete)
	for ; ; index++ {
		if index >= int32(len(r.items)) {
			break
		}
		result := r.items[index]
		if result == nil || !result.AllDone() {
			break
		}
	}
	atomic.StoreInt32(&r.indexIncomplete, index)
	return int(index)
}

// GetCompleted returns the next batch of completed fetchResults.
func (r *resultStore) GetCompleted(limit int) []*fetchResult {
	r.lock.Lock()
	defer r.lock.Unlock()

	completed := r.countCompleted()
	if limit > completed {
		limit = completed
	}
	results := make([]*fetchResult, limit)
	copy(results, r.items[:limit])

	// Delete the results from the cache and clear the tail.
	copy(r.items, r.items[limit:])
	for i := len(r.items) - limit; i < len(r.items); i++ {
		r.items[i] = nil
	}
	// Advance the expected block number of the first cache entry
	r.resultOffset += uint64(limit)
	atomic.AddInt32(&r.indexIncomplete, int32(-limit))

	return results
}

// Prepare initialises the offset with the given block number.
func (r *resultStore) Prepare(offset uint64) {
	r.lock.Lock()
	defer r.lock.Unlock()

	if r.resultOffset < offset {
		r.resultOffset = offset
	}
}
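
GetCompleted behaves like a sliding window: the first limit slots are copied out, the remainder shifts down, the freed tail is nil-ed, and resultOffset advances so block numbers keep mapping to slot indices. An illustrative sketch of draining the store, assuming the unexported API is reachable (e.g. from a test in this package):

// Sketch: drain the resultStore window as results complete (package-internal use).
func drainCompleted(r *resultStore) {
	for {
		batch := r.GetCompleted(64) // take up to 64 finished results from the head
		if len(batch) == 0 {
			return // nothing complete at the head of the window yet
		}
		// process(batch)... slots have shifted; resultOffset advanced by len(batch)
	}
}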
@ -1,638 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"golang.org/x/crypto/sha3"
)

// stateReq represents a batch of state fetch requests grouped together into
// a single data retrieval network packet.
type stateReq struct {
	nItems    uint16                    // Number of items requested for download (max is 384, so uint16 is sufficient)
	trieTasks map[string]*trieTask      // Trie node download tasks to track previous attempts
	codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
	timeout   time.Duration             // Maximum round trip time for this to complete
	timer     *time.Timer               // Timer to fire when the RTT timeout expires
	peer      *peerConnection           // Peer that we're requesting from
	delivered time.Time                 // Time when the packet was delivered (independent when we process it)
	response  [][]byte                  // Response data of the peer (nil for timeouts)
	dropped   bool                      // Flag whether the peer dropped off early
}

// timedOut reports whether this request timed out.
func (req *stateReq) timedOut() bool {
	return req.response == nil
}

// stateSyncStats is a collection of progress stats to report during a state trie
// sync to RPC requests as well as to display in user logs.
type stateSyncStats struct {
	processed  uint64 // Number of state entries processed
	duplicate  uint64 // Number of state entries downloaded twice
	unexpected uint64 // Number of non-requested state entries received
	pending    uint64 // Number of still pending state entries
}

// syncState starts downloading state with the given root hash.
func (d *Downloader) syncState(root common.Hash) *stateSync {
	// Create the state sync
	s := newStateSync(d, root)
	select {
	case d.stateSyncStart <- s:
		// If we tell the statesync to restart with a new root, we also need
		// to wait for it to actually also start -- when old requests have timed
		// out or been delivered
		<-s.started
	case <-d.quitCh:
		s.err = errCancelStateFetch
		close(s.done)
	}
	return s
}

// stateFetcher manages the active state sync and accepts requests
// on its behalf.
func (d *Downloader) stateFetcher() {
	for {
		select {
		case s := <-d.stateSyncStart:
			for next := s; next != nil; {
				next = d.runStateSync(next)
			}
		case <-d.stateCh:
			// Ignore state responses while no sync is running.
		case <-d.quitCh:
			return
		}
	}
}

// runStateSync runs a state synchronisation until it completes or another root
// hash is requested to be switched over to.
func (d *Downloader) runStateSync(s *stateSync) *stateSync {
	var (
		active   = make(map[string]*stateReq) // Currently in-flight requests
		finished []*stateReq                  // Completed or failed requests
		timeout  = make(chan *stateReq)       // Timed out active requests
	)
	log.Trace("State sync starting", "root", s.root)

	defer func() {
		// Cancel active request timers on exit. Also set peers to idle so they're
		// available for the next sync.
		for _, req := range active {
			req.timer.Stop()
			req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
		}
	}()
	go s.run()
	defer s.Cancel()

	// Listen for peer departure events to cancel assigned tasks
	peerDrop := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribePeerDrops(peerDrop)
	defer peerSub.Unsubscribe()

	for {
		// Enable sending of the first buffered element if there is one.
		var (
			deliverReq   *stateReq
			deliverReqCh chan *stateReq
		)
		if len(finished) > 0 {
			deliverReq = finished[0]
			deliverReqCh = s.deliver
		}

		select {
		// The stateSync lifecycle:
		case next := <-d.stateSyncStart:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return next

		case <-s.done:
			d.spindownStateSync(active, finished, timeout, peerDrop)
			return nil

		// Send the next finished request to the current sync:
		case deliverReqCh <- deliverReq:
			// Shift out the first request, but also set the emptied slot to nil for GC
			copy(finished, finished[1:])
			finished[len(finished)-1] = nil
			finished = finished[:len(finished)-1]

		// Handle incoming state packs:
		case pack := <-d.stateCh:
			// Discard any data not requested (or previously timed out)
			req := active[pack.PeerId()]
			if req == nil {
				log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.response = pack.(*statePack).states
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, pack.PeerId())

		// Handle dropped peer connections:
		case p := <-peerDrop:
			// Skip if no request is currently pending
			req := active[p.id]
			if req == nil {
				continue
			}
			// Finalize the request and queue up for processing
			req.timer.Stop()
			req.dropped = true
			req.delivered = time.Now()

			finished = append(finished, req)
			delete(active, p.id)

		// Handle timed-out requests:
		case req := <-timeout:
			// If the peer is already requesting something else, ignore the stale timeout.
			// This can happen when the timeout and the delivery happen simultaneously,
			// causing both pathways to trigger.
			if active[req.peer.id] != req {
				continue
			}
			req.delivered = time.Now()
			// Move the timed out data back into the download queue
			finished = append(finished, req)
			delete(active, req.peer.id)

		// Track outgoing state requests:
		case req := <-d.trackStateReq:
			// If an active request already exists for this peer, we have a problem. In
			// theory the trie node schedule must never assign two requests to the same
			// peer. In practice however, a peer might receive a request, disconnect and
			// immediately reconnect before the previous times out. In this case the first
			// request is never honored, alas we must not silently overwrite it, as that
			// causes valid requests to go missing and sync to get stuck.
			if old := active[req.peer.id]; old != nil {
				log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id)
				// Move the previous request to the finished set
				old.timer.Stop()
				old.dropped = true
				old.delivered = time.Now()
				finished = append(finished, old)
			}
			// Start a timer to notify the sync loop if the peer stalled.
			req.timer = time.AfterFunc(req.timeout, func() {
				timeout <- req
			})
			active[req.peer.id] = req
		}
	}
}
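
The "enable sending of the first buffered element" step above uses a classic Go idiom: deliverReqCh stays nil, and a send on a nil channel never becomes selectable, so that select case is effectively disabled until there is a finished request to hand over. A minimal sketch of the idiom in isolation (pump and its channels are illustrative, not part of this file):

// Sketch: enabling a select send-case only when data is buffered.
func pump(in <-chan int, out chan<- int) {
	var pending []int
	for {
		var (
			next   int
			sendCh chan<- int // nil disables the send case below
		)
		if len(pending) > 0 {
			next, sendCh = pending[0], out
		}
		select {
		case v := <-in: // always ready to buffer more work
			pending = append(pending, v)
		case sendCh <- next: // only selectable once something is buffered
			pending = pending[1:]
		}
	}
}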

// spindownStateSync 'drains' the outstanding requests; some will be delivered and
// others will time out. This is to ensure that when the next stateSync starts working,
// all peers are marked as idle and de facto _are_ idle.
func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
	log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))
	for len(active) > 0 {
		var (
			req    *stateReq
			reason string
		)
		select {
		// Handle (drop) incoming state packs:
		case pack := <-d.stateCh:
			req = active[pack.PeerId()]
			reason = "delivered"
		// Handle dropped peer connections:
		case p := <-peerDrop:
			req = active[p.id]
			reason = "peerdrop"
		// Handle timed-out requests:
		case req = <-timeout:
			reason = "timeout"
		}
		if req == nil {
			continue
		}
		req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
		req.timer.Stop()
		delete(active, req.peer.id)
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
	// The 'finished' set contains deliveries that we were going to pass to processing.
	// Those are now moot, but we still need to set those peers as idle, which would
	// otherwise have been done after processing
	for _, req := range finished {
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
}

// stateSync schedules requests for downloading a particular state trie defined
// by a given state root.
type stateSync struct {
	d *Downloader // Downloader instance to access and manage current peerset

	root   common.Hash        // State root currently being synced
	sched  *trie.Sync         // State trie sync scheduler defining the tasks
	keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with

	trieTasks map[string]*trieTask      // Set of trie node tasks currently queued for retrieval, indexed by path
	codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash

	numUncommitted   int
	bytesUncommitted int

	started chan struct{} // Signalled once the sync loop starts

	deliver    chan *stateReq // Delivery channel multiplexing peer responses
	cancel     chan struct{}  // Channel to signal a termination request
	cancelOnce sync.Once      // Ensures cancel only ever gets called once
	done       chan struct{}  // Channel to signal termination completion
	err        error          // Any error hit during sync (set before completion)
}

// trieTask represents a single trie node download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type trieTask struct {
	hash     common.Hash
	path     [][]byte
	attempts map[string]struct{}
}

// codeTask represents a single byte code download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type codeTask struct {
	attempts map[string]struct{}
}

// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
	// Hack the node scheme here. This is dead code, not used by the light
	// client at all; it just aims to pass the tests.
	return &stateSync{
		d:         d,
		root:      root,
		sched:     state.NewStateSync(root, d.stateDB, nil, rawdb.HashScheme),
		keccak:    sha3.NewLegacyKeccak256().(crypto.KeccakState),
		trieTasks: make(map[string]*trieTask),
		codeTasks: make(map[common.Hash]*codeTask),
		deliver:   make(chan *stateReq),
		cancel:    make(chan struct{}),
		done:      make(chan struct{}),
		started:   make(chan struct{}),
	}
}

// run starts the task assignment and response processing loop, blocking until
// it finishes, and finally notifying any goroutines waiting for the loop to
// finish.
func (s *stateSync) run() {
	close(s.started)
	if s.d.snapSync {
		s.err = s.d.SnapSyncer.Sync(s.root, s.cancel)
	} else {
		s.err = s.loop()
	}
	close(s.done)
}

// Wait blocks until the sync is done or canceled.
func (s *stateSync) Wait() error {
	<-s.done
	return s.err
}

// Cancel cancels the sync and waits until it has shut down.
func (s *stateSync) Cancel() error {
	s.cancelOnce.Do(func() {
		close(s.cancel)
	})
	return s.Wait()
}
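
Cancel is safe to call from multiple goroutines because the close of the cancel channel is guarded by sync.Once; closing an already-closed channel would panic. The idiom in isolation (canceller is an illustrative type, not part of this file):

// Sketch: idempotent cancellation via sync.Once.
type canceller struct {
	once   sync.Once
	cancel chan struct{}
}

func (c *canceller) Cancel() {
	c.once.Do(func() { close(c.cancel) }) // safe under concurrent calls
}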

// loop is the main event loop of a state trie sync. It is responsible for the
// assignment of new tasks to peers (including sending it to them) as well as
// for the processing of inbound data. Note that the loop does not directly
// receive data from peers, rather those are buffered up in the downloader and
// pushed here async. The reason is to decouple processing from data receipt
// and timeouts.
func (s *stateSync) loop() (err error) {
	// Listen for new peer events to assign tasks to them
	newPeer := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribeNewPeers(newPeer)
	defer peerSub.Unsubscribe()
	defer func() {
		cerr := s.commit(true)
		if err == nil {
			err = cerr
		}
	}()

	// Keep assigning new tasks until the sync completes or aborts
	for s.sched.Pending() > 0 {
		if err = s.commit(false); err != nil {
			return err
		}
		s.assignTasks()
		// Tasks assigned, wait for something to happen
		select {
		case <-newPeer:
			// New peer arrived, try to assign it download tasks

		case <-s.cancel:
			return errCancelStateFetch

		case <-s.d.cancelCh:
			return errCanceled

		case req := <-s.deliver:
			// Response, disconnect or timeout triggered, drop the peer if stalling
			log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
			if req.nItems <= 2 && !req.dropped && req.timedOut() {
				// 2 items are the minimum requested; if even that times out, we've no use of
				// this peer at the moment.
				log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
				if s.d.dropPeer == nil {
					// The dropPeer method is nil when `--copydb` is used for a local copy.
					// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
					req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id)
				} else {
					s.d.dropPeer(req.peer.id)

					// If this peer was the master peer, abort sync immediately
					s.d.cancelLock.RLock()
					master := req.peer.id == s.d.cancelPeer
					s.d.cancelLock.RUnlock()

					if master {
						s.d.cancel()
						return errTimeout
					}
				}
			}
			// Process all the received blobs and check for stale delivery
			delivered, err := s.process(req)
			req.peer.SetNodeDataIdle(delivered, req.delivered)
			if err != nil {
				log.Warn("Node data write error", "err", err)
				return err
			}
		}
	}
	return nil
}

func (s *stateSync) commit(force bool) error {
	if !force && s.bytesUncommitted < ethdb.IdealBatchSize {
		return nil
	}
	start := time.Now()
	b := s.d.stateDB.NewBatch()
	if err := s.sched.Commit(b); err != nil {
		return err
	}
	if err := b.Write(); err != nil {
		return fmt.Errorf("DB write error: %v", err)
	}
	s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
	s.numUncommitted = 0
	s.bytesUncommitted = 0
	return nil
}
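
commit illustrates the standard batching pattern against an ethdb backend: accumulate writes until they reach ethdb.IdealBatchSize, then flush them in one batch (or flush unconditionally when forced). A generic sketch of the same pattern, assuming a hypothetical in-memory put-queue rather than the trie scheduler:

// Sketch: size-thresholded batch writes, mirroring commit above.
func flushIfLarge(db ethdb.Database, pending map[string][]byte, size *int, force bool) error {
	if !force && *size < ethdb.IdealBatchSize {
		return nil // keep accumulating until the batch is worth flushing
	}
	batch := db.NewBatch()
	for k, v := range pending {
		if err := batch.Put([]byte(k), v); err != nil {
			return err
		}
		delete(pending, k)
	}
	*size = 0
	return batch.Write() // one disk round-trip for the whole batch
}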

// assignTasks attempts to assign new tasks to all idle peers, either from the
// batch currently being retried, or fetching new data from the trie sync itself.
func (s *stateSync) assignTasks() {
	// Iterate over all idle peers and try to assign them state fetches
	peers, _ := s.d.peers.NodeDataIdlePeers()
	for _, p := range peers {
		// Assign a batch of fetches proportional to the estimated latency/bandwidth
		cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip())
		req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()}

		nodes, _, codes := s.fillTasks(cap, req)

		// If the peer was assigned tasks to fetch, send the network request
		if len(nodes)+len(codes) > 0 {
			req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root)
			select {
			case s.d.trackStateReq <- req:
				req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x
			case <-s.cancel:
			case <-s.d.cancelCh:
			}
		}
	}
}
| 
 | ||||
| // fillTasks fills the given request object with a maximum of n state download
 | ||||
| // tasks to send to the remote peer.
 | ||||
| func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { | ||||
| 	// Refill available tasks from the scheduler.
 | ||||
| 	if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 { | ||||
| 		paths, hashes, codes := s.sched.Missing(fill) | ||||
| 		for i, path := range paths { | ||||
| 			s.trieTasks[path] = &trieTask{ | ||||
| 				hash:     hashes[i], | ||||
| 				path:     trie.NewSyncPath([]byte(path)), | ||||
| 				attempts: make(map[string]struct{}), | ||||
| 			} | ||||
| 		} | ||||
| 		for _, hash := range codes { | ||||
| 			s.codeTasks[hash] = &codeTask{ | ||||
| 				attempts: make(map[string]struct{}), | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	// Find tasks that haven't been tried with the request's peer. Prefer code
 | ||||
| 	// over trie nodes as those can be written to disk and forgotten about.
 | ||||
| 	nodes = make([]common.Hash, 0, n) | ||||
| 	paths = make([]trie.SyncPath, 0, n) | ||||
| 	codes = make([]common.Hash, 0, n) | ||||
| 
 | ||||
| 	req.trieTasks = make(map[string]*trieTask, n) | ||||
| 	req.codeTasks = make(map[common.Hash]*codeTask, n) | ||||
| 
 | ||||
| 	for hash, t := range s.codeTasks { | ||||
| 		// Stop when we've gathered enough requests
 | ||||
| 		if len(nodes)+len(codes) == n { | ||||
| 			break | ||||
| 		} | ||||
| 		// Skip any requests we've already tried from this peer
 | ||||
| 		if _, ok := t.attempts[req.peer.id]; ok { | ||||
| 			continue | ||||
| 		} | ||||
| 		// Assign the request to this peer
 | ||||
| 		t.attempts[req.peer.id] = struct{}{} | ||||
| 		codes = append(codes, hash) | ||||
| 		req.codeTasks[hash] = t | ||||
| 		delete(s.codeTasks, hash) | ||||
| 	} | ||||
| 	for path, t := range s.trieTasks { | ||||
| 		// Stop when we've gathered enough requests
 | ||||
| 		if len(nodes)+len(codes) == n { | ||||
| 			break | ||||
| 		} | ||||
| 		// Skip any requests we've already tried from this peer
 | ||||
| 		if _, ok := t.attempts[req.peer.id]; ok { | ||||
| 			continue | ||||
| 		} | ||||
| 		// Assign the request to this peer
 | ||||
| 		t.attempts[req.peer.id] = struct{}{} | ||||
| 
 | ||||
| 		nodes = append(nodes, t.hash) | ||||
| 		paths = append(paths, t.path) | ||||
| 
 | ||||
| 		req.trieTasks[path] = t | ||||
| 		delete(s.trieTasks, path) | ||||
| 	} | ||||
| 	req.nItems = uint16(len(nodes) + len(codes)) | ||||
| 	return nodes, paths, codes | ||||
| } | ||||
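The per-peer attempt sets filled in above drive both the skip rule here and the abort rule in process below. A self-contained toy version of that bookkeeping, with hypothetical names:

    package main

    import "fmt"

    // task mirrors the attempt bookkeeping of trieTask/codeTask: each task
    // remembers which peers have already tried (and failed) to deliver it.
    type task struct {
    	attempts map[string]struct{}
    }

    // assignable reports whether the task may still be handed to the peer.
    func assignable(t *task, peerID string) bool {
    	_, tried := t.attempts[peerID]
    	return !tried
    }

    func main() {
    	t := &task{attempts: map[string]struct{}{"peer-a": {}}}
    	fmt.Println(assignable(t, "peer-a")) // false: already tried by peer-a
    	fmt.Println(assignable(t, "peer-b")) // true: peer-b is still fresh

    	// The abort rule from process: once every connected peer has tried
    	// the task, nobody has the data and the sync gives up.
    	npeers := 1
    	fmt.Println(len(t.attempts) >= npeers) // true for a 1-peer network
    }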
| 
 | ||||
| // process iterates over a batch of delivered state data, injecting each item
 | ||||
| // into a running state sync, re-queuing any items that were requested but not
 | ||||
| // delivered. Returns the number of items the peer actually managed to deliver
 | ||||
| // that were of value, and any error that occurred.
 | ||||
| func (s *stateSync) process(req *stateReq) (int, error) { | ||||
| 	// Collect processing stats and update progress if valid data was received
 | ||||
| 	duplicate, unexpected, successful := 0, 0, 0 | ||||
| 
 | ||||
| 	defer func(start time.Time) { | ||||
| 		if duplicate > 0 || unexpected > 0 { | ||||
| 			s.updateStats(0, duplicate, unexpected, time.Since(start)) | ||||
| 		} | ||||
| 	}(time.Now()) | ||||
| 
 | ||||
| 	// Iterate over all the delivered data and inject one-by-one into the trie
 | ||||
| 	for _, blob := range req.response { | ||||
| 		hash, err := s.processNodeData(req.trieTasks, req.codeTasks, blob) | ||||
| 		switch err { | ||||
| 		case nil: | ||||
| 			s.numUncommitted++ | ||||
| 			s.bytesUncommitted += len(blob) | ||||
| 			successful++ | ||||
| 		case trie.ErrNotRequested: | ||||
| 			unexpected++ | ||||
| 		case trie.ErrAlreadyProcessed: | ||||
| 			duplicate++ | ||||
| 		default: | ||||
| 			return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err) | ||||
| 		} | ||||
| 	} | ||||
| 	// Put unfulfilled tasks back into the retry queue
 | ||||
| 	npeers := s.d.peers.Len() | ||||
| 	for path, task := range req.trieTasks { | ||||
| 		// If the node did deliver something, missing items may be due to a protocol
 | ||||
| 		// limit or a previous timeout + delayed delivery. Both cases should permit
 | ||||
| 		// the node to retry the missing items (to avoid single-peer stalls).
 | ||||
| 		if len(req.response) > 0 || req.timedOut() { | ||||
| 			delete(task.attempts, req.peer.id) | ||||
| 		} | ||||
| 		// If we've requested the node too many times already, it may be a malicious
 | ||||
| 		// sync where nobody has the right data. Abort.
 | ||||
| 		if len(task.attempts) >= npeers { | ||||
| 			return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", task.hash.TerminalString(), len(task.attempts), npeers) | ||||
| 		} | ||||
| 		// Missing item, place into the retry queue.
 | ||||
| 		s.trieTasks[path] = task | ||||
| 	} | ||||
| 	for hash, task := range req.codeTasks { | ||||
| 		// If the node did deliver something, missing items may be due to a protocol
 | ||||
| 		// limit or a previous timeout + delayed delivery. Both cases should permit
 | ||||
| 		// the node to retry the missing items (to avoid single-peer stalls).
 | ||||
| 		if len(req.response) > 0 || req.timedOut() { | ||||
| 			delete(task.attempts, req.peer.id) | ||||
| 		} | ||||
| 		// If we've requested the node too many times already, it may be a malicious
 | ||||
| 		// sync where nobody has the right data. Abort.
 | ||||
| 		if len(task.attempts) >= npeers { | ||||
| 			return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) | ||||
| 		} | ||||
| 		// Missing item, place into the retry queue.
 | ||||
| 		s.codeTasks[hash] = task | ||||
| 	} | ||||
| 	return successful, nil | ||||
| } | ||||
| 
 | ||||
| // processNodeData tries to inject a trie node data blob delivered from a remote
 | ||||
| // peer into the state trie, returning whether anything useful was written or any
 | ||||
| // error occurred.
 | ||||
| //
 | ||||
| // If multiple requests correspond to the same hash, this method will inject the
 | ||||
| // blob as a result for the first one only, leaving the remaining duplicates to
 | ||||
| // be fetched again.
 | ||||
| func (s *stateSync) processNodeData(nodeTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, blob []byte) (common.Hash, error) { | ||||
| 	var hash common.Hash | ||||
| 	s.keccak.Reset() | ||||
| 	s.keccak.Write(blob) | ||||
| 	s.keccak.Read(hash[:]) | ||||
| 
 | ||||
| 	if _, present := codeTasks[hash]; present { | ||||
| 		err := s.sched.ProcessCode(trie.CodeSyncResult{ | ||||
| 			Hash: hash, | ||||
| 			Data: blob, | ||||
| 		}) | ||||
| 		delete(codeTasks, hash) | ||||
| 		return hash, err | ||||
| 	} | ||||
| 	for path, task := range nodeTasks { | ||||
| 		if task.hash == hash { | ||||
| 			err := s.sched.ProcessNode(trie.NodeSyncResult{ | ||||
| 				Path: path, | ||||
| 				Data: blob, | ||||
| 			}) | ||||
| 			delete(nodeTasks, path) | ||||
| 			return hash, err | ||||
| 		} | ||||
| 	} | ||||
| 	return common.Hash{}, trie.ErrNotRequested | ||||
| } | ||||
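The lookup above is content-addressed: a delivered blob is identified purely by its keccak256 digest, which is why duplicate requests for the same hash can share one result. The same digest can be computed with go-ethereum's crypto helpers:

    package main

    import (
    	"fmt"

    	"github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
    	blob := []byte("some trie node or contract byte code")
    	// Keccak256Hash yields the same digest processNodeData derives from
    	// its reusable keccak state.
    	hash := crypto.Keccak256Hash(blob)
    	fmt.Println(hash.TerminalString())
    }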
| 
 | ||||
| // updateStats bumps the various state sync progress counters and displays a log
 | ||||
| // message for the user to see.
 | ||||
| func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) { | ||||
| 	s.d.syncStatsLock.Lock() | ||||
| 	defer s.d.syncStatsLock.Unlock() | ||||
| 
 | ||||
| 	s.d.syncStatsState.pending = uint64(s.sched.Pending()) | ||||
| 	s.d.syncStatsState.processed += uint64(written) | ||||
| 	s.d.syncStatsState.duplicate += uint64(duplicate) | ||||
| 	s.d.syncStatsState.unexpected += uint64(unexpected) | ||||
| 
 | ||||
| 	if written > 0 || duplicate > 0 || unexpected > 0 { | ||||
| 		log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected) | ||||
| 	} | ||||
| 	//if written > 0 {
 | ||||
| 	//rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
 | ||||
| 	//}
 | ||||
| } | ||||
| @ -1,235 +0,0 @@ | ||||
| // Copyright 2018 The go-ethereum Authors
 | ||||
| // This file is part of the go-ethereum library.
 | ||||
| //
 | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||
| // it under the terms of the GNU Lesser General Public License as published by
 | ||||
| // the Free Software Foundation, either version 3 of the License, or
 | ||||
| // (at your option) any later version.
 | ||||
| //
 | ||||
| // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||
| // GNU Lesser General Public License for more details.
 | ||||
| //
 | ||||
| // You should have received a copy of the GNU Lesser General Public License
 | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||
| 
 | ||||
| package downloader | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math/big" | ||||
| 	"sync" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/common" | ||||
| 	"github.com/ethereum/go-ethereum/consensus/ethash" | ||||
| 	"github.com/ethereum/go-ethereum/core" | ||||
| 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||
| 	"github.com/ethereum/go-ethereum/core/types" | ||||
| 	"github.com/ethereum/go-ethereum/crypto" | ||||
| 	"github.com/ethereum/go-ethereum/params" | ||||
| ) | ||||
| 
 | ||||
| // Test chain parameters.
 | ||||
| var ( | ||||
| 	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") | ||||
| 	testAddress = crypto.PubkeyToAddress(testKey.PublicKey) | ||||
| 	testDB      = rawdb.NewMemoryDatabase() | ||||
| 
 | ||||
| 	gspec = core.Genesis{ | ||||
| 		Alloc:   core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, | ||||
| 		BaseFee: big.NewInt(params.InitialBaseFee), | ||||
| 	} | ||||
| 	testGenesis = gspec.MustCommit(testDB) | ||||
| ) | ||||
| 
 | ||||
| // The common prefix of all test chains:
 | ||||
| var testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis) | ||||
| 
 | ||||
| // Different forks on top of the base chain:
 | ||||
| var testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain | ||||
| 
 | ||||
| func init() { | ||||
| 	var forkLen = int(fullMaxForkAncestry + 50) | ||||
| 	var wg sync.WaitGroup | ||||
| 	wg.Add(3) | ||||
| 	go func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }() | ||||
| 	go func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }() | ||||
| 	go func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }() | ||||
| 	wg.Wait() | ||||
| } | ||||
| 
 | ||||
| type testChain struct { | ||||
| 	genesis  *types.Block | ||||
| 	chain    []common.Hash | ||||
| 	headerm  map[common.Hash]*types.Header | ||||
| 	blockm   map[common.Hash]*types.Block | ||||
| 	receiptm map[common.Hash][]*types.Receipt | ||||
| 	tdm      map[common.Hash]*big.Int | ||||
| } | ||||
| 
 | ||||
| // newTestChain creates a blockchain of the given length.
 | ||||
| func newTestChain(length int, genesis *types.Block) *testChain { | ||||
| 	tc := new(testChain).copy(length) | ||||
| 	tc.genesis = genesis | ||||
| 	tc.chain = append(tc.chain, genesis.Hash()) | ||||
| 	tc.headerm[tc.genesis.Hash()] = tc.genesis.Header() | ||||
| 	tc.tdm[tc.genesis.Hash()] = tc.genesis.Difficulty() | ||||
| 	tc.blockm[tc.genesis.Hash()] = tc.genesis | ||||
| 	tc.generate(length-1, 0, genesis, false) | ||||
| 	return tc | ||||
| } | ||||
| 
 | ||||
| // makeFork creates a fork on top of the test chain.
 | ||||
| func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain { | ||||
| 	fork := tc.copy(tc.len() + length) | ||||
| 	fork.generate(length, seed, tc.headBlock(), heavy) | ||||
| 	return fork | ||||
| } | ||||
| 
 | ||||
| // shorten creates a copy of the chain with the given length. It panics if the
 | ||||
| // length is longer than the number of available blocks.
 | ||||
| func (tc *testChain) shorten(length int) *testChain { | ||||
| 	if length > tc.len() { | ||||
| 		panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, tc.len())) | ||||
| 	} | ||||
| 	return tc.copy(length) | ||||
| } | ||||
| 
 | ||||
| func (tc *testChain) copy(newlen int) *testChain { | ||||
| 	cpy := &testChain{ | ||||
| 		genesis:  tc.genesis, | ||||
| 		headerm:  make(map[common.Hash]*types.Header, newlen), | ||||
| 		blockm:   make(map[common.Hash]*types.Block, newlen), | ||||
| 		receiptm: make(map[common.Hash][]*types.Receipt, newlen), | ||||
| 		tdm:      make(map[common.Hash]*big.Int, newlen), | ||||
| 	} | ||||
| 	for i := 0; i < len(tc.chain) && i < newlen; i++ { | ||||
| 		hash := tc.chain[i] | ||||
| 		cpy.chain = append(cpy.chain, tc.chain[i]) | ||||
| 		cpy.tdm[hash] = tc.tdm[hash] | ||||
| 		cpy.blockm[hash] = tc.blockm[hash] | ||||
| 		cpy.headerm[hash] = tc.headerm[hash] | ||||
| 		cpy.receiptm[hash] = tc.receiptm[hash] | ||||
| 	} | ||||
| 	return cpy | ||||
| } | ||||
| 
 | ||||
| // generate creates a chain of n blocks starting at and including parent.
 | ||||
| // The returned hash chain is ordered head->parent. In addition, every 22nd block
 | ||||
| // contains a transaction and every 5th an uncle to allow testing correct block
 | ||||
| // reassembly.
 | ||||
| func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) { | ||||
| 	// start := time.Now()
 | ||||
| 	// defer func() { fmt.Printf("test chain generated in %v\n", time.Since(start)) }()
 | ||||
| 
 | ||||
| 	blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { | ||||
| 		block.SetCoinbase(common.Address{seed}) | ||||
| 		// If a heavy chain is requested, shorten the block times to raise the difficulty
 | ||||
| 		if heavy { | ||||
| 			block.OffsetTime(-1) | ||||
| 		} | ||||
| 		// Include transactions addressed to the miner to make the blocks more interesting.
 | ||||
| 		if parent == tc.genesis && i%22 == 0 { | ||||
| 			signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp()) | ||||
| 			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) | ||||
| 			if err != nil { | ||||
| 				panic(err) | ||||
| 			} | ||||
| 			block.AddTx(tx) | ||||
| 		} | ||||
| 		// If the block number is a multiple of 5, add a bonus uncle to the block.
 | ||||
| 		if i > 0 && i%5 == 0 { | ||||
| 			block.AddUncle(&types.Header{ | ||||
| 				ParentHash: block.PrevBlock(i - 1).Hash(), | ||||
| 				Number:     big.NewInt(block.Number().Int64() - 1), | ||||
| 			}) | ||||
| 		} | ||||
| 	}) | ||||
| 
 | ||||
| 	// Convert the block-chain into a hash-chain and header/block maps
 | ||||
| 	td := new(big.Int).Set(tc.td(parent.Hash())) | ||||
| 	for i, b := range blocks { | ||||
| 		td.Add(td, b.Difficulty()) | ||||
| 		hash := b.Hash() | ||||
| 		tc.chain = append(tc.chain, hash) | ||||
| 		tc.blockm[hash] = b | ||||
| 		tc.headerm[hash] = b.Header() | ||||
| 		tc.receiptm[hash] = receipts[i] | ||||
| 		tc.tdm[hash] = new(big.Int).Set(td) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // len returns the total number of blocks in the chain.
 | ||||
| func (tc *testChain) len() int { | ||||
| 	return len(tc.chain) | ||||
| } | ||||
| 
 | ||||
| // headBlock returns the head of the chain.
 | ||||
| func (tc *testChain) headBlock() *types.Block { | ||||
| 	return tc.blockm[tc.chain[len(tc.chain)-1]] | ||||
| } | ||||
| 
 | ||||
| // td returns the total difficulty of the given block.
 | ||||
| func (tc *testChain) td(hash common.Hash) *big.Int { | ||||
| 	return tc.tdm[hash] | ||||
| } | ||||
| 
 | ||||
| // headersByHash returns headers in order from the given hash.
 | ||||
| func (tc *testChain) headersByHash(origin common.Hash, amount int, skip int, reverse bool) []*types.Header { | ||||
| 	num, _ := tc.hashToNumber(origin) | ||||
| 	return tc.headersByNumber(num, amount, skip, reverse) | ||||
| } | ||||
| 
 | ||||
| // headersByNumber returns headers from the given number.
 | ||||
| func (tc *testChain) headersByNumber(origin uint64, amount int, skip int, reverse bool) []*types.Header { | ||||
| 	result := make([]*types.Header, 0, amount) | ||||
| 
 | ||||
| 	if !reverse { | ||||
| 		for num := origin; num < uint64(len(tc.chain)) && len(result) < amount; num += uint64(skip) + 1 { | ||||
| 			if header, ok := tc.headerm[tc.chain[int(num)]]; ok { | ||||
| 				result = append(result, header) | ||||
| 			} | ||||
| 		} | ||||
| 	} else { | ||||
| 		for num := int64(origin); num >= 0 && len(result) < amount; num -= int64(skip) + 1 { | ||||
| 			if header, ok := tc.headerm[tc.chain[int(num)]]; ok { | ||||
| 				result = append(result, header) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
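The step size in both loops is skip+1, which is easy to misread. As a worked example: origin 10, amount 3, skip 1 selects headers 10, 12 and 14 (or 10, 8 and 6 when reverse is set). A runnable sketch of the forward case:

    package main

    import "fmt"

    func main() {
    	origin, amount, skip := uint64(10), 3, uint64(1)
    	for num, got := origin, 0; got < amount; num, got = num+skip+1, got+1 {
    		fmt.Println(num) // prints 10, 12, 14
    	}
    }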
| 
 | ||||
| // receipts returns the receipts of the given block hashes.
 | ||||
| func (tc *testChain) receipts(hashes []common.Hash) [][]*types.Receipt { | ||||
| 	results := make([][]*types.Receipt, 0, len(hashes)) | ||||
| 	for _, hash := range hashes { | ||||
| 		if receipt, ok := tc.receiptm[hash]; ok { | ||||
| 			results = append(results, receipt) | ||||
| 		} | ||||
| 	} | ||||
| 	return results | ||||
| } | ||||
| 
 | ||||
| // bodies returns the block bodies of the given block hashes.
 | ||||
| func (tc *testChain) bodies(hashes []common.Hash) ([][]*types.Transaction, [][]*types.Header) { | ||||
| 	transactions := make([][]*types.Transaction, 0, len(hashes)) | ||||
| 	uncles := make([][]*types.Header, 0, len(hashes)) | ||||
| 	for _, hash := range hashes { | ||||
| 		if block, ok := tc.blockm[hash]; ok { | ||||
| 			transactions = append(transactions, block.Transactions()) | ||||
| 			uncles = append(uncles, block.Uncles()) | ||||
| 		} | ||||
| 	} | ||||
| 	return transactions, uncles | ||||
| } | ||||
| 
 | ||||
| func (tc *testChain) hashToNumber(target common.Hash) (uint64, bool) { | ||||
| 	for num, hash := range tc.chain { | ||||
| 		if hash == target { | ||||
| 			return uint64(num), true | ||||
| 		} | ||||
| 	} | ||||
| 	return 0, false | ||||
| } | ||||
| @ -1,79 +0,0 @@ | ||||
| // Copyright 2015 The go-ethereum Authors
 | ||||
| // This file is part of the go-ethereum library.
 | ||||
| //
 | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||
| // it under the terms of the GNU Lesser General Public License as published by
 | ||||
| // the Free Software Foundation, either version 3 of the License, or
 | ||||
| // (at your option) any later version.
 | ||||
| //
 | ||||
| // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||
| // GNU Lesser General Public License for more details.
 | ||||
| //
 | ||||
| // You should have received a copy of the GNU Lesser General Public License
 | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||
| 
 | ||||
| package downloader | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/core/types" | ||||
| ) | ||||
| 
 | ||||
| // peerDropFn is a callback type for dropping a peer detected as malicious.
 | ||||
| type peerDropFn func(id string) | ||||
| 
 | ||||
| // dataPack is a data message returned by a peer for some query.
 | ||||
| type dataPack interface { | ||||
| 	PeerId() string | ||||
| 	Items() int | ||||
| 	Stats() string | ||||
| } | ||||
| 
 | ||||
| // headerPack is a batch of block headers returned by a peer.
 | ||||
| type headerPack struct { | ||||
| 	peerID  string | ||||
| 	headers []*types.Header | ||||
| } | ||||
| 
 | ||||
| func (p *headerPack) PeerId() string { return p.peerID } | ||||
| func (p *headerPack) Items() int     { return len(p.headers) } | ||||
| func (p *headerPack) Stats() string  { return fmt.Sprintf("%d", len(p.headers)) } | ||||
| 
 | ||||
| // bodyPack is a batch of block bodies returned by a peer.
 | ||||
| type bodyPack struct { | ||||
| 	peerID       string | ||||
| 	transactions [][]*types.Transaction | ||||
| 	uncles       [][]*types.Header | ||||
| } | ||||
| 
 | ||||
| func (p *bodyPack) PeerId() string { return p.peerID } | ||||
| func (p *bodyPack) Items() int { | ||||
| 	if len(p.transactions) <= len(p.uncles) { | ||||
| 		return len(p.transactions) | ||||
| 	} | ||||
| 	return len(p.uncles) | ||||
| } | ||||
| func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) } | ||||
| 
 | ||||
| // receiptPack is a batch of receipts returned by a peer.
 | ||||
| type receiptPack struct { | ||||
| 	peerID   string | ||||
| 	receipts [][]*types.Receipt | ||||
| } | ||||
| 
 | ||||
| func (p *receiptPack) PeerId() string { return p.peerID } | ||||
| func (p *receiptPack) Items() int     { return len(p.receipts) } | ||||
| func (p *receiptPack) Stats() string  { return fmt.Sprintf("%d", len(p.receipts)) } | ||||
| 
 | ||||
| // statePack is a batch of states returned by a peer.
 | ||||
| type statePack struct { | ||||
| 	peerID string | ||||
| 	states [][]byte | ||||
| } | ||||
| 
 | ||||
| func (p *statePack) PeerId() string { return p.peerID } | ||||
| func (p *statePack) Items() int     { return len(p.states) } | ||||
| func (p *statePack) Stats() string  { return fmt.Sprintf("%d", len(p.states)) } | ||||
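All four pack types satisfy dataPack, so the downloader can log and account for any delivery without knowing its concrete type. A self-contained sketch of that usage, re-declaring a minimal copy of the interface with hypothetical names:

    package main

    import "fmt"

    // pack is a minimal stand-in for the dataPack interface above.
    type pack interface {
    	PeerId() string
    	Items() int
    	Stats() string
    }

    type headerDelivery struct {
    	peer string
    	n    int
    }

    func (h *headerDelivery) PeerId() string { return h.peer }
    func (h *headerDelivery) Items() int     { return h.n }
    func (h *headerDelivery) Stats() string  { return fmt.Sprintf("%d", h.n) }

    func main() {
    	var p pack = &headerDelivery{peer: "peer-a", n: 3}
    	// Generic accounting, independent of the concrete pack type.
    	fmt.Printf("peer %s delivered %d item(s): %s\n", p.PeerId(), p.Items(), p.Stats())
    }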
les/fetcher.go
							| @ -1,563 +0,0 @@ | ||||
| // Copyright 2016 The go-ethereum Authors
 | ||||
| // This file is part of the go-ethereum library.
 | ||||
| //
 | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||
| // it under the terms of the GNU Lesser General Public License as published by
 | ||||
| // the Free Software Foundation, either version 3 of the License, or
 | ||||
| // (at your option) any later version.
 | ||||
| //
 | ||||
| // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||
| // GNU Lesser General Public License for more details.
 | ||||
| //
 | ||||
| // You should have received a copy of the GNU Lesser General Public License
 | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| import ( | ||||
| 	"math/big" | ||||
| 	"math/rand" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/common" | ||||
| 	"github.com/ethereum/go-ethereum/consensus" | ||||
| 	"github.com/ethereum/go-ethereum/core" | ||||
| 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||
| 	"github.com/ethereum/go-ethereum/core/types" | ||||
| 	"github.com/ethereum/go-ethereum/ethdb" | ||||
| 	"github.com/ethereum/go-ethereum/les/fetcher" | ||||
| 	"github.com/ethereum/go-ethereum/light" | ||||
| 	"github.com/ethereum/go-ethereum/log" | ||||
| 	"github.com/ethereum/go-ethereum/p2p/enode" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	blockDelayTimeout    = 10 * time.Second       // Timeout for retrieving the headers from the peer
 | ||||
| 	gatherSlack          = 100 * time.Millisecond // Interval used to collate almost-expired requests
 | ||||
| 	cachedAnnosThreshold = 64                     // The maximum number of queued announcements
 | ||||
| ) | ||||
| 
 | ||||
| // announce represents a new block announcement from the les server.
 | ||||
| type announce struct { | ||||
| 	data   *announceData | ||||
| 	trust  bool | ||||
| 	peerid enode.ID | ||||
| } | ||||
| 
 | ||||
| // request represents a record of a header request that has been sent.
 | ||||
| type request struct { | ||||
| 	reqid  uint64 | ||||
| 	peerid enode.ID | ||||
| 	sendAt time.Time | ||||
| 	hash   common.Hash | ||||
| } | ||||
| 
 | ||||
| // response represents a response packet from the network, as well as a channel
 | ||||
| // to return all un-requested data.
 | ||||
| type response struct { | ||||
| 	reqid   uint64 | ||||
| 	headers []*types.Header | ||||
| 	peerid  enode.ID | ||||
| 	remain  chan []*types.Header | ||||
| } | ||||
| 
 | ||||
| // fetcherPeer holds the fetcher-specific information for each active peer
 | ||||
| type fetcherPeer struct { | ||||
| 	latest *announceData // The latest announcement sent from the peer
 | ||||
| 
 | ||||
| 	// The following two fields track the latest announcements from the
 | ||||
| 	// peer in a size-limited cache. We assume that all enqueued
 | ||||
| 	// announcements are td-monotonic.
 | ||||
| 	announces map[common.Hash]*announce // Announcement map
 | ||||
| 	fifo      []common.Hash             // FIFO announces list
 | ||||
| } | ||||
| 
 | ||||
| // addAnno enqueues a new trusted announcement. If the queue overflows,
 | ||||
| // the oldest announcements are evicted.
 | ||||
| func (fp *fetcherPeer) addAnno(anno *announce) { | ||||
| 	// Short circuit if the announcement already exists. Normally this should
 | ||||
| 	// never happen, since only monotonic announcements are accepted. But an
 | ||||
| 	// adversary may feed us fake announcements with a higher td but the same
 | ||||
| 	// hash. In this case, ignore the announcement anyway.
 | ||||
| 	hash := anno.data.Hash | ||||
| 	if _, exist := fp.announces[hash]; exist { | ||||
| 		return | ||||
| 	} | ||||
| 	fp.announces[hash] = anno | ||||
| 	fp.fifo = append(fp.fifo, hash) | ||||
| 
 | ||||
| 	// Evict oldest if the announces are oversized.
 | ||||
| 	if len(fp.fifo)-cachedAnnosThreshold > 0 { | ||||
| 		for i := 0; i < len(fp.fifo)-cachedAnnosThreshold; i++ { | ||||
| 			delete(fp.announces, fp.fifo[i]) | ||||
| 		} | ||||
| 		copy(fp.fifo, fp.fifo[len(fp.fifo)-cachedAnnosThreshold:]) | ||||
| 		fp.fifo = fp.fifo[:cachedAnnosThreshold] | ||||
| 	} | ||||
| } | ||||
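The eviction above must keep the announcement map and the FIFO slice in lockstep, deleting map entries for exactly the hashes that are shifted out of the slice. A standalone sketch of the same bounded-FIFO pattern, with hypothetical names:

    package main

    import "fmt"

    // evictOldest drops the oldest entries once the cache exceeds max,
    // keeping the lookup map and the FIFO order slice consistent.
    func evictOldest(fifo []string, entries map[string]int, max int) []string {
    	if over := len(fifo) - max; over > 0 {
    		for _, key := range fifo[:over] {
    			delete(entries, key)
    		}
    		copy(fifo, fifo[over:])
    		fifo = fifo[:max]
    	}
    	return fifo
    }

    func main() {
    	entries := map[string]int{"a": 1, "b": 2, "c": 3}
    	fifo := []string{"a", "b", "c"}
    	fifo = evictOldest(fifo, entries, 2)
    	fmt.Println(fifo, len(entries)) // [b c] 2
    }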
| 
 | ||||
| // forwardAnno removes all announcements from the map whose total difficulty
 | ||||
| // does not exceed the provided threshold.
 | ||||
| func (fp *fetcherPeer) forwardAnno(td *big.Int) []*announce { | ||||
| 	var ( | ||||
| 		cutset  int | ||||
| 		evicted []*announce | ||||
| 	) | ||||
| 	for ; cutset < len(fp.fifo); cutset++ { | ||||
| 		anno := fp.announces[fp.fifo[cutset]] | ||||
| 		if anno == nil { | ||||
| 			continue // In theory this should never happen
 | ||||
| 		} | ||||
| 		if anno.data.Td.Cmp(td) > 0 { | ||||
| 			break | ||||
| 		} | ||||
| 		evicted = append(evicted, anno) | ||||
| 		delete(fp.announces, anno.data.Hash) | ||||
| 	} | ||||
| 	if cutset > 0 { | ||||
| 		copy(fp.fifo, fp.fifo[cutset:]) | ||||
| 		fp.fifo = fp.fifo[:len(fp.fifo)-cutset] | ||||
| 	} | ||||
| 	return evicted | ||||
| } | ||||
| 
 | ||||
| // lightFetcher implements retrieval of newly announced headers. It reuses
 | ||||
| // eth.BlockFetcher as the underlying fetcher, but adds extra rules on top:
 | ||||
| // e.g. evicting peers that time out.
 | ||||
| type lightFetcher struct { | ||||
| 	// Various handlers
 | ||||
| 	ulc     *ulc | ||||
| 	chaindb ethdb.Database | ||||
| 	reqDist *requestDistributor | ||||
| 	peerset *serverPeerSet        // The global peerset of the light client, shared by all components
 | ||||
| 	chain   *light.LightChain     // The local light chain which maintains the canonical header chain.
 | ||||
| 	fetcher *fetcher.BlockFetcher // The underlying fetcher which takes care of block header retrieval.
 | ||||
| 
 | ||||
| 	// Peerset maintained by fetcher
 | ||||
| 	plock sync.RWMutex | ||||
| 	peers map[enode.ID]*fetcherPeer | ||||
| 
 | ||||
| 	// Various channels
 | ||||
| 	announceCh chan *announce | ||||
| 	requestCh  chan *request | ||||
| 	deliverCh  chan *response | ||||
| 	syncDone   chan *types.Header | ||||
| 
 | ||||
| 	closeCh chan struct{} | ||||
| 	wg      sync.WaitGroup | ||||
| 
 | ||||
| 	// Callback
 | ||||
| 	synchronise func(peer *serverPeer) | ||||
| 
 | ||||
| 	// Test fields or hooks
 | ||||
| 	newHeadHook func(*types.Header) | ||||
| } | ||||
| 
 | ||||
| // newLightFetcher creates a light fetcher instance.
 | ||||
| func newLightFetcher(chain *light.LightChain, engine consensus.Engine, peers *serverPeerSet, ulc *ulc, chaindb ethdb.Database, reqDist *requestDistributor, syncFn func(p *serverPeer)) *lightFetcher { | ||||
| 	// Construct the fetcher by offering all necessary APIs
 | ||||
| 	validator := func(header *types.Header) error { | ||||
| 		// Disable seal verification explicitly if we are running in ulc mode.
 | ||||
| 		return engine.VerifyHeader(chain, header) | ||||
| 	} | ||||
| 	heighter := func() uint64 { return chain.CurrentHeader().Number.Uint64() } | ||||
| 	dropper := func(id string) { peers.unregister(id) } | ||||
| 	inserter := func(headers []*types.Header) (int, error) { return chain.InsertHeaderChain(headers) } | ||||
| 	f := &lightFetcher{ | ||||
| 		ulc:         ulc, | ||||
| 		peerset:     peers, | ||||
| 		chaindb:     chaindb, | ||||
| 		chain:       chain, | ||||
| 		reqDist:     reqDist, | ||||
| 		fetcher:     fetcher.NewBlockFetcher(true, chain.GetHeaderByHash, nil, validator, nil, heighter, inserter, nil, dropper), | ||||
| 		peers:       make(map[enode.ID]*fetcherPeer), | ||||
| 		synchronise: syncFn, | ||||
| 		announceCh:  make(chan *announce), | ||||
| 		requestCh:   make(chan *request), | ||||
| 		deliverCh:   make(chan *response), | ||||
| 		syncDone:    make(chan *types.Header), | ||||
| 		closeCh:     make(chan struct{}), | ||||
| 	} | ||||
| 	peers.subscribe(f) | ||||
| 	return f | ||||
| } | ||||
| 
 | ||||
| func (f *lightFetcher) start() { | ||||
| 	f.wg.Add(1) | ||||
| 	f.fetcher.Start() | ||||
| 	go f.mainloop() | ||||
| } | ||||
| 
 | ||||
| func (f *lightFetcher) stop() { | ||||
| 	close(f.closeCh) | ||||
| 	f.fetcher.Stop() | ||||
| 	f.wg.Wait() | ||||
| } | ||||
| 
 | ||||
| // registerPeer adds a new peer to the fetcher's peer set
 | ||||
| func (f *lightFetcher) registerPeer(p *serverPeer) { | ||||
| 	f.plock.Lock() | ||||
| 	defer f.plock.Unlock() | ||||
| 
 | ||||
| 	f.peers[p.ID()] = &fetcherPeer{announces: make(map[common.Hash]*announce)} | ||||
| } | ||||
| 
 | ||||
| // unregisterPeer removes the specified peer from the fetcher's peer set
 | ||||
| func (f *lightFetcher) unregisterPeer(p *serverPeer) { | ||||
| 	f.plock.Lock() | ||||
| 	defer f.plock.Unlock() | ||||
| 
 | ||||
| 	delete(f.peers, p.ID()) | ||||
| } | ||||
| 
 | ||||
| // peer returns the peer from the fetcher peerset.
 | ||||
| func (f *lightFetcher) peer(id enode.ID) *fetcherPeer { | ||||
| 	f.plock.RLock() | ||||
| 	defer f.plock.RUnlock() | ||||
| 
 | ||||
| 	return f.peers[id] | ||||
| } | ||||
| 
 | ||||
| // forEachPeer iterates over the fetcher peerset, aborting the iteration if
 | ||||
| // the callback returns false.
 | ||||
| func (f *lightFetcher) forEachPeer(check func(id enode.ID, p *fetcherPeer) bool) { | ||||
| 	f.plock.RLock() | ||||
| 	defer f.plock.RUnlock() | ||||
| 
 | ||||
| 	for id, peer := range f.peers { | ||||
| 		if !check(id, peer) { | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // mainloop is the main event loop of the light fetcher, which is responsible for
 | ||||
| //
 | ||||
| //   - announcement maintenance (ulc)
 | ||||
| //
 | ||||
| //     If we are running in ultra light client mode, then all announcements from
 | ||||
| //     the trusted servers are maintained. If the same announcements from trusted
 | ||||
| //     servers reach the threshold, then the relevant header is requested for retrieval.
 | ||||
| //
 | ||||
| //   - block header retrieval
 | ||||
| //     Whenever we receive an announcement with a higher td than the local
 | ||||
| //     chain, a request is made for header retrieval.
 | ||||
| //
 | ||||
| //   - re-sync trigger
 | ||||
| //     If the local chain lags too much, then the fetcher will enter "synchronise"
 | ||||
| //     mode to retrieve the missing headers in batches.
 | ||||
| func (f *lightFetcher) mainloop() { | ||||
| 	defer f.wg.Done() | ||||
| 
 | ||||
| 	var ( | ||||
| 		syncInterval = uint64(1) // Interval used to trigger a light resync.
 | ||||
| 		syncing      bool        // Whether the client is currently syncing
 | ||||
| 
 | ||||
| 		ulc          = f.ulc != nil | ||||
| 		headCh       = make(chan core.ChainHeadEvent, 100) | ||||
| 		fetching     = make(map[uint64]*request) | ||||
| 		requestTimer = time.NewTimer(0) | ||||
| 
 | ||||
| 		// Local status
 | ||||
| 		localHead = f.chain.CurrentHeader() | ||||
| 		localTd   = f.chain.GetTd(localHead.Hash(), localHead.Number.Uint64()) | ||||
| 	) | ||||
| 	defer requestTimer.Stop() | ||||
| 	sub := f.chain.SubscribeChainHeadEvent(headCh) | ||||
| 	defer sub.Unsubscribe() | ||||
| 
 | ||||
| 	// reset updates the local status with the given header.
 | ||||
| 	reset := func(header *types.Header) { | ||||
| 		localHead = header | ||||
| 		localTd = f.chain.GetTd(header.Hash(), header.Number.Uint64()) | ||||
| 	} | ||||
| 	// trustedHeader reports whether the header is regarded as trusted. In ulc
 | ||||
| 	// mode, a header is only trusted once enough identical announcements for it
 | ||||
| 	// have been received from the trusted servers.
 | ||||
| 	trustedHeader := func(hash common.Hash, number uint64) (bool, []enode.ID) { | ||||
| 		var ( | ||||
| 			agreed  []enode.ID | ||||
| 			trusted bool | ||||
| 		) | ||||
| 		f.forEachPeer(func(id enode.ID, p *fetcherPeer) bool { | ||||
| 			if anno := p.announces[hash]; anno != nil && anno.trust && anno.data.Number == number { | ||||
| 				agreed = append(agreed, id) | ||||
| 				if 100*len(agreed)/len(f.ulc.keys) >= f.ulc.fraction { | ||||
| 					trusted = true | ||||
| 					return false // abort iteration
 | ||||
| 				} | ||||
| 			} | ||||
| 			return true | ||||
| 		}) | ||||
| 		return trusted, agreed | ||||
| 	} | ||||
| 	for { | ||||
| 		select { | ||||
| 		case anno := <-f.announceCh: | ||||
| 			peerid, data := anno.peerid, anno.data | ||||
| 			log.Debug("Received new announce", "peer", peerid, "number", data.Number, "hash", data.Hash, "reorg", data.ReorgDepth) | ||||
| 
 | ||||
| 			peer := f.peer(peerid) | ||||
| 			if peer == nil { | ||||
| 				log.Debug("Receive announce from unknown peer", "peer", peerid) | ||||
| 				continue | ||||
| 			} | ||||
| 			// Announced tds should be strictly monotonic; drop the peer if
 | ||||
| 			// the announcement is out of order.
 | ||||
| 			if peer.latest != nil && data.Td.Cmp(peer.latest.Td) <= 0 { | ||||
| 				f.peerset.unregister(peerid.String()) | ||||
| 				log.Debug("Non-monotonic td", "peer", peerid, "current", data.Td, "previous", peer.latest.Td) | ||||
| 				continue | ||||
| 			} | ||||
| 			peer.latest = data | ||||
| 
 | ||||
| 			// Filter out any stale announcement; the local chain is ahead of it
 | ||||
| 			if localTd != nil && data.Td.Cmp(localTd) <= 0 { | ||||
| 				continue | ||||
| 			} | ||||
| 			peer.addAnno(anno) | ||||
| 
 | ||||
| 			// If we are not syncing, try to trigger a single retrieval or re-sync
 | ||||
| 			if !ulc && !syncing { | ||||
| 				// Two scenarios lead to re-sync:
 | ||||
| 				// - reorg happens
 | ||||
| 				// - local chain lags
 | ||||
| 				// In both cases we can't retrieve the parent of the announced header
 | ||||
| 				// with a single retrieval, so a resync is necessary.
 | ||||
| 				if data.Number > localHead.Number.Uint64()+syncInterval || data.ReorgDepth > 0 { | ||||
| 					syncing = true | ||||
| 					go f.startSync(peerid) | ||||
| 					log.Debug("Trigger light sync", "peer", peerid, "local", localHead.Number, "localhash", localHead.Hash(), "remote", data.Number, "remotehash", data.Hash) | ||||
| 					continue | ||||
| 				} | ||||
| 				f.fetcher.Notify(peerid.String(), data.Hash, data.Number, time.Now(), f.requestHeaderByHash(peerid), nil) | ||||
| 				log.Debug("Trigger header retrieval", "peer", peerid, "number", data.Number, "hash", data.Hash) | ||||
| 			} | ||||
| 			// Keep collecting announcements from trusted servers even while we are syncing.
 | ||||
| 			if ulc && anno.trust { | ||||
| 				// Notify the underlying fetcher to retrieve the header, or trigger a resync
 | ||||
| 				// if we have received enough announcements from the trusted servers.
 | ||||
| 				trusted, agreed := trustedHeader(data.Hash, data.Number) | ||||
| 				if trusted && !syncing { | ||||
| 					if data.Number > localHead.Number.Uint64()+syncInterval || data.ReorgDepth > 0 { | ||||
| 						syncing = true | ||||
| 						go f.startSync(peerid) | ||||
| 						log.Debug("Trigger trusted light sync", "local", localHead.Number, "localhash", localHead.Hash(), "remote", data.Number, "remotehash", data.Hash) | ||||
| 						continue | ||||
| 					} | ||||
| 					p := agreed[rand.Intn(len(agreed))] | ||||
| 					f.fetcher.Notify(p.String(), data.Hash, data.Number, time.Now(), f.requestHeaderByHash(p), nil) | ||||
| 					log.Debug("Trigger trusted header retrieval", "number", data.Number, "hash", data.Hash) | ||||
| 				} | ||||
| 			} | ||||
| 
 | ||||
| 		case req := <-f.requestCh: | ||||
| 			fetching[req.reqid] = req // Track all in-flight requests for response latency statistics.
 | ||||
| 			if len(fetching) == 1 { | ||||
| 				f.rescheduleTimer(fetching, requestTimer) | ||||
| 			} | ||||
| 
 | ||||
| 		case <-requestTimer.C: | ||||
| 			for reqid, request := range fetching { | ||||
| 				if time.Since(request.sendAt) > blockDelayTimeout-gatherSlack { | ||||
| 					delete(fetching, reqid) | ||||
| 					f.peerset.unregister(request.peerid.String()) | ||||
| 					log.Debug("Request timeout", "peer", request.peerid, "reqid", reqid) | ||||
| 				} | ||||
| 			} | ||||
| 			f.rescheduleTimer(fetching, requestTimer) | ||||
| 
 | ||||
| 		case resp := <-f.deliverCh: | ||||
| 			if req := fetching[resp.reqid]; req != nil { | ||||
| 				delete(fetching, resp.reqid) | ||||
| 				f.rescheduleTimer(fetching, requestTimer) | ||||
| 
 | ||||
| 				// The underlying fetcher does not check the consistency of request and response.
 | ||||
| 				// An adversary can send fake announces with an invalid hash and number but always
 | ||||
| 				// deliver some mismatched header, so it can't be punished by the underlying
 | ||||
| 				// fetcher. We add two extra rules here to detect this.
 | ||||
| 				if len(resp.headers) != 1 { | ||||
| 					f.peerset.unregister(req.peerid.String()) | ||||
| 					log.Debug("Deliver more than requested", "peer", req.peerid, "reqid", req.reqid) | ||||
| 					continue | ||||
| 				} | ||||
| 				if resp.headers[0].Hash() != req.hash { | ||||
| 					f.peerset.unregister(req.peerid.String()) | ||||
| 					log.Debug("Deliver invalid header", "peer", req.peerid, "reqid", req.reqid) | ||||
| 					continue | ||||
| 				} | ||||
| 				resp.remain <- f.fetcher.FilterHeaders(resp.peerid.String(), resp.headers, time.Now()) | ||||
| 			} else { | ||||
| 				// Discard the entire packet, whether it's a timed-out response or an unexpected one.
 | ||||
| 				resp.remain <- resp.headers | ||||
| 			} | ||||
| 
 | ||||
| 		case ev := <-headCh: | ||||
| 			// Short circuit if we are still syncing.
 | ||||
| 			if syncing { | ||||
| 				continue | ||||
| 			} | ||||
| 			reset(ev.Block.Header()) | ||||
| 
 | ||||
| 			// Clean stale announcements from les-servers.
 | ||||
| 			var droplist []enode.ID | ||||
| 			f.forEachPeer(func(id enode.ID, p *fetcherPeer) bool { | ||||
| 				removed := p.forwardAnno(localTd) | ||||
| 				for _, anno := range removed { | ||||
| 					if header := f.chain.GetHeaderByHash(anno.data.Hash); header != nil { | ||||
| 						if header.Number.Uint64() != anno.data.Number { | ||||
| 							droplist = append(droplist, id) | ||||
| 							break | ||||
| 						} | ||||
| 						// In theory the td should exist.
 | ||||
| 						td := f.chain.GetTd(anno.data.Hash, anno.data.Number) | ||||
| 						if td != nil && td.Cmp(anno.data.Td) != 0 { | ||||
| 							droplist = append(droplist, id) | ||||
| 							break | ||||
| 						} | ||||
| 					} | ||||
| 				} | ||||
| 				return true | ||||
| 			}) | ||||
| 			for _, id := range droplist { | ||||
| 				f.peerset.unregister(id.String()) | ||||
| 				log.Debug("Kicked out peer for invalid announcement") | ||||
| 			} | ||||
| 			if f.newHeadHook != nil { | ||||
| 				f.newHeadHook(localHead) | ||||
| 			} | ||||
| 
 | ||||
| 		case origin := <-f.syncDone: | ||||
| 			syncing = false // Reset the status
 | ||||
| 
 | ||||
| 			// Rewind all untrusted headers for ulc mode.
 | ||||
| 			if ulc { | ||||
| 				head := f.chain.CurrentHeader() | ||||
| 				ancestor := rawdb.FindCommonAncestor(f.chaindb, origin, head) | ||||
| 
 | ||||
| 				// Fall back to the genesis header in case no common ancestor is
 | ||||
| 				// found. This can happen when the original head is before the
 | ||||
| 				// checkpoint while the synced headers are after it. In that
 | ||||
| 				// case there is no ancestor between them.
 | ||||
| 				if ancestor == nil { | ||||
| 					ancestor = f.chain.Genesis().Header() | ||||
| 				} | ||||
| 				var untrusted []common.Hash | ||||
| 				for head.Number.Cmp(ancestor.Number) > 0 { | ||||
| 					hash, number := head.Hash(), head.Number.Uint64() | ||||
| 					if trusted, _ := trustedHeader(hash, number); trusted { | ||||
| 						break | ||||
| 					} | ||||
| 					untrusted = append(untrusted, hash) | ||||
| 					head = f.chain.GetHeader(head.ParentHash, number-1) | ||||
| 					if head == nil { | ||||
| 						break // all the synced headers will be dropped
 | ||||
| 					} | ||||
| 				} | ||||
| 				if len(untrusted) > 0 { | ||||
| 					for i, j := 0, len(untrusted)-1; i < j; i, j = i+1, j-1 { | ||||
| 						untrusted[i], untrusted[j] = untrusted[j], untrusted[i] | ||||
| 					} | ||||
| 					f.chain.Rollback(untrusted) | ||||
| 				} | ||||
| 			} | ||||
| 			// Reset local status.
 | ||||
| 			reset(f.chain.CurrentHeader()) | ||||
| 			if f.newHeadHook != nil { | ||||
| 				f.newHeadHook(localHead) | ||||
| 			} | ||||
| 			log.Debug("light sync finished", "number", localHead.Number, "hash", localHead.Hash()) | ||||
| 
 | ||||
| 		case <-f.closeCh: | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
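The ulc acceptance test inside trustedHeader reduces to an integer percentage check: 100*agreed/total >= fraction. A standalone sketch with a worked example, using hypothetical names:

    package main

    import "fmt"

    // meetsFraction mirrors the trust rule from trustedHeader above.
    func meetsFraction(agreed, total, fraction int) bool {
    	return total > 0 && 100*agreed/total >= fraction
    }

    func main() {
    	// With 5 trusted servers and a fraction of 75%, agreement from 3
    	// servers (60%) is insufficient, while 4 servers (80%) suffice.
    	fmt.Println(meetsFraction(3, 5, 75)) // false
    	fmt.Println(meetsFraction(4, 5, 75)) // true
    }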
| 
 | ||||
| // announce processes a new announcement message received from a peer.
 | ||||
| func (f *lightFetcher) announce(p *serverPeer, head *announceData) { | ||||
| 	select { | ||||
| 	case f.announceCh <- &announce{peerid: p.ID(), trust: p.trusted, data: head}: | ||||
| 	case <-f.closeCh: | ||||
| 		return | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // trackRequest sends a reqID to the main loop for in-flight request tracking.
 | ||||
| func (f *lightFetcher) trackRequest(peerid enode.ID, reqid uint64, hash common.Hash) { | ||||
| 	select { | ||||
| 	case f.requestCh <- &request{reqid: reqid, peerid: peerid, sendAt: time.Now(), hash: hash}: | ||||
| 	case <-f.closeCh: | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // requestHeaderByHash constructs a header retrieval request and sends it to
 | ||||
| // the local request distributor.
 | ||||
| //
 | ||||
| // Note: we rely on the underlying eth/fetcher to retrieve and validate the
 | ||||
| // response, so we have to obey its rule of only accepting the response from
 | ||||
| // the given peer.
 | ||||
| func (f *lightFetcher) requestHeaderByHash(peerid enode.ID) func(common.Hash) error { | ||||
| 	return func(hash common.Hash) error { | ||||
| 		req := &distReq{ | ||||
| 			getCost: func(dp distPeer) uint64 { return dp.(*serverPeer).getRequestCost(GetBlockHeadersMsg, 1) }, | ||||
| 			canSend: func(dp distPeer) bool { return dp.(*serverPeer).ID() == peerid }, | ||||
| 			request: func(dp distPeer) func() { | ||||
| 				peer, id := dp.(*serverPeer), rand.Uint64() | ||||
| 				cost := peer.getRequestCost(GetBlockHeadersMsg, 1) | ||||
| 				peer.fcServer.QueuedRequest(id, cost) | ||||
| 
 | ||||
| 				return func() { | ||||
| 					f.trackRequest(peer.ID(), id, hash) | ||||
| 					peer.requestHeadersByHash(id, hash, 1, 0, false) | ||||
| 				} | ||||
| 			}, | ||||
| 		} | ||||
| 		f.reqDist.queue(req) | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // startSync invokes the synchronisation callback to start syncing.
 | ||||
| func (f *lightFetcher) startSync(id enode.ID) { | ||||
| 	defer func(header *types.Header) { | ||||
| 		f.syncDone <- header | ||||
| 	}(f.chain.CurrentHeader()) | ||||
| 
 | ||||
| 	peer := f.peerset.peer(id.String()) | ||||
| 	if peer == nil || peer.onlyAnnounce { | ||||
| 		return | ||||
| 	} | ||||
| 	f.synchronise(peer) | ||||
| } | ||||
| 
 | ||||
| // deliverHeaders delivers header download request responses for processing
 | ||||
| func (f *lightFetcher) deliverHeaders(peer *serverPeer, reqid uint64, headers []*types.Header) []*types.Header { | ||||
| 	remain := make(chan []*types.Header, 1) | ||||
| 	select { | ||||
| 	case f.deliverCh <- &response{reqid: reqid, headers: headers, peerid: peer.ID(), remain: remain}: | ||||
| 	case <-f.closeCh: | ||||
| 		return nil | ||||
| 	} | ||||
| 	return <-remain | ||||
| } | ||||
| 
 | ||||
| // rescheduleTimer resets the specified timeout timer to the next request timeout.
 | ||||
| func (f *lightFetcher) rescheduleTimer(requests map[uint64]*request, timer *time.Timer) { | ||||
| 	// Short circuit if there are no in-flight requests
 | ||||
| 	if len(requests) == 0 { | ||||
| 		timer.Stop() | ||||
| 		return | ||||
| 	} | ||||
| 	// Otherwise find the earliest expiring request
 | ||||
| 	earliest := time.Now() | ||||
| 	for _, req := range requests { | ||||
| 		if earliest.After(req.sendAt) { | ||||
| 			earliest = req.sendAt | ||||
| 		} | ||||
| 	} | ||||
| 	timer.Reset(blockDelayTimeout - time.Since(earliest)) | ||||
| } | ||||
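A quick worked example of the reset arithmetic, assuming the blockDelayTimeout of 10 seconds declared at the top of this file: if the earliest in-flight request was sent 3 seconds ago, the timer is armed to fire in roughly 7 seconds.

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	blockDelayTimeout := 10 * time.Second
    	sendAt := time.Now().Add(-3 * time.Second) // earliest in-flight request
    	fmt.Println(blockDelayTimeout - time.Since(sendAt)) // ~7s
    }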
| @ -1,888 +0,0 @@ | ||||
| // Copyright 2015 The go-ethereum Authors
 | ||||
| // This file is part of the go-ethereum library.
 | ||||
| //
 | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||
| // it under the terms of the GNU Lesser General Public License as published by
 | ||||
| // the Free Software Foundation, either version 3 of the License, or
 | ||||
| // (at your option) any later version.
 | ||||
| //
 | ||||
| // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||
| // GNU Lesser General Public License for more details.
 | ||||
| //
 | ||||
| // You should have received a copy of the GNU Lesser General Public License
 | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||
| 
 | ||||
| // Package fetcher is a temporary package whilst working on the eth/66 blocking refactors.
 | ||||
| // After that work is done, les needs to be refactored to use the new package,
 | ||||
| // or alternatively use a stripped down version of it. Either way, we need to
 | ||||
| // keep the changes scoped, so duplicating them temporarily seems the sanest option.
 | ||||
| package fetcher | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"math/rand" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/common" | ||||
| 	"github.com/ethereum/go-ethereum/common/prque" | ||||
| 	"github.com/ethereum/go-ethereum/consensus" | ||||
| 	"github.com/ethereum/go-ethereum/core/types" | ||||
| 	"github.com/ethereum/go-ethereum/log" | ||||
| 	"github.com/ethereum/go-ethereum/metrics" | ||||
| 	"github.com/ethereum/go-ethereum/trie" | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	lightTimeout  = time.Millisecond       // Time allowance before an announced header is explicitly requested
 | ||||
| 	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
 | ||||
| 	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
 | ||||
| 	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
 | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	maxUncleDist = 7   // Maximum allowed backward distance from the chain head
 | ||||
| 	maxQueueDist = 32  // Maximum allowed distance from the chain head to queue
 | ||||
| 	hashLimit    = 256 // Maximum number of unique blocks or headers a peer may have announced
 | ||||
| 	blockLimit   = 64  // Maximum number of unique blocks a peer may have delivered
 | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	blockAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil) | ||||
| 	blockAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil) | ||||
| 	blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil) | ||||
| 	blockAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil) | ||||
| 
 | ||||
| 	blockBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil) | ||||
| 	blockBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil) | ||||
| 	blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil) | ||||
| 	blockBroadcastDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil) | ||||
| 
 | ||||
| 	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil) | ||||
| 	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil) | ||||
| 
 | ||||
| 	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil) | ||||
| 	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil) | ||||
| 	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil) | ||||
| 	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil) | ||||
| ) | ||||
| 
 | ||||
| var errTerminated = errors.New("terminated") | ||||
| 
 | ||||
| // HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
 | ||||
| type HeaderRetrievalFn func(common.Hash) *types.Header | ||||
| 
 | ||||
| // blockRetrievalFn is a callback type for retrieving a block from the local chain.
 | ||||
| type blockRetrievalFn func(common.Hash) *types.Block | ||||
| 
 | ||||
| // headerRequesterFn is a callback type for sending a header retrieval request.
 | ||||
| type headerRequesterFn func(common.Hash) error | ||||
| 
 | ||||
| // bodyRequesterFn is a callback type for sending a body retrieval request.
 | ||||
| type bodyRequesterFn func([]common.Hash) error | ||||
| 
 | ||||
| // headerVerifierFn is a callback type to verify a block's header for fast propagation.
 | ||||
| type headerVerifierFn func(header *types.Header) error | ||||
| 
 | ||||
| // blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
 | ||||
| type blockBroadcasterFn func(block *types.Block, propagate bool) | ||||
| 
 | ||||
| // chainHeightFn is a callback type to retrieve the current chain height.
 | ||||
| type chainHeightFn func() uint64 | ||||
| 
 | ||||
| // headersInsertFn is a callback type to insert a batch of headers into the local chain.
 | ||||
| type headersInsertFn func(headers []*types.Header) (int, error) | ||||
| 
 | ||||
| // chainInsertFn is a callback type to insert a batch of blocks into the local chain.
 | ||||
| type chainInsertFn func(types.Blocks) (int, error) | ||||
| 
 | ||||
| // peerDropFn is a callback type for dropping a peer detected as malicious.
 | ||||
| type peerDropFn func(id string) | ||||
| 
 | ||||
| // blockAnnounce is the hash notification of the availability of a new block in the
 | ||||
| // network.
 | ||||
| type blockAnnounce struct { | ||||
| 	hash   common.Hash   // Hash of the block being announced
 | ||||
| 	number uint64        // Number of the block being announced (0 = unknown | old protocol)
 | ||||
| 	header *types.Header // Header of the block partially reassembled (new protocol)
 | ||||
| 	time   time.Time     // Timestamp of the announcement
 | ||||
| 
 | ||||
| 	origin string // Identifier of the peer originating the notification
 | ||||
| 
 | ||||
| 	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
 | ||||
| 	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
 | ||||
| } | ||||
| 
 | ||||
| // headerFilterTask represents a batch of headers needing fetcher filtering.
 | ||||
| type headerFilterTask struct { | ||||
| 	peer    string          // The source peer of block headers
 | ||||
| 	headers []*types.Header // Collection of headers to filter
 | ||||
| 	time    time.Time       // Arrival time of the headers
 | ||||
| } | ||||
| 
 | ||||
| // bodyFilterTask represents a batch of block bodies (transactions and uncles)
 | ||||
| // needing fetcher filtering.
 | ||||
| type bodyFilterTask struct { | ||||
| 	peer         string                 // The source peer of block bodies
 | ||||
| 	transactions [][]*types.Transaction // Collection of transactions per block body
 | ||||
| 	uncles       [][]*types.Header      // Collection of uncles per block body
 | ||||
| 	time         time.Time              // Arrival time of the blocks' contents
 | ||||
| } | ||||
| 
 | ||||
| // blockOrHeaderInject represents a scheduled import operation.
 | ||||
| type blockOrHeaderInject struct { | ||||
| 	origin string | ||||
| 
 | ||||
| 	header *types.Header // Used by the light-mode fetcher, which only cares about the header.
 | ||||
| 	block  *types.Block  // Used by the normal-mode fetcher, which imports the full block.
 | ||||
| } | ||||
| 
 | ||||
| // number returns the block number of the injected object.
 | ||||
| func (inject *blockOrHeaderInject) number() uint64 { | ||||
| 	if inject.header != nil { | ||||
| 		return inject.header.Number.Uint64() | ||||
| 	} | ||||
| 	return inject.block.NumberU64() | ||||
| } | ||||
| 
 | ||||
| // hash returns the block hash of the injected object.
 | ||||
| func (inject *blockOrHeaderInject) hash() common.Hash { | ||||
| 	if inject.header != nil { | ||||
| 		return inject.header.Hash() | ||||
| 	} | ||||
| 	return inject.block.Hash() | ||||
| } | ||||
| 
 | ||||
| // BlockFetcher is responsible for accumulating block announcements from various peers
 | ||||
| // and scheduling them for retrieval.
 | ||||
| type BlockFetcher struct { | ||||
| 	light bool // Whether this is a light fetcher or a normal one.
 | ||||
| 
 | ||||
| 	// Various event channels
 | ||||
| 	notify chan *blockAnnounce | ||||
| 	inject chan *blockOrHeaderInject | ||||
| 
 | ||||
| 	headerFilter chan chan *headerFilterTask | ||||
| 	bodyFilter   chan chan *bodyFilterTask | ||||
| 
 | ||||
| 	done chan common.Hash | ||||
| 	quit chan struct{} | ||||
| 
 | ||||
| 	// Announce states
 | ||||
| 	announces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion
 | ||||
| 	announced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
 | ||||
| 	fetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching
 | ||||
| 	fetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
 | ||||
| 	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing
 | ||||
| 
 | ||||
| 	// Block cache
 | ||||
| 	queue  *prque.Prque[int64, *blockOrHeaderInject] // Queue containing the import operations (block number sorted)
 | ||||
| 	queues map[string]int                            // Per peer block counts to prevent memory exhaustion
 | ||||
| 	queued map[common.Hash]*blockOrHeaderInject      // Set of already queued blocks (to dedup imports)
 | ||||
| 
 | ||||
| 	// Callbacks
 | ||||
| 	getHeader      HeaderRetrievalFn  // Retrieves a header from the local chain
 | ||||
| 	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
 | ||||
| 	verifyHeader   headerVerifierFn   // Checks if a block's header has a valid proof of work
 | ||||
| 	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
 | ||||
| 	chainHeight    chainHeightFn      // Retrieves the current chain's height
 | ||||
| 	insertHeaders  headersInsertFn    // Injects a batch of headers into the chain
 | ||||
| 	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
 | ||||
| 	dropPeer       peerDropFn         // Drops a peer for misbehaving
 | ||||
| 
 | ||||
| 	// Testing hooks
 | ||||
| 	announceChangeHook func(common.Hash, bool)           // Method to call upon adding or deleting a hash from the blockAnnounce list
 | ||||
| 	queueChangeHook    func(common.Hash, bool)           // Method to call upon adding or deleting a block from the import queue
 | ||||
| 	fetchingHook       func([]common.Hash)               // Method to call upon starting a block (eth/61) or header (eth/62) fetch
 | ||||
| 	completingHook     func([]common.Hash)               // Method to call upon starting a block body fetch (eth/62)
 | ||||
| 	importedHook       func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62)
 | ||||
| } | ||||
| 
 | ||||
| // NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
 | ||||
| func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher { | ||||
| 	return &BlockFetcher{ | ||||
| 		light:          light, | ||||
| 		notify:         make(chan *blockAnnounce), | ||||
| 		inject:         make(chan *blockOrHeaderInject), | ||||
| 		headerFilter:   make(chan chan *headerFilterTask), | ||||
| 		bodyFilter:     make(chan chan *bodyFilterTask), | ||||
| 		done:           make(chan common.Hash), | ||||
| 		quit:           make(chan struct{}), | ||||
| 		announces:      make(map[string]int), | ||||
| 		announced:      make(map[common.Hash][]*blockAnnounce), | ||||
| 		fetching:       make(map[common.Hash]*blockAnnounce), | ||||
| 		fetched:        make(map[common.Hash][]*blockAnnounce), | ||||
| 		completing:     make(map[common.Hash]*blockAnnounce), | ||||
| 		queue:          prque.New[int64, *blockOrHeaderInject](nil), | ||||
| 		queues:         make(map[string]int), | ||||
| 		queued:         make(map[common.Hash]*blockOrHeaderInject), | ||||
| 		getHeader:      getHeader, | ||||
| 		getBlock:       getBlock, | ||||
| 		verifyHeader:   verifyHeader, | ||||
| 		broadcastBlock: broadcastBlock, | ||||
| 		chainHeight:    chainHeight, | ||||
| 		insertHeaders:  insertHeaders, | ||||
| 		insertChain:    insertChain, | ||||
| 		dropPeer:       dropPeer, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Start boots up the announcement-based synchroniser, accepting and processing | ||||
| // hash notifications and block fetches until termination is requested. | ||||
| func (f *BlockFetcher) Start() { | ||||
| 	go f.loop() | ||||
| } | ||||
|  | ||||
| // Stop terminates the announcement-based synchroniser, canceling all pending | ||||
| // operations. | ||||
| func (f *BlockFetcher) Stop() { | ||||
| 	close(f.quit) | ||||
| } | ||||
|  | ||||
| // Notify announces to the fetcher the potential availability of a new block in | ||||
| // the network. | ||||
| func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time, | ||||
| 	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error { | ||||
| 	block := &blockAnnounce{ | ||||
| 		hash:        hash, | ||||
| 		number:      number, | ||||
| 		time:        time, | ||||
| 		origin:      peer, | ||||
| 		fetchHeader: headerFetcher, | ||||
| 		fetchBodies: bodyFetcher, | ||||
| 	} | ||||
| 	select { | ||||
| 	case f.notify <- block: | ||||
| 		return nil | ||||
| 	case <-f.quit: | ||||
| 		return errTerminated | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Enqueue tries to fill gaps in the fetcher's future import queue. | ||||
| func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error { | ||||
| 	op := &blockOrHeaderInject{ | ||||
| 		origin: peer, | ||||
| 		block:  block, | ||||
| 	} | ||||
| 	select { | ||||
| 	case f.inject <- op: | ||||
| 		return nil | ||||
| 	case <-f.quit: | ||||
| 		return errTerminated | ||||
| 	} | ||||
| } | ||||
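
For orientation, here is a minimal sketch of how a peer message handler might drive these two entry points. The onNewBlockHashes/onNewBlock helpers and the request callbacks are hypothetical stand-ins, not APIs introduced by this change; only Notify and Enqueue come from the code above.

// Hypothetical glue code feeding the fetcher from a protocol handler.
func onNewBlockHashes(f *BlockFetcher, peer string, hash common.Hash, number uint64,
	requestHeader func(common.Hash) error, requestBodies func([]common.Hash) error) {
	// The fetcher decides when, and from which announcer, to actually fetch.
	if err := f.Notify(peer, hash, number, time.Now(), requestHeader, requestBodies); err != nil {
		log.Debug("Block announcement dropped", "peer", peer, "err", err)
	}
}

func onNewBlock(f *BlockFetcher, peer string, block *types.Block) {
	// Fully propagated blocks bypass announcement handling and queue directly.
	if err := f.Enqueue(peer, block); err != nil {
		log.Debug("Propagated block dropped", "peer", peer, "err", err)
	}
}
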
|  | ||||
| // FilterHeaders extracts all the headers that were explicitly requested by the fetcher, | ||||
| // returning those that should be handled differently. | ||||
| func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header { | ||||
| 	log.Trace("Filtering headers", "peer", peer, "headers", len(headers)) | ||||
|  | ||||
| 	// Send the filter channel to the fetcher | ||||
| 	filter := make(chan *headerFilterTask) | ||||
|  | ||||
| 	select { | ||||
| 	case f.headerFilter <- filter: | ||||
| 	case <-f.quit: | ||||
| 		return nil | ||||
| 	} | ||||
| 	// Request the filtering of the header list | ||||
| 	select { | ||||
| 	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}: | ||||
| 	case <-f.quit: | ||||
| 		return nil | ||||
| 	} | ||||
| 	// Retrieve the headers remaining after filtering | ||||
| 	select { | ||||
| 	case task := <-filter: | ||||
| 		return task.headers | ||||
| 	case <-f.quit: | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // FilterBodies extracts all the block bodies that were explicitly requested by | ||||
| // the fetcher, returning those that should be handled differently. | ||||
| func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) { | ||||
| 	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles)) | ||||
|  | ||||
| 	// Send the filter channel to the fetcher | ||||
| 	filter := make(chan *bodyFilterTask) | ||||
|  | ||||
| 	select { | ||||
| 	case f.bodyFilter <- filter: | ||||
| 	case <-f.quit: | ||||
| 		return nil, nil | ||||
| 	} | ||||
| 	// Request the filtering of the body list | ||||
| 	select { | ||||
| 	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}: | ||||
| 	case <-f.quit: | ||||
| 		return nil, nil | ||||
| 	} | ||||
| 	// Retrieve the bodies remaining after filtering | ||||
| 	select { | ||||
| 	case task := <-filter: | ||||
| 		return task.transactions, task.uncles | ||||
| 	case <-f.quit: | ||||
| 		return nil, nil | ||||
| 	} | ||||
| } | ||||
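
Both filter methods perform the same three-step handshake with the event loop: deliver a reply channel, deliver the task, then read back whatever the fetcher did not claim, with every step guarded by the quit channel. A sketch of the caller's side follows; deliverHeaders is a hypothetical stand-in for the component (e.g. a downloader) that handles unrequested headers, not something defined in this package.

// Caller's side of the filter handshake: FilterHeaders blocks until the
// fetcher's loop has claimed the headers it asked for; the remainder was
// never requested by the fetcher and flows onward.
func onBlockHeaders(f *BlockFetcher, peer string, headers []*types.Header,
	deliverHeaders func(string, []*types.Header)) {
	if rest := f.FilterHeaders(peer, headers, time.Now()); len(rest) > 0 {
		deliverHeaders(peer, rest) // hand the rest to other sync code
	}
}
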
|  | ||||
| // loop is the main fetcher loop, checking and processing various notification | ||||
| // events. | ||||
| func (f *BlockFetcher) loop() { | ||||
| 	// Iterate the block fetching until a quit is requested | ||||
| 	var ( | ||||
| 		fetchTimer    = time.NewTimer(0) | ||||
| 		completeTimer = time.NewTimer(0) | ||||
| 	) | ||||
| 	<-fetchTimer.C // clear out the channel | ||||
| 	<-completeTimer.C | ||||
| 	defer fetchTimer.Stop() | ||||
| 	defer completeTimer.Stop() | ||||
|  | ||||
| 	for { | ||||
| 		// Clean up any expired block fetches | ||||
| 		for hash, announce := range f.fetching { | ||||
| 			if time.Since(announce.time) > fetchTimeout { | ||||
| 				f.forgetHash(hash) | ||||
| 			} | ||||
| 		} | ||||
| 		// Import any queued blocks that could potentially fit | ||||
| 		height := f.chainHeight() | ||||
| 		for !f.queue.Empty() { | ||||
| 			op := f.queue.PopItem() | ||||
| 			hash := op.hash() | ||||
| 			if f.queueChangeHook != nil { | ||||
| 				f.queueChangeHook(hash, false) | ||||
| 			} | ||||
| 			// If too high up the chain or phase, continue later | ||||
| 			number := op.number() | ||||
| 			if number > height+1 { | ||||
| 				f.queue.Push(op, -int64(number)) | ||||
| 				if f.queueChangeHook != nil { | ||||
| 					f.queueChangeHook(hash, true) | ||||
| 				} | ||||
| 				break | ||||
| 			} | ||||
| 			// Otherwise if fresh and still unknown, try and import | ||||
| 			if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) { | ||||
| 				f.forgetBlock(hash) | ||||
| 				continue | ||||
| 			} | ||||
| 			if f.light { | ||||
| 				f.importHeaders(op.origin, op.header) | ||||
| 			} else { | ||||
| 				f.importBlocks(op.origin, op.block) | ||||
| 			} | ||||
| 		} | ||||
| 		// Wait for an outside event to occur | ||||
| 		select { | ||||
| 		case <-f.quit: | ||||
| 			// BlockFetcher terminating, abort all operations | ||||
| 			return | ||||
|  | ||||
| 		case notification := <-f.notify: | ||||
| 			// A block was announced, make sure the peer isn't DOSing us | ||||
| 			blockAnnounceInMeter.Mark(1) | ||||
|  | ||||
| 			count := f.announces[notification.origin] + 1 | ||||
| 			if count > hashLimit { | ||||
| 				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit) | ||||
| 				blockAnnounceDOSMeter.Mark(1) | ||||
| 				break | ||||
| 			} | ||||
| 			// If we have a valid block number, check that it's potentially useful | ||||
| 			if notification.number > 0 { | ||||
| 				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { | ||||
| 					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist) | ||||
| 					blockAnnounceDropMeter.Mark(1) | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
| 			// All is well, schedule the announce if block's not yet downloading | ||||
| 			if _, ok := f.fetching[notification.hash]; ok { | ||||
| 				break | ||||
| 			} | ||||
| 			if _, ok := f.completing[notification.hash]; ok { | ||||
| 				break | ||||
| 			} | ||||
| 			f.announces[notification.origin] = count | ||||
| 			f.announced[notification.hash] = append(f.announced[notification.hash], notification) | ||||
| 			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 { | ||||
| 				f.announceChangeHook(notification.hash, true) | ||||
| 			} | ||||
| 			if len(f.announced) == 1 { | ||||
| 				f.rescheduleFetch(fetchTimer) | ||||
| 			} | ||||
|  | ||||
| 		case op := <-f.inject: | ||||
| 			// A direct block insertion was requested, try and fill any pending gaps | ||||
| 			blockBroadcastInMeter.Mark(1) | ||||
|  | ||||
| 			// Only direct block injection is allowed now; silently drop any | ||||
| 			// header injection we receive here. | ||||
| 			if f.light { | ||||
| 				continue | ||||
| 			} | ||||
| 			f.enqueue(op.origin, nil, op.block) | ||||
|  | ||||
| 		case hash := <-f.done: | ||||
| 			// A pending import finished, remove all traces of the notification | ||||
| 			f.forgetHash(hash) | ||||
| 			f.forgetBlock(hash) | ||||
|  | ||||
| 		case <-fetchTimer.C: | ||||
| 			// At least one block's timer ran out, check for needing retrieval | ||||
| 			request := make(map[string][]common.Hash) | ||||
|  | ||||
| 			for hash, announces := range f.announced { | ||||
| 				// In the current LES protocol (les2/les3), only header announces are | ||||
| 				// available, so there is no need to wait long for header broadcasts. | ||||
| 				timeout := arriveTimeout - gatherSlack | ||||
| 				if f.light { | ||||
| 					timeout = 0 | ||||
| 				} | ||||
| 				if time.Since(announces[0].time) > timeout { | ||||
| 					// Pick a random peer to retrieve from, reset all others | ||||
| 					announce := announces[rand.Intn(len(announces))] | ||||
| 					f.forgetHash(hash) | ||||
|  | ||||
| 					// If the block still didn't arrive, queue for fetching | ||||
| 					if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) { | ||||
| 						request[announce.origin] = append(request[announce.origin], hash) | ||||
| 						f.fetching[hash] = announce | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 			// Send out all block header requests | ||||
| 			for peer, hashes := range request { | ||||
| 				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes) | ||||
|  | ||||
| 				// Create a closure of the fetch and schedule it on a new thread | ||||
| 				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes | ||||
| 				go func() { | ||||
| 					if f.fetchingHook != nil { | ||||
| 						f.fetchingHook(hashes) | ||||
| 					} | ||||
| 					for _, hash := range hashes { | ||||
| 						headerFetchMeter.Mark(1) | ||||
| 						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals | ||||
| 					} | ||||
| 				}() | ||||
| 			} | ||||
| 			// Schedule the next fetch if blocks are still pending | ||||
| 			f.rescheduleFetch(fetchTimer) | ||||
|  | ||||
| 		case <-completeTimer.C: | ||||
| 			// At least one header's timer ran out, retrieve everything | ||||
| 			request := make(map[string][]common.Hash) | ||||
|  | ||||
| 			for hash, announces := range f.fetched { | ||||
| 				// Pick a random peer to retrieve from, reset all others | ||||
| 				announce := announces[rand.Intn(len(announces))] | ||||
| 				f.forgetHash(hash) | ||||
|  | ||||
| 				// If the block still didn't arrive, queue for completion | ||||
| 				if f.getBlock(hash) == nil { | ||||
| 					request[announce.origin] = append(request[announce.origin], hash) | ||||
| 					f.completing[hash] = announce | ||||
| 				} | ||||
| 			} | ||||
| 			// Send out all block body requests | ||||
| 			for peer, hashes := range request { | ||||
| 				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes) | ||||
|  | ||||
| 				// Create a closure of the fetch and schedule it on a new thread | ||||
| 				if f.completingHook != nil { | ||||
| 					f.completingHook(hashes) | ||||
| 				} | ||||
| 				bodyFetchMeter.Mark(int64(len(hashes))) | ||||
| 				go f.completing[hashes[0]].fetchBodies(hashes) | ||||
| 			} | ||||
| 			// Schedule the next completion if headers are still pending | ||||
| 			f.rescheduleComplete(completeTimer) | ||||
|  | ||||
| 		case filter := <-f.headerFilter: | ||||
| 			// Headers arrived from a remote peer. Extract those that were explicitly | ||||
| 			// requested by the fetcher, and return everything else so it's delivered | ||||
| 			// to other parts of the system. | ||||
| 			var task *headerFilterTask | ||||
| 			select { | ||||
| 			case task = <-filter: | ||||
| 			case <-f.quit: | ||||
| 				return | ||||
| 			} | ||||
| 			headerFilterInMeter.Mark(int64(len(task.headers))) | ||||
|  | ||||
| 			// Split the batch of headers into unknown ones (to return to the caller), | ||||
| 			// known incomplete ones (requiring body retrievals) and completed blocks. | ||||
| 			unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{} | ||||
| 			for _, header := range task.headers { | ||||
| 				hash := header.Hash() | ||||
|  | ||||
| 				// Filter fetcher-requested headers from other synchronisation algorithms | ||||
| 				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil { | ||||
| 					// If the delivered header does not match the promised number, drop the announcer | ||||
| 					if header.Number.Uint64() != announce.number { | ||||
| 						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number) | ||||
| 						f.dropPeer(announce.origin) | ||||
| 						f.forgetHash(hash) | ||||
| 						continue | ||||
| 					} | ||||
| 					// Collect all headers only if we are running in light | ||||
| 					// mode and the headers are not imported by other means. | ||||
| 					if f.light { | ||||
| 						if f.getHeader(hash) == nil { | ||||
| 							announce.header = header | ||||
| 							lightHeaders = append(lightHeaders, announce) | ||||
| 						} | ||||
| 						f.forgetHash(hash) | ||||
| 						continue | ||||
| 					} | ||||
| 					// Only keep if not imported by other means | ||||
| 					if f.getBlock(hash) == nil { | ||||
| 						announce.header = header | ||||
| 						announce.time = task.time | ||||
|  | ||||
| 						// If the block is empty (header only), short circuit into the final import queue | ||||
| 						if header.TxHash == types.EmptyTxsHash && header.UncleHash == types.EmptyUncleHash { | ||||
| 							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) | ||||
|  | ||||
| 							block := types.NewBlockWithHeader(header) | ||||
| 							block.ReceivedAt = task.time | ||||
|  | ||||
| 							complete = append(complete, block) | ||||
| 							f.completing[hash] = announce | ||||
| 							continue | ||||
| 						} | ||||
| 						// Otherwise add to the list of blocks needing completion | ||||
| 						incomplete = append(incomplete, announce) | ||||
| 					} else { | ||||
| 						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) | ||||
| 						f.forgetHash(hash) | ||||
| 					} | ||||
| 				} else { | ||||
| 					// BlockFetcher doesn't know about it, add to the return list | ||||
| 					unknown = append(unknown, header) | ||||
| 				} | ||||
| 			} | ||||
| 			headerFilterOutMeter.Mark(int64(len(unknown))) | ||||
| 			select { | ||||
| 			case filter <- &headerFilterTask{headers: unknown, time: task.time}: | ||||
| 			case <-f.quit: | ||||
| 				return | ||||
| 			} | ||||
| 			// Schedule the retrieved headers for body completion | ||||
| 			for _, announce := range incomplete { | ||||
| 				hash := announce.header.Hash() | ||||
| 				if _, ok := f.completing[hash]; ok { | ||||
| 					continue | ||||
| 				} | ||||
| 				f.fetched[hash] = append(f.fetched[hash], announce) | ||||
| 				if len(f.fetched) == 1 { | ||||
| 					f.rescheduleComplete(completeTimer) | ||||
| 				} | ||||
| 			} | ||||
| 			// Schedule the header for light fetcher import | ||||
| 			for _, announce := range lightHeaders { | ||||
| 				f.enqueue(announce.origin, announce.header, nil) | ||||
| 			} | ||||
| 			// Schedule the header-only blocks for import | ||||
| 			for _, block := range complete { | ||||
| 				if announce := f.completing[block.Hash()]; announce != nil { | ||||
| 					f.enqueue(announce.origin, nil, block) | ||||
| 				} | ||||
| 			} | ||||
|  | ||||
| 		case filter := <-f.bodyFilter: | ||||
| 			// Block bodies arrived, extract any explicitly requested blocks, return the rest | ||||
| 			var task *bodyFilterTask | ||||
| 			select { | ||||
| 			case task = <-filter: | ||||
| 			case <-f.quit: | ||||
| 				return | ||||
| 			} | ||||
| 			bodyFilterInMeter.Mark(int64(len(task.transactions))) | ||||
| 			blocks := []*types.Block{} | ||||
| 			// Abort early if there's nothing explicitly requested | ||||
| 			if len(f.completing) > 0 { | ||||
| 				for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ { | ||||
| 					// Match up a body to any possible completion request | ||||
| 					var ( | ||||
| 						matched   = false | ||||
| 						uncleHash common.Hash // calculated lazily and reused | ||||
| 						txnHash   common.Hash // calculated lazily and reused | ||||
| 					) | ||||
| 					for hash, announce := range f.completing { | ||||
| 						if f.queued[hash] != nil || announce.origin != task.peer { | ||||
| 							continue | ||||
| 						} | ||||
| 						if uncleHash == (common.Hash{}) { | ||||
| 							uncleHash = types.CalcUncleHash(task.uncles[i]) | ||||
| 						} | ||||
| 						if uncleHash != announce.header.UncleHash { | ||||
| 							continue | ||||
| 						} | ||||
| 						if txnHash == (common.Hash{}) { | ||||
| 							txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil)) | ||||
| 						} | ||||
| 						if txnHash != announce.header.TxHash { | ||||
| 							continue | ||||
| 						} | ||||
| 						// Mark the body matched, reassemble if still unknown | ||||
| 						matched = true | ||||
| 						if f.getBlock(hash) == nil { | ||||
| 							block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i]) | ||||
| 							block.ReceivedAt = task.time | ||||
| 							blocks = append(blocks, block) | ||||
| 						} else { | ||||
| 							f.forgetHash(hash) | ||||
| 						} | ||||
| 					} | ||||
| 					if matched { | ||||
| 						task.transactions = append(task.transactions[:i], task.transactions[i+1:]...) | ||||
| 						task.uncles = append(task.uncles[:i], task.uncles[i+1:]...) | ||||
| 						i-- | ||||
| 						continue | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 			bodyFilterOutMeter.Mark(int64(len(task.transactions))) | ||||
| 			select { | ||||
| 			case filter <- task: | ||||
| 			case <-f.quit: | ||||
| 				return | ||||
| 			} | ||||
| 			// Schedule the retrieved blocks for ordered import | ||||
| 			for _, block := range blocks { | ||||
| 				if announce := f.completing[block.Hash()]; announce != nil { | ||||
| 					f.enqueue(announce.origin, nil, block) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout. | ||||
| func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) { | ||||
| 	// Short circuit if no blocks are announced | ||||
| 	if len(f.announced) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	// Schedule announcement retrieval quickly for light mode | ||||
| 	// since the server won't send any headers to the client. | ||||
| 	if f.light { | ||||
| 		fetch.Reset(lightTimeout) | ||||
| 		return | ||||
| 	} | ||||
| 	// Otherwise find the earliest expiring announcement | ||||
| 	earliest := time.Now() | ||||
| 	for _, announces := range f.announced { | ||||
| 		if earliest.After(announces[0].time) { | ||||
| 			earliest = announces[0].time | ||||
| 		} | ||||
| 	} | ||||
| 	fetch.Reset(arriveTimeout - time.Since(earliest)) | ||||
| } | ||||
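
The reset arithmetic above rearms the timer for the oldest pending announcement's deadline rather than a fixed interval. A self-contained illustration of that pattern is below; the 500ms figure mirrors the package's arriveTimeout at the time of writing, but the whole program is only a worked example, not fetcher code:

package main

import (
	"fmt"
	"time"
)

// With a 500ms arrival deadline and the oldest announcement already 200ms
// old, the timer is rearmed to fire roughly 300ms from now.
func main() {
	const arriveTimeout = 500 * time.Millisecond
	oldest := time.Now().Add(-200 * time.Millisecond) // oldest pending announce

	timer := time.NewTimer(0)
	<-timer.C // drain, mirroring the fetcher's loop setup
	timer.Reset(arriveTimeout - time.Since(oldest))

	start := time.Now()
	<-timer.C
	fmt.Println("fired after", time.Since(start).Round(10*time.Millisecond))
}
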
|  | ||||
| // rescheduleComplete resets the specified completion timer to the next fetch timeout. | ||||
| func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) { | ||||
| 	// Short circuit if no headers are fetched | ||||
| 	if len(f.fetched) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	// Otherwise find the earliest expiring announcement | ||||
| 	earliest := time.Now() | ||||
| 	for _, announces := range f.fetched { | ||||
| 		if earliest.After(announces[0].time) { | ||||
| 			earliest = announces[0].time | ||||
| 		} | ||||
| 	} | ||||
| 	complete.Reset(gatherSlack - time.Since(earliest)) | ||||
| } | ||||
|  | ||||
| // enqueue schedules a new header or block import operation, if the component | ||||
| // to be imported has not yet been seen. | ||||
| func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) { | ||||
| 	var ( | ||||
| 		hash   common.Hash | ||||
| 		number uint64 | ||||
| 	) | ||||
| 	if header != nil { | ||||
| 		hash, number = header.Hash(), header.Number.Uint64() | ||||
| 	} else { | ||||
| 		hash, number = block.Hash(), block.NumberU64() | ||||
| 	} | ||||
| 	// Ensure the peer isn't DOSing us | ||||
| 	count := f.queues[peer] + 1 | ||||
| 	if count > blockLimit { | ||||
| 		log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit) | ||||
| 		blockBroadcastDOSMeter.Mark(1) | ||||
| 		f.forgetHash(hash) | ||||
| 		return | ||||
| 	} | ||||
| 	// Discard any past or too distant blocks | ||||
| 	if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { | ||||
| 		log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist) | ||||
| 		blockBroadcastDropMeter.Mark(1) | ||||
| 		f.forgetHash(hash) | ||||
| 		return | ||||
| 	} | ||||
| 	// Schedule the block for future importing | ||||
| 	if _, ok := f.queued[hash]; !ok { | ||||
| 		op := &blockOrHeaderInject{origin: peer} | ||||
| 		if header != nil { | ||||
| 			op.header = header | ||||
| 		} else { | ||||
| 			op.block = block | ||||
| 		} | ||||
| 		f.queues[peer] = count | ||||
| 		f.queued[hash] = op | ||||
| 		f.queue.Push(op, -int64(number)) | ||||
| 		if f.queueChangeHook != nil { | ||||
| 			f.queueChangeHook(hash, true) | ||||
| 		} | ||||
| 		log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size()) | ||||
| 	} | ||||
| } | ||||
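
As a worked example of the distance gate in enqueue: with the local head at height 100 and the package limits maxUncleDist = 7 and maxQueueDist = 32 (their values at the time of writing), only blocks numbered 93 through 132 are queued; everything else is dropped as stale or too far ahead. The helper below is illustrative only and not part of the package:

// withinQueueWindow mirrors enqueue's acceptance check in isolation.
func withinQueueWindow(number, height uint64) bool {
	dist := int64(number) - int64(height)
	return dist >= -maxUncleDist && dist <= maxQueueDist
}
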
|  | ||||
| // importHeaders spawns a new goroutine to run a header insertion into the chain. | ||||
| // If the header's number is at the same height as the current import phase, it | ||||
| // updates the phase states accordingly. | ||||
| func (f *BlockFetcher) importHeaders(peer string, header *types.Header) { | ||||
| 	hash := header.Hash() | ||||
| 	log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash) | ||||
|  | ||||
| 	go func() { | ||||
| 		defer func() { f.done <- hash }() | ||||
| 		// If the parent's unknown, abort insertion | ||||
| 		parent := f.getHeader(header.ParentHash) | ||||
| 		if parent == nil { | ||||
| 			log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash) | ||||
| 			return | ||||
| 		} | ||||
| 		// Validate the header and if something went wrong, drop the peer | ||||
| 		if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock { | ||||
| 			log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err) | ||||
| 			f.dropPeer(peer) | ||||
| 			return | ||||
| 		} | ||||
| 		// Run the actual import and log any issues | ||||
| 		if _, err := f.insertHeaders([]*types.Header{header}); err != nil { | ||||
| 			log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err) | ||||
| 			return | ||||
| 		} | ||||
| 		// Invoke the testing hook if needed | ||||
| 		if f.importedHook != nil { | ||||
| 			f.importedHook(header, nil) | ||||
| 		} | ||||
| 	}() | ||||
| } | ||||
|  | ||||
| // importBlocks spawns a new goroutine to run a block insertion into the chain. If the | ||||
| // block's number is at the same height as the current import phase, it updates | ||||
| // the phase states accordingly. | ||||
| func (f *BlockFetcher) importBlocks(peer string, block *types.Block) { | ||||
| 	hash := block.Hash() | ||||
|  | ||||
| 	// Run the import on a new thread | ||||
| 	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash) | ||||
| 	go func() { | ||||
| 		defer func() { f.done <- hash }() | ||||
|  | ||||
| 		// If the parent's unknown, abort insertion | ||||
| 		parent := f.getBlock(block.ParentHash()) | ||||
| 		if parent == nil { | ||||
| 			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash()) | ||||
| 			return | ||||
| 		} | ||||
| 		// Quickly validate the header and propagate the block if it passes | ||||
| 		switch err := f.verifyHeader(block.Header()); err { | ||||
| 		case nil: | ||||
| 			// All ok, quickly propagate to our peers | ||||
| 			blockBroadcastOutTimer.UpdateSince(block.ReceivedAt) | ||||
| 			go f.broadcastBlock(block, true) | ||||
|  | ||||
| 		case consensus.ErrFutureBlock: | ||||
| 			// Weird future block, don't fail, but neither propagate | ||||
|  | ||||
| 		default: | ||||
| 			// Something went very wrong, drop the peer | ||||
| 			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err) | ||||
| 			f.dropPeer(peer) | ||||
| 			return | ||||
| 		} | ||||
| 		// Run the actual import and log any issues | ||||
| 		if _, err := f.insertChain(types.Blocks{block}); err != nil { | ||||
| 			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err) | ||||
| 			return | ||||
| 		} | ||||
| 		// If import succeeded, broadcast the block | ||||
| 		blockAnnounceOutTimer.UpdateSince(block.ReceivedAt) | ||||
| 		go f.broadcastBlock(block, false) | ||||
|  | ||||
| 		// Invoke the testing hook if needed | ||||
| 		if f.importedHook != nil { | ||||
| 			f.importedHook(nil, block) | ||||
| 		} | ||||
| 	}() | ||||
| } | ||||
|  | ||||
| // forgetHash removes all traces of a block announcement from the fetcher's | ||||
| // internal state. | ||||
| func (f *BlockFetcher) forgetHash(hash common.Hash) { | ||||
| 	// Remove all pending announces and decrement DOS counters | ||||
| 	if announceMap, ok := f.announced[hash]; ok { | ||||
| 		for _, announce := range announceMap { | ||||
| 			f.announces[announce.origin]-- | ||||
| 			if f.announces[announce.origin] <= 0 { | ||||
| 				delete(f.announces, announce.origin) | ||||
| 			} | ||||
| 		} | ||||
| 		delete(f.announced, hash) | ||||
| 		if f.announceChangeHook != nil { | ||||
| 			f.announceChangeHook(hash, false) | ||||
| 		} | ||||
| 	} | ||||
| 	// Remove any pending fetches and decrement the DOS counters | ||||
| 	if announce := f.fetching[hash]; announce != nil { | ||||
| 		f.announces[announce.origin]-- | ||||
| 		if f.announces[announce.origin] <= 0 { | ||||
| 			delete(f.announces, announce.origin) | ||||
| 		} | ||||
| 		delete(f.fetching, hash) | ||||
| 	} | ||||
|  | ||||
| 	// Remove any pending completion requests and decrement the DOS counters | ||||
| 	for _, announce := range f.fetched[hash] { | ||||
| 		f.announces[announce.origin]-- | ||||
| 		if f.announces[announce.origin] <= 0 { | ||||
| 			delete(f.announces, announce.origin) | ||||
| 		} | ||||
| 	} | ||||
| 	delete(f.fetched, hash) | ||||
|  | ||||
| 	// Remove any pending completions and decrement the DOS counters | ||||
| 	if announce := f.completing[hash]; announce != nil { | ||||
| 		f.announces[announce.origin]-- | ||||
| 		if f.announces[announce.origin] <= 0 { | ||||
| 			delete(f.announces, announce.origin) | ||||
| 		} | ||||
| 		delete(f.completing, hash) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // forgetBlock removes all traces of a queued block from the fetcher's internal | ||||
| // state. | ||||
| func (f *BlockFetcher) forgetBlock(hash common.Hash) { | ||||
| 	if insert := f.queued[hash]; insert != nil { | ||||
| 		f.queues[insert.origin]-- | ||||
| 		if f.queues[insert.origin] == 0 { | ||||
| 			delete(f.queues, insert.origin) | ||||
| 		} | ||||
| 		delete(f.queued, hash) | ||||
| 	} | ||||
| } | ||||
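
To tie the pieces of this file together, a minimal sketch of constructing and running a full-mode fetcher is shown below. It uses only the callback types declared in the struct above; the helper itself and the idea that the callbacks come from a protocol handler are assumptions for illustration, not code from this diff:

// startFetcher wires caller-supplied callbacks into a full-mode BlockFetcher.
func startFetcher(
	getHeader HeaderRetrievalFn, getBlock blockRetrievalFn,
	verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn,
	chainHeight chainHeightFn, insertHeaders headersInsertFn,
	insertChain chainInsertFn, dropPeer peerDropFn,
) *BlockFetcher {
	f := NewBlockFetcher(false, getHeader, getBlock, verifyHeader, broadcastBlock,
		chainHeight, insertHeaders, insertChain, dropPeer)
	f.Start() // spins up loop(); pair with f.Stop() on shutdown
	return f
}
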
| @ -1,901 +0,0 @@ | ||||
| // Copyright 2015 The go-ethereum Authors | ||||
| // This file is part of the go-ethereum library. | ||||
| // | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify | ||||
| // it under the terms of the GNU Lesser General Public License as published by | ||||
| // the Free Software Foundation, either version 3 of the License, or | ||||
| // (at your option) any later version. | ||||
| // | ||||
| // The go-ethereum library is distributed in the hope that it will be useful, | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
| // GNU Lesser General Public License for more details. | ||||
| // | ||||
| // You should have received a copy of the GNU Lesser General Public License | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. | ||||
|  | ||||
| package fetcher | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"math/big" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"testing" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/ethereum/go-ethereum/common" | ||||
| 	"github.com/ethereum/go-ethereum/consensus/ethash" | ||||
| 	"github.com/ethereum/go-ethereum/core" | ||||
| 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||
| 	"github.com/ethereum/go-ethereum/core/types" | ||||
| 	"github.com/ethereum/go-ethereum/crypto" | ||||
| 	"github.com/ethereum/go-ethereum/params" | ||||
| 	"github.com/ethereum/go-ethereum/trie" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	testdb      = rawdb.NewMemoryDatabase() | ||||
| 	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") | ||||
| 	testAddress = crypto.PubkeyToAddress(testKey.PublicKey) | ||||
|  | ||||
| 	gspec = core.Genesis{ | ||||
| 		Alloc:   core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, | ||||
| 		BaseFee: big.NewInt(params.InitialBaseFee), | ||||
| 	} | ||||
| 	genesis      = gspec.MustCommit(testdb) | ||||
| 	unknownBlock = types.NewBlock(&types.Header{Root: types.EmptyRootHash, GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil)) | ||||
| ) | ||||
|  | ||||
| // makeChain creates a chain of n blocks starting at and including parent. | ||||
| // The returned hash chain is ordered head->parent. In addition, every 3rd block | ||||
| // contains a transaction and every 5th an uncle to allow testing correct block | ||||
| // reassembly. | ||||
| func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) { | ||||
| 	blocks, _ := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) { | ||||
| 		block.SetCoinbase(common.Address{seed}) | ||||
|  | ||||
| 		// If the block number is a multiple of 3, send a bonus transaction to the miner | ||||
| 		if parent == genesis && i%3 == 0 { | ||||
| 			signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp()) | ||||
| 			tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) | ||||
| 			if err != nil { | ||||
| 				panic(err) | ||||
| 			} | ||||
| 			block.AddTx(tx) | ||||
| 		} | ||||
| 		// If the block number is a multiple of 5, add a bonus uncle to the block | ||||
| 		if i > 0 && i%5 == 0 { | ||||
| 			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i - 1))}) | ||||
| 		} | ||||
| 	}) | ||||
| 	hashes := make([]common.Hash, n+1) | ||||
| 	hashes[len(hashes)-1] = parent.Hash() | ||||
| 	blockm := make(map[common.Hash]*types.Block, n+1) | ||||
| 	blockm[parent.Hash()] = parent | ||||
| 	for i, b := range blocks { | ||||
| 		hashes[len(hashes)-i-2] = b.Hash() | ||||
| 		blockm[b.Hash()] = b | ||||
| 	} | ||||
| 	return hashes, blockm | ||||
| } | ||||
|  | ||||
| // fetcherTester is a test simulator for mocking out a local blockchain. | ||||
| type fetcherTester struct { | ||||
| 	fetcher *BlockFetcher | ||||
|  | ||||
| 	hashes  []common.Hash                 // Hash chain belonging to the tester | ||||
| 	headers map[common.Hash]*types.Header // Headers belonging to the tester | ||||
| 	blocks  map[common.Hash]*types.Block  // Blocks belonging to the tester | ||||
| 	drops   map[string]bool               // Map of peers dropped by the fetcher | ||||
|  | ||||
| 	lock sync.RWMutex | ||||
| } | ||||
|  | ||||
| // newTester creates a new fetcher test mocker. | ||||
| func newTester(light bool) *fetcherTester { | ||||
| 	tester := &fetcherTester{ | ||||
| 		hashes:  []common.Hash{genesis.Hash()}, | ||||
| 		headers: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, | ||||
| 		blocks:  map[common.Hash]*types.Block{genesis.Hash(): genesis}, | ||||
| 		drops:   make(map[string]bool), | ||||
| 	} | ||||
| 	tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer) | ||||
| 	tester.fetcher.Start() | ||||
|  | ||||
| 	return tester | ||||
| } | ||||
|  | ||||
| // getHeader retrieves a header from the tester's block chain. | ||||
| func (f *fetcherTester) getHeader(hash common.Hash) *types.Header { | ||||
| 	f.lock.RLock() | ||||
| 	defer f.lock.RUnlock() | ||||
|  | ||||
| 	return f.headers[hash] | ||||
| } | ||||
|  | ||||
| // getBlock retrieves a block from the tester's block chain. | ||||
| func (f *fetcherTester) getBlock(hash common.Hash) *types.Block { | ||||
| 	f.lock.RLock() | ||||
| 	defer f.lock.RUnlock() | ||||
|  | ||||
| 	return f.blocks[hash] | ||||
| } | ||||
|  | ||||
| // verifyHeader is a nop placeholder for the block header verification. | ||||
| func (f *fetcherTester) verifyHeader(header *types.Header) error { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // broadcastBlock is a nop placeholder for the block broadcasting. | ||||
| func (f *fetcherTester) broadcastBlock(block *types.Block, propagate bool) { | ||||
| } | ||||
|  | ||||
| // chainHeight retrieves the current height (block number) of the chain. | ||||
| func (f *fetcherTester) chainHeight() uint64 { | ||||
| 	f.lock.RLock() | ||||
| 	defer f.lock.RUnlock() | ||||
|  | ||||
| 	if f.fetcher.light { | ||||
| 		return f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() | ||||
| 	} | ||||
| 	return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() | ||||
| } | ||||
|  | ||||
| // insertHeaders injects a batch of new headers into the simulated chain. | ||||
| func (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) { | ||||
| 	f.lock.Lock() | ||||
| 	defer f.lock.Unlock() | ||||
|  | ||||
| 	for i, header := range headers { | ||||
| 		// Make sure the parent is known | ||||
| 		if _, ok := f.headers[header.ParentHash]; !ok { | ||||
| 			return i, errors.New("unknown parent") | ||||
| 		} | ||||
| 		// Discard any new headers if the same height already exists | ||||
| 		if header.Number.Uint64() <= f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() { | ||||
| 			return i, nil | ||||
| 		} | ||||
| 		// Otherwise build our current chain | ||||
| 		f.hashes = append(f.hashes, header.Hash()) | ||||
| 		f.headers[header.Hash()] = header | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| // insertChain injects a batch of new blocks into the simulated chain. | ||||
| func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) { | ||||
| 	f.lock.Lock() | ||||
| 	defer f.lock.Unlock() | ||||
|  | ||||
| 	for i, block := range blocks { | ||||
| 		// Make sure the parent is known | ||||
| 		if _, ok := f.blocks[block.ParentHash()]; !ok { | ||||
| 			return i, errors.New("unknown parent") | ||||
| 		} | ||||
| 		// Discard any new blocks if the same height already exists | ||||
| 		if block.NumberU64() <= f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() { | ||||
| 			return i, nil | ||||
| 		} | ||||
| 		// Otherwise build our current chain | ||||
| 		f.hashes = append(f.hashes, block.Hash()) | ||||
| 		f.blocks[block.Hash()] = block | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
|  | ||||
| // dropPeer is an emulator for the peer removal, simply accumulating the various | ||||
| // peers dropped by the fetcher. | ||||
| func (f *fetcherTester) dropPeer(peer string) { | ||||
| 	f.lock.Lock() | ||||
| 	defer f.lock.Unlock() | ||||
|  | ||||
| 	f.drops[peer] = true | ||||
| } | ||||
|  | ||||
| // makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer. | ||||
| func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn { | ||||
| 	closure := make(map[common.Hash]*types.Block) | ||||
| 	for hash, block := range blocks { | ||||
| 		closure[hash] = block | ||||
| 	} | ||||
| 	// Create a function that returns a header from the closure | ||||
| 	return func(hash common.Hash) error { | ||||
| 		// Gather the headers to return | ||||
| 		headers := make([]*types.Header, 0, 1) | ||||
| 		if block, ok := closure[hash]; ok { | ||||
| 			headers = append(headers, block.Header()) | ||||
| 		} | ||||
| 		// Return on a new thread | ||||
| 		go f.fetcher.FilterHeaders(peer, headers, time.Now().Add(drift)) | ||||
|  | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // makeBodyFetcher retrieves a block body fetcher associated with a simulated peer. | ||||
| func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn { | ||||
| 	closure := make(map[common.Hash]*types.Block) | ||||
| 	for hash, block := range blocks { | ||||
| 		closure[hash] = block | ||||
| 	} | ||||
| 	// Create a function that returns blocks from the closure | ||||
| 	return func(hashes []common.Hash) error { | ||||
| 		// Gather the block bodies to return | ||||
| 		transactions := make([][]*types.Transaction, 0, len(hashes)) | ||||
| 		uncles := make([][]*types.Header, 0, len(hashes)) | ||||
|  | ||||
| 		for _, hash := range hashes { | ||||
| 			if block, ok := closure[hash]; ok { | ||||
| 				transactions = append(transactions, block.Transactions()) | ||||
| 				uncles = append(uncles, block.Uncles()) | ||||
| 			} | ||||
| 		} | ||||
| 		// Return on a new thread | ||||
| 		go f.fetcher.FilterBodies(peer, transactions, uncles, time.Now().Add(drift)) | ||||
|  | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // verifyFetchingEvent verifies that one single event arrives on a fetching channel. | ||||
| func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) { | ||||
| 	if arrive { | ||||
| 		select { | ||||
| 		case <-fetching: | ||||
| 		case <-time.After(time.Second): | ||||
| 			t.Fatalf("fetching timeout") | ||||
| 		} | ||||
| 	} else { | ||||
| 		select { | ||||
| 		case <-fetching: | ||||
| 			t.Fatalf("fetching invoked") | ||||
| 		case <-time.After(10 * time.Millisecond): | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // verifyCompletingEvent verifies that one single event arrives on a completing channel. | ||||
| func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) { | ||||
| 	if arrive { | ||||
| 		select { | ||||
| 		case <-completing: | ||||
| 		case <-time.After(time.Second): | ||||
| 			t.Fatalf("completing timeout") | ||||
| 		} | ||||
| 	} else { | ||||
| 		select { | ||||
| 		case <-completing: | ||||
| 			t.Fatalf("completing invoked") | ||||
| 		case <-time.After(10 * time.Millisecond): | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // verifyImportEvent verifies that one single event arrives on an import channel. | ||||
| func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { | ||||
| 	if arrive { | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| 		case <-time.After(time.Second): | ||||
| 			t.Fatalf("import timeout") | ||||
| 		} | ||||
| 	} else { | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| 			t.Fatalf("import invoked") | ||||
| 		case <-time.After(20 * time.Millisecond): | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // verifyImportCount verifies that exactly count number of events arrive on an | ||||
| // import hook channel. | ||||
| func verifyImportCount(t *testing.T, imported chan interface{}, count int) { | ||||
| 	for i := 0; i < count; i++ { | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| 		case <-time.After(time.Second): | ||||
| 			t.Fatalf("block %d: import timeout", i+1) | ||||
| 		} | ||||
| 	} | ||||
| 	verifyImportDone(t, imported) | ||||
| } | ||||
|  | ||||
| // verifyImportDone verifies that no more events are arriving on an import channel. | ||||
| func verifyImportDone(t *testing.T, imported chan interface{}) { | ||||
| 	select { | ||||
| 	case <-imported: | ||||
| 		t.Fatalf("extra block imported") | ||||
| 	case <-time.After(50 * time.Millisecond): | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // verifyChainHeight verifies the chain height is as expected. | ||||
| func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) { | ||||
| 	if fetcher.chainHeight() != height { | ||||
| 		t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Tests that a fetcher accepts block/header announcements and initiates retrievals | ||||
| // for them, successfully importing into the local chain. | ||||
| func TestFullSequentialAnnouncements(t *testing.T)  { testSequentialAnnouncements(t, false) } | ||||
| func TestLightSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, true) } | ||||
|  | ||||
| func testSequentialAnnouncements(t *testing.T, light bool) { | ||||
| 	// Create a chain of blocks to import | ||||
| 	targetBlocks := 4 * hashLimit | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
|  | ||||
| 	tester := newTester(light) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
|  | ||||
| 	// Iteratively announce blocks until all are imported | ||||
| 	imported := make(chan interface{}) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher tried to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher tried to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
| 	for i := len(hashes) - 2; i >= 0; i-- { | ||||
| 		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 		verifyImportEvent(t, imported, true) | ||||
| 	} | ||||
| 	verifyImportDone(t, imported) | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
|  | ||||
| // Tests that if blocks are announced by multiple peers (or even the same buggy | ||||
| // peer), they will only get downloaded at most once. | ||||
| func TestFullConcurrentAnnouncements(t *testing.T)  { testConcurrentAnnouncements(t, false) } | ||||
| func TestLightConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, true) } | ||||
|  | ||||
| func testConcurrentAnnouncements(t *testing.T, light bool) { | ||||
| 	// Create a chain of blocks to import | ||||
| 	targetBlocks := 4 * hashLimit | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
|  | ||||
| 	// Assemble a tester with a built-in counter for the requests | ||||
| 	tester := newTester(light) | ||||
| 	firstHeaderFetcher := tester.makeHeaderFetcher("first", blocks, -gatherSlack) | ||||
| 	firstBodyFetcher := tester.makeBodyFetcher("first", blocks, 0) | ||||
| 	secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack) | ||||
| 	secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0) | ||||
|  | ||||
| 	counter := uint32(0) | ||||
| 	firstHeaderWrapper := func(hash common.Hash) error { | ||||
| 		atomic.AddUint32(&counter, 1) | ||||
| 		return firstHeaderFetcher(hash) | ||||
| 	} | ||||
| 	secondHeaderWrapper := func(hash common.Hash) error { | ||||
| 		atomic.AddUint32(&counter, 1) | ||||
| 		return secondHeaderFetcher(hash) | ||||
| 	} | ||||
| 	// Iteratively announce blocks until all are imported | ||||
| 	imported := make(chan interface{}) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher tried to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher tried to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
| 	for i := len(hashes) - 2; i >= 0; i-- { | ||||
| 		tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), firstHeaderWrapper, firstBodyFetcher) | ||||
| 		tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), secondHeaderWrapper, secondBodyFetcher) | ||||
| 		tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), secondHeaderWrapper, secondBodyFetcher) | ||||
| 		verifyImportEvent(t, imported, true) | ||||
| 	} | ||||
| 	verifyImportDone(t, imported) | ||||
|  | ||||
| 	// Make sure no blocks were retrieved twice | ||||
| 	if int(counter) != targetBlocks { | ||||
| 		t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks) | ||||
| 	} | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
|  | ||||
| // Tests that announcements arriving while a previous is being fetched still | ||||
| // result in a valid import. | ||||
| func TestFullOverlappingAnnouncements(t *testing.T)  { testOverlappingAnnouncements(t, false) } | ||||
| func TestLightOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, true) } | ||||
|  | ||||
| func testOverlappingAnnouncements(t *testing.T, light bool) { | ||||
| 	// Create a chain of blocks to import | ||||
| 	targetBlocks := 4 * hashLimit | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
|  | ||||
| 	tester := newTester(light) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
|  | ||||
| 	// Iteratively announce blocks, but overlap them continuously | ||||
| 	overlap := 16 | ||||
| 	imported := make(chan interface{}, len(hashes)-1) | ||||
| 	for i := 0; i < overlap; i++ { | ||||
| 		imported <- nil | ||||
| 	} | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher tried to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher tried to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for i := len(hashes) - 2; i >= 0; i-- { | ||||
| 		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| 		case <-time.After(time.Second): | ||||
| 			t.Fatalf("block %d: import timeout", len(hashes)-i) | ||||
| 		} | ||||
| 	} | ||||
| 	// Wait for all the imports to complete and check count | ||||
| 	verifyImportCount(t, imported, overlap) | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
|  | ||||
| // Tests that announces already being retrieved will not be duplicated. | ||||
| func TestFullPendingDeduplication(t *testing.T)  { testPendingDeduplication(t, false) } | ||||
| func TestLightPendingDeduplication(t *testing.T) { testPendingDeduplication(t, true) } | ||||
|  | ||||
| func testPendingDeduplication(t *testing.T, light bool) { | ||||
| 	// Create a hash and corresponding block | ||||
| 	hashes, blocks := makeChain(1, 0, genesis) | ||||
|  | ||||
| 	// Assemble a tester with a built-in counter and delayed fetcher | ||||
| 	tester := newTester(light) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("repeater", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0) | ||||
| 
|  | ||||
| 	counter := uint32(0) | ||||
| 	headerWrapper := func(hash common.Hash) error { | ||||
| 		atomic.AddUint32(&counter, 1) | ||||
|  | ||||
| 		// Simulate a long running fetch | ||||
| 		go func() { | ||||
| 			time.Sleep(delay) | ||||
| 			headerFetcher(hash) | ||||
| 		}() | ||||
| 		return nil | ||||
| 	} | ||||
| 	checkNonExist := func() bool { | ||||
| 		return tester.getBlock(hashes[0]) == nil | ||||
| 	} | ||||
| 	if light { | ||||
| 		checkNonExist = func() bool { | ||||
| 			return tester.getHeader(hashes[0]) == nil | ||||
| 		} | ||||
| 	} | ||||
| 	// Announce the same block many times until it's fetched (wait for any pending ops) | ||||
| 	for checkNonExist() { | ||||
| 		tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher) | ||||
| 		time.Sleep(time.Millisecond) | ||||
| 	} | ||||
| 	time.Sleep(delay) | ||||
|  | ||||
| 	// Check that all blocks were imported and none fetched twice | ||||
| 	if int(counter) != 1 { | ||||
| 		t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1) | ||||
| 	} | ||||
| 	verifyChainHeight(t, tester, 1) | ||||
| } | ||||
|  | ||||
| // Tests that announcements retrieved in a random order are cached and eventually | ||||
| // imported when all the gaps are filled in. | ||||
| func TestFullRandomArrivalImport(t *testing.T)  { testRandomArrivalImport(t, false) } | ||||
| func TestLightRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, true) } | ||||
|  | ||||
| func testRandomArrivalImport(t *testing.T, light bool) { | ||||
| 	// Create a chain of blocks to import, and choose one to delay | ||||
| 	targetBlocks := maxQueueDist | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
| 	skip := targetBlocks / 2 | ||||
|  | ||||
| 	tester := newTester(light) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
|  | ||||
| 	// Iteratively announce blocks, skipping one entry | ||||
| 	imported := make(chan interface{}, len(hashes)-1) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher tried to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher tried to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
| 	for i := len(hashes) - 1; i >= 0; i-- { | ||||
| 		if i != skip { | ||||
| 			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 			time.Sleep(time.Millisecond) | ||||
| 		} | ||||
| 	} | ||||
| 	// Finally announce the skipped entry and check full import | ||||
| 	tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 	verifyImportCount(t, imported, len(hashes)-1) | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
|  | ||||
| // Tests that direct block enqueues (due to block propagation vs. hash announce) | ||||
| // are correctly scheduled, filling in import queue gaps. | ||||
| func TestQueueGapFill(t *testing.T) { | ||||
| 	// Create a chain of blocks to import, and choose one to not announce at all | ||||
| 	targetBlocks := maxQueueDist | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
| 	skip := targetBlocks / 2 | ||||
|  | ||||
| 	tester := newTester(false) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
|  | ||||
| 	// Iteratively announce blocks, skipping one entry | ||||
| 	imported := make(chan interface{}, len(hashes)-1) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } | ||||
|  | ||||
| 	for i := len(hashes) - 1; i >= 0; i-- { | ||||
| 		if i != skip { | ||||
| 			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 			time.Sleep(time.Millisecond) | ||||
| 		} | ||||
| 	} | ||||
| 	// Fill the missing block directly as if propagated | ||||
| 	tester.fetcher.Enqueue("valid", blocks[hashes[skip]]) | ||||
| 	verifyImportCount(t, imported, len(hashes)-1) | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
|  | ||||
| // Tests that blocks arriving from various sources (multiple propagations, hash | ||||
| // announces, etc) do not get scheduled for import multiple times. | ||||
| func TestImportDeduplication(t *testing.T) { | ||||
| 	// Create two blocks to import (one for duplication, the other for stalling) | ||||
| 	hashes, blocks := makeChain(2, 0, genesis) | ||||
|  | ||||
| 	// Create the tester and wrap the importer with a counter | ||||
| 	tester := newTester(false) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
|  | ||||
| 	counter := uint32(0) | ||||
| 	tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) { | ||||
| 		atomic.AddUint32(&counter, uint32(len(blocks))) | ||||
| 		return tester.insertChain(blocks) | ||||
| 	} | ||||
| 	// Instrument the fetching and imported events | ||||
| 	fetching := make(chan []common.Hash) | ||||
| 	imported := make(chan interface{}, len(hashes)-1) | ||||
| 	tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes } | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } | ||||
|  | ||||
| 	// Announce the duplicating block, wait for retrieval, and also propagate directly | ||||
| 	tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 	<-fetching | ||||
|  | ||||
| 	tester.fetcher.Enqueue("valid", blocks[hashes[0]]) | ||||
| 	tester.fetcher.Enqueue("valid", blocks[hashes[0]]) | ||||
| 	tester.fetcher.Enqueue("valid", blocks[hashes[0]]) | ||||
|  | ||||
| 	// Fill the missing block directly as if propagated, and check import uniqueness | ||||
| 	tester.fetcher.Enqueue("valid", blocks[hashes[1]]) | ||||
| 	verifyImportCount(t, imported, 2) | ||||
|  | ||||
| 	if counter != 2 { | ||||
| 		t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Tests that blocks with numbers much lower or higher than our current head get | ||||
| // discarded to prevent wasting resources on useless blocks from faulty peers. | ||||
| func TestDistantPropagationDiscarding(t *testing.T) { | ||||
| 	// Create a long chain to import and define the discard boundaries | ||||
| 	hashes, blocks := makeChain(3*maxQueueDist, 0, genesis) | ||||
| 	head := hashes[len(hashes)/2] | ||||
|  | ||||
| 	low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1 | ||||
|  | ||||
| 	// Create a tester and simulate a head block being the middle of the above chain | ||||
| 	tester := newTester(false) | ||||
|  | ||||
| 	tester.lock.Lock() | ||||
| 	tester.hashes = []common.Hash{head} | ||||
| 	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} | ||||
| 	tester.lock.Unlock() | ||||
|  | ||||
| 	// Ensure that a block with a lower number than the threshold is discarded | ||||
| 	tester.fetcher.Enqueue("lower", blocks[hashes[low]]) | ||||
| 	time.Sleep(10 * time.Millisecond) | ||||
| 	if !tester.fetcher.queue.Empty() { | ||||
| 		t.Fatalf("fetcher queued stale block") | ||||
| 	} | ||||
| 	// Ensure that a block with a higher number than the threshold is discarded | ||||
| 	tester.fetcher.Enqueue("higher", blocks[hashes[high]]) | ||||
| 	time.Sleep(10 * time.Millisecond) | ||||
| 	if !tester.fetcher.queue.Empty() { | ||||
| 		t.Fatalf("fetcher queued future block") | ||||
| 	} | ||||
| } | ||||
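The discard boundaries these tests probe reduce to a simple distance check against the local head. A minimal sketch of that rule, assuming constants like the maxUncleDist/maxQueueDist the tests reference (the helper itself is illustrative, not the fetcher's actual code):

```go
package main

import "fmt"

const (
	maxUncleDist = 7  // assumed: max distance below head still accepted
	maxQueueDist = 32 // assumed: max distance above head still queued
)

// acceptable reports whether a block at the given number should be kept
// relative to the current chain head, mirroring the discard rule the
// tests above probe from both sides.
func acceptable(head, number uint64) bool {
	dist := int64(number) - int64(head)
	return dist >= -maxUncleDist && dist <= maxQueueDist
}

func main() {
	head := uint64(100)
	fmt.Println(acceptable(head, 92))  // false: too far below head
	fmt.Println(acceptable(head, 133)) // false: too far above head
	fmt.Println(acceptable(head, 110)) // true: within queueing range
}
```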
| 
 | ||||
| // Tests that announcements with numbers much lower or higher than our current | ||||
| // head get discarded to prevent wasting resources on useless blocks from faulty | ||||
| // peers. | ||||
| func TestFullDistantAnnouncementDiscarding(t *testing.T)  { testDistantAnnouncementDiscarding(t, false) } | ||||
| func TestLightDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, true) } | ||||
| 
 | ||||
| func testDistantAnnouncementDiscarding(t *testing.T, light bool) { | ||||
| 	// Create a long chain to import and define the discard boundaries | ||||
| 	hashes, blocks := makeChain(3*maxQueueDist, 0, genesis) | ||||
| 	head := hashes[len(hashes)/2] | ||||
| 
 | ||||
| 	low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1 | ||||
| 
 | ||||
| 	// Create a tester and simulate a head block being the middle of the above chain | ||||
| 	tester := newTester(light) | ||||
| 
 | ||||
| 	tester.lock.Lock() | ||||
| 	tester.hashes = []common.Hash{head} | ||||
| 	tester.headers = map[common.Hash]*types.Header{head: blocks[head].Header()} | ||||
| 	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} | ||||
| 	tester.lock.Unlock() | ||||
| 
 | ||||
| 	headerFetcher := tester.makeHeaderFetcher("lower", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("lower", blocks, 0) | ||||
| 
 | ||||
| 	fetching := make(chan struct{}, 2) | ||||
| 	tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} } | ||||
| 
 | ||||
| 	// Ensure that a block with a lower number than the threshold is discarded | ||||
| 	tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 	select { | ||||
| 	case <-time.After(50 * time.Millisecond): | ||||
| 	case <-fetching: | ||||
| 		t.Fatalf("fetcher requested stale header") | ||||
| 	} | ||||
| 	// Ensure that a block with a higher number than the threshold is discarded | ||||
| 	tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 	select { | ||||
| 	case <-time.After(50 * time.Millisecond): | ||||
| 	case <-fetching: | ||||
| 		t.Fatalf("fetcher requested future header") | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Tests that peers announcing blocks with invalid numbers (i.e. not matching | ||||
| // the headers provided afterwards) get dropped as malicious. | ||||
| func TestFullInvalidNumberAnnouncement(t *testing.T)  { testInvalidNumberAnnouncement(t, false) } | ||||
| func TestLightInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, true) } | ||||
| 
 | ||||
| func testInvalidNumberAnnouncement(t *testing.T, light bool) { | ||||
| 	// Create a single block to import and check numbers against | ||||
| 	hashes, blocks := makeChain(1, 0, genesis) | ||||
| 
 | ||||
| 	tester := newTester(light) | ||||
| 	badHeaderFetcher := tester.makeHeaderFetcher("bad", blocks, -gatherSlack) | ||||
| 	badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0) | ||||
| 
 | ||||
| 	imported := make(chan interface{}) | ||||
| 	announced := make(chan interface{}) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
| 	// Announce a block with a bad number, check for immediate drop | ||||
| 	tester.fetcher.announceChangeHook = func(hash common.Hash, b bool) { | ||||
| 		announced <- nil | ||||
| 	} | ||||
| 	tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher) | ||||
| 	verifyAnnounce := func() { | ||||
| 		for i := 0; i < 2; i++ { | ||||
| 			select { | ||||
| 			case <-announced: | ||||
| 				continue | ||||
| 			case <-time.After(1 * time.Second): | ||||
| 				t.Fatal("announce timeout") | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	verifyAnnounce() | ||||
| 	verifyImportEvent(t, imported, false) | ||||
| 	tester.lock.RLock() | ||||
| 	dropped := tester.drops["bad"] | ||||
| 	tester.lock.RUnlock() | ||||
| 
 | ||||
| 	if !dropped { | ||||
| 		t.Fatalf("peer with invalid numbered announcement not dropped") | ||||
| 	} | ||||
| 	goodHeaderFetcher := tester.makeHeaderFetcher("good", blocks, -gatherSlack) | ||||
| 	goodBodyFetcher := tester.makeBodyFetcher("good", blocks, 0) | ||||
| 	// Make sure a good announcement passes without a drop | ||||
| 	tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), goodHeaderFetcher, goodBodyFetcher) | ||||
| 	verifyAnnounce() | ||||
| 	verifyImportEvent(t, imported, true) | ||||
| 
 | ||||
| 	tester.lock.RLock() | ||||
| 	dropped = tester.drops["good"] | ||||
| 	tester.lock.RUnlock() | ||||
| 
 | ||||
| 	if dropped { | ||||
| 		t.Fatalf("peer with valid numbered announcement dropped") | ||||
| 	} | ||||
| 	verifyImportDone(t, imported) | ||||
| } | ||||
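What gets a peer dropped here is a plain mismatch between the announced number and the header it later delivers. A hedged sketch of that check (types is go-ethereum's core/types; the dropPeer callback and peer id are stand-ins):

```go
import "github.com/ethereum/go-ethereum/core/types"

// validAnnouncedNumber reports whether the delivered header carries the
// number the peer originally announced; on a mismatch the announcing
// peer is treated as malicious and dropped.
func validAnnouncedNumber(announced uint64, header *types.Header, dropPeer func(id string), peer string) bool {
	if header.Number.Uint64() != announced {
		dropPeer(peer)
		return false
	}
	return true
}
```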
| 
 | ||||
| // Tests that if a block is empty (i.e. header only), no body request should be | ||||
| // made, and instead the header should be assembled into a whole block on its own. | ||||
| func TestEmptyBlockShortCircuit(t *testing.T) { | ||||
| 	// Create a chain of blocks to import | ||||
| 	hashes, blocks := makeChain(32, 0, genesis) | ||||
| 
 | ||||
| 	tester := newTester(false) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
| 
 | ||||
| 	// Add a monitoring hook for all internal events | ||||
| 	fetching := make(chan []common.Hash) | ||||
| 	tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes } | ||||
| 
 | ||||
| 	completing := make(chan []common.Hash) | ||||
| 	tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes } | ||||
| 
 | ||||
| 	imported := make(chan interface{}) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if block == nil { | ||||
| 			t.Fatalf("Fetcher try to import empty block") | ||||
| 		} | ||||
| 		imported <- block | ||||
| 	} | ||||
| 	// Iteratively announce blocks until all are imported | ||||
| 	for i := len(hashes) - 2; i >= 0; i-- { | ||||
| 		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 
 | ||||
| 		// All announces should fetch the header | ||||
| 		verifyFetchingEvent(t, fetching, true) | ||||
| 
 | ||||
| 		// Only blocks with data contents should request bodies | ||||
| 		verifyCompletingEvent(t, completing, len(blocks[hashes[i]].Transactions()) > 0 || len(blocks[hashes[i]].Uncles()) > 0) | ||||
| 
 | ||||
| 		// Regardless of the construct, import should succeed | ||||
| 		verifyImportEvent(t, imported, true) | ||||
| 	} | ||||
| 	verifyImportDone(t, imported) | ||||
| } | ||||
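The short circuit relies on the header alone proving the body is empty: the transaction root and uncle hash must equal the well-known empty commitments. A minimal sketch under that assumption, using go-ethereum's core/types:

```go
import "github.com/ethereum/go-ethereum/core/types"

// assembleIfEmpty returns a complete block when the header provably has an
// empty body (no transactions, no uncles), so no body request is needed;
// otherwise it returns nil and the caller must fetch the body.
func assembleIfEmpty(header *types.Header) *types.Block {
	if header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash {
		return types.NewBlockWithHeader(header)
	}
	return nil
}
```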
| 
 | ||||
| // Tests that a peer is unable to use unbounded memory by sending infinite | ||||
| // block announcements to a node, but that even in the face of such an attack, | ||||
| // the fetcher remains operational. | ||||
| func TestHashMemoryExhaustionAttack(t *testing.T) { | ||||
| 	// Create a tester with instrumented import hooks | ||||
| 	tester := newTester(false) | ||||
| 
 | ||||
| 	imported, announces := make(chan interface{}), int32(0) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } | ||||
| 	tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) { | ||||
| 		if added { | ||||
| 			atomic.AddInt32(&announces, 1) | ||||
| 		} else { | ||||
| 			atomic.AddInt32(&announces, -1) | ||||
| 		} | ||||
| 	} | ||||
| 	// Create a valid chain and an infinite junk chain | ||||
| 	targetBlocks := hashLimit + 2*maxQueueDist | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
| 	validHeaderFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	validBodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
| 
 | ||||
| 	attack, _ := makeChain(targetBlocks, 0, unknownBlock) | ||||
| 	attackerHeaderFetcher := tester.makeHeaderFetcher("attacker", nil, -gatherSlack) | ||||
| 	attackerBodyFetcher := tester.makeBodyFetcher("attacker", nil, 0) | ||||
| 
 | ||||
| 	// Feed the tester a huge hashset from the attacker, and a limited one from the valid peer | ||||
| 	for i := 0; i < len(attack); i++ { | ||||
| 		if i < maxQueueDist { | ||||
| 			tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), validHeaderFetcher, validBodyFetcher) | ||||
| 		} | ||||
| 		tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher) | ||||
| 	} | ||||
| 	if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist { | ||||
| 		t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist) | ||||
| 	} | ||||
| 	// Wait for fetches to complete | ||||
| 	verifyImportCount(t, imported, maxQueueDist) | ||||
| 
 | ||||
| 	// Feed the remaining valid hashes to ensure DOS protection state remains clean | ||||
| 	for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- { | ||||
| 		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), validHeaderFetcher, validBodyFetcher) | ||||
| 		verifyImportEvent(t, imported, true) | ||||
| 	} | ||||
| 	verifyImportDone(t, imported) | ||||
| } | ||||
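The bound asserted by this test is per-peer bookkeeping with a hard cap: once a peer has hashLimit announcements outstanding, further ones are silently dropped. A sketch of that accounting (the limiter type is illustrative; the real fetcher tracks more state per announcement):

```go
const hashLimit = 256 // assumed per-peer cap, matching the constant name the test uses

type announceLimiter struct {
	pending map[string]int // peer id -> outstanding announcements
}

func newAnnounceLimiter() *announceLimiter {
	return &announceLimiter{pending: make(map[string]int)}
}

// add records an announcement from a peer and reports whether it was
// accepted; everything past the cap is discarded, keeping memory bounded.
func (l *announceLimiter) add(peer string) bool {
	if l.pending[peer] >= hashLimit {
		return false
	}
	l.pending[peer]++
	return true
}

// done releases a slot once the announced block has been fetched or dropped.
func (l *announceLimiter) done(peer string) {
	if l.pending[peer] > 0 {
		l.pending[peer]--
	}
}
```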
| 
 | ||||
| // Tests that blocks sent to the fetcher (either through propagation or via hash | ||||
| // announces and retrievals) don't pile up indefinitely, exhausting available | ||||
| // system memory. | ||||
| func TestBlockMemoryExhaustionAttack(t *testing.T) { | ||||
| 	// Create a tester with instrumented import hooks | ||||
| 	tester := newTester(false) | ||||
| 
 | ||||
| 	imported, enqueued := make(chan interface{}), int32(0) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } | ||||
| 	tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) { | ||||
| 		if added { | ||||
| 			atomic.AddInt32(&enqueued, 1) | ||||
| 		} else { | ||||
| 			atomic.AddInt32(&enqueued, -1) | ||||
| 		} | ||||
| 	} | ||||
| 	// Create a valid chain and a batch of dangling (but in range) blocks | ||||
| 	targetBlocks := hashLimit + 2*maxQueueDist | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
| 	attack := make(map[common.Hash]*types.Block) | ||||
| 	for i := byte(0); len(attack) < blockLimit+2*maxQueueDist; i++ { | ||||
| 		hashes, blocks := makeChain(maxQueueDist-1, i, unknownBlock) | ||||
| 		for _, hash := range hashes[:maxQueueDist-2] { | ||||
| 			attack[hash] = blocks[hash] | ||||
| 		} | ||||
| 	} | ||||
| 	// Try to feed all the attacker blocks and make sure only a limited batch is accepted | ||||
| 	for _, block := range attack { | ||||
| 		tester.fetcher.Enqueue("attacker", block) | ||||
| 	} | ||||
| 	time.Sleep(200 * time.Millisecond) | ||||
| 	if queued := atomic.LoadInt32(&enqueued); queued != blockLimit { | ||||
| 		t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit) | ||||
| 	} | ||||
| 	// Queue up a batch of valid blocks, and check that a new peer is allowed to do so | ||||
| 	for i := 0; i < maxQueueDist-1; i++ { | ||||
| 		tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]]) | ||||
| 	} | ||||
| 	time.Sleep(100 * time.Millisecond) | ||||
| 	if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 { | ||||
| 		t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1) | ||||
| 	} | ||||
| 	// Insert the missing piece (and sanity check the import) | ||||
| 	tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2]]) | ||||
| 	verifyImportCount(t, imported, maxQueueDist) | ||||
| 
 | ||||
| 	// Insert the remaining blocks in chunks to ensure clean DOS protection | ||||
| 	for i := maxQueueDist; i < len(hashes)-1; i++ { | ||||
| 		tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]]) | ||||
| 		verifyImportEvent(t, imported, true) | ||||
| 	} | ||||
| 	verifyImportDone(t, imported) | ||||
| } | ||||
| @ -1,189 +0,0 @@ | ||||
| // Copyright 2019 The go-ethereum Authors | ||||
| // This file is part of the go-ethereum library. | ||||
| // | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify | ||||
| // it under the terms of the GNU Lesser General Public License as published by | ||||
| // the Free Software Foundation, either version 3 of the License, or | ||||
| // (at your option) any later version. | ||||
| // | ||||
| // The go-ethereum library is distributed in the hope that it will be useful, | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
| // GNU Lesser General Public License for more details. | ||||
| // | ||||
| // You should have received a copy of the GNU Lesser General Public License | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| import ( | ||||
| 	"math/big" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/consensus/ethash" | ||||
| 	"github.com/ethereum/go-ethereum/core" | ||||
| 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||
| 	"github.com/ethereum/go-ethereum/core/types" | ||||
| 	"github.com/ethereum/go-ethereum/params" | ||||
| ) | ||||
| 
 | ||||
| // verifyImportEvent verifies that a single event arrives on an import channel. | ||||
| func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { | ||||
| 	if arrive { | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| 		case <-time.After(time.Second): | ||||
| 			t.Fatalf("import timeout") | ||||
| 		} | ||||
| 	} else { | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| 			t.Fatalf("import invoked") | ||||
| 		case <-time.After(20 * time.Millisecond): | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // verifyImportDone verifies that no more events are arriving on an import channel. | ||||
| func verifyImportDone(t *testing.T, imported chan interface{}) { | ||||
| 	select { | ||||
| 	case <-imported: | ||||
| 		t.Fatalf("extra block imported") | ||||
| 	case <-time.After(50 * time.Millisecond): | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // verifyChainHeight verifies the chain height is as expected. | ||||
| func verifyChainHeight(t *testing.T, fetcher *lightFetcher, height uint64) { | ||||
| 	local := fetcher.chain.CurrentHeader().Number.Uint64() | ||||
| 	if local != height { | ||||
| 		t.Fatalf("chain height mismatch, got %d, want %d", local, height) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestSequentialAnnouncementsLes2(t *testing.T) { testSequentialAnnouncements(t, 2) } | ||||
| func TestSequentialAnnouncementsLes3(t *testing.T) { testSequentialAnnouncements(t, 3) } | ||||
| 
 | ||||
| func testSequentialAnnouncements(t *testing.T, protocol int) { | ||||
| 	netconfig := testnetConfig{ | ||||
| 		blocks:    4, | ||||
| 		protocol:  protocol, | ||||
| 		nopruning: true, | ||||
| 	} | ||||
| 	s, c, teardown := newClientServerEnv(t, netconfig) | ||||
| 	defer teardown() | ||||
| 
 | ||||
| 	// Create a connected peer pair; the initial signal from the LES server | ||||
| 	// is discarded to prevent syncing. | ||||
| 	p1, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler, true) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create peer pair %v", err) | ||||
| 	} | ||||
| 	importCh := make(chan interface{}) | ||||
| 	c.handler.fetcher.newHeadHook = func(header *types.Header) { | ||||
| 		importCh <- header | ||||
| 	} | ||||
| 	for i := uint64(1); i <= s.backend.Blockchain().CurrentHeader().Number.Uint64(); i++ { | ||||
| 		header := s.backend.Blockchain().GetHeaderByNumber(i) | ||||
| 		hash, number := header.Hash(), header.Number.Uint64() | ||||
| 		td := rawdb.ReadTd(s.db, hash, number) | ||||
| 
 | ||||
| 		announce := announceData{hash, number, td, 0, nil} | ||||
| 		if p1.cpeer.announceType == announceTypeSigned { | ||||
| 			announce.sign(s.handler.server.privateKey) | ||||
| 		} | ||||
| 		p1.cpeer.sendAnnounce(announce) | ||||
| 		verifyImportEvent(t, importCh, true) | ||||
| 	} | ||||
| 	verifyImportDone(t, importCh) | ||||
| 	verifyChainHeight(t, c.handler.fetcher, 4) | ||||
| } | ||||
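For the signed announce type used above, the server signs the announcement payload so clients can authenticate the head without trusting the transport. A hedged sketch using go-ethereum's crypto and rlp packages; the exact payload encoding of announceData is an assumption here:

```go
import (
	"crypto/ecdsa"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// signAnnounce signs an (illustrative) RLP encoding of the announced
// head: its hash, number and total difficulty.
func signAnnounce(key *ecdsa.PrivateKey, hash common.Hash, number uint64, td *big.Int) ([]byte, error) {
	payload, err := rlp.EncodeToBytes([]interface{}{hash, number, td})
	if err != nil {
		return nil, err
	}
	return crypto.Sign(crypto.Keccak256Hash(payload).Bytes(), key)
}

// verifyAnnounce recovers the signer from the signature and checks it
// against the server key the client trusts.
func verifyAnnounce(trusted *ecdsa.PublicKey, hash common.Hash, number uint64, td *big.Int, sig []byte) bool {
	payload, err := rlp.EncodeToBytes([]interface{}{hash, number, td})
	if err != nil {
		return false
	}
	recovered, err := crypto.SigToPub(crypto.Keccak256Hash(payload).Bytes(), sig)
	if err != nil {
		return false
	}
	return recovered.Equal(trusted)
}
```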
| 
 | ||||
| func TestGappedAnnouncementsLes2(t *testing.T) { testGappedAnnouncements(t, 2) } | ||||
| func TestGappedAnnouncementsLes3(t *testing.T) { testGappedAnnouncements(t, 3) } | ||||
| 
 | ||||
| func testGappedAnnouncements(t *testing.T, protocol int) { | ||||
| 	netconfig := testnetConfig{ | ||||
| 		blocks:    4, | ||||
| 		protocol:  protocol, | ||||
| 		nopruning: true, | ||||
| 	} | ||||
| 	s, c, teardown := newClientServerEnv(t, netconfig) | ||||
| 	defer teardown() | ||||
| 
 | ||||
| 	// Create a connected peer pair; the initial signal from the LES server | ||||
| 	// is discarded to prevent syncing. | ||||
| 	peer, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler, true) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create peer pair %v", err) | ||||
| 	} | ||||
| 	done := make(chan *types.Header, 1) | ||||
| 	c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header } | ||||
| 
 | ||||
| 	// Prepare an announcement with the latest header. | ||||
| 	latest := s.backend.Blockchain().CurrentHeader() | ||||
| 	hash, number := latest.Hash(), latest.Number.Uint64() | ||||
| 	td := rawdb.ReadTd(s.db, hash, number) | ||||
| 
 | ||||
| 	// Sign the announcement if necessary. | ||||
| 	announce := announceData{hash, number, td, 0, nil} | ||||
| 	if peer.cpeer.announceType == announceTypeSigned { | ||||
| 		announce.sign(s.handler.server.privateKey) | ||||
| 	} | ||||
| 	peer.cpeer.sendAnnounce(announce) | ||||
| 
 | ||||
| 	<-done // Wait syncing
 | ||||
| 	verifyChainHeight(t, c.handler.fetcher, 4) | ||||
| 
 | ||||
| 	// Send a reorged announcement | ||||
| 	blocks, _ := core.GenerateChain(rawdb.ReadChainConfig(s.db, s.backend.Blockchain().Genesis().Hash()), s.backend.Blockchain().GetBlockByNumber(3), | ||||
| 		ethash.NewFaker(), s.db, 2, func(i int, gen *core.BlockGen) { | ||||
| 			gen.OffsetTime(-9) // higher block difficulty | ||||
| 		}) | ||||
| 	s.backend.Blockchain().InsertChain(blocks) | ||||
| 
 | ||||
| 	<-done // Wait syncing
 | ||||
| 	verifyChainHeight(t, c.handler.fetcher, 5) | ||||
| } | ||||
| 
 | ||||
| func TestInvalidAnnouncesLES2(t *testing.T) { testInvalidAnnounces(t, lpv2) } | ||||
| func TestInvalidAnnouncesLES3(t *testing.T) { testInvalidAnnounces(t, lpv3) } | ||||
| func TestInvalidAnnouncesLES4(t *testing.T) { testInvalidAnnounces(t, lpv4) } | ||||
| 
 | ||||
| func testInvalidAnnounces(t *testing.T, protocol int) { | ||||
| 	netconfig := testnetConfig{ | ||||
| 		blocks:    4, | ||||
| 		protocol:  protocol, | ||||
| 		nopruning: true, | ||||
| 	} | ||||
| 	s, c, teardown := newClientServerEnv(t, netconfig) | ||||
| 	defer teardown() | ||||
| 
 | ||||
| 	// Create a connected peer pair; the initial signal from the LES server | ||||
| 	// is discarded to prevent syncing. | ||||
| 	peer, _, err := newTestPeerPair("peer", lpv3, s.handler, c.handler, true) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create peer pair %v", err) | ||||
| 	} | ||||
| 	done := make(chan *types.Header, 1) | ||||
| 	c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header } | ||||
| 
 | ||||
| 	// Prepare an announcement with an invalid TD. | ||||
| 	headerOne := s.backend.Blockchain().GetHeaderByNumber(1) | ||||
| 	hash, number := headerOne.Hash(), headerOne.Number.Uint64() | ||||
| 	td := big.NewInt(params.GenesisDifficulty.Int64() + 200) // bad td | ||||
| 
 | ||||
| 	// Sign the announcement if necessary. | ||||
| 	announce := announceData{hash, number, td, 0, nil} | ||||
| 	if peer.cpeer.announceType == announceTypeSigned { | ||||
| 		announce.sign(s.handler.server.privateKey) | ||||
| 	} | ||||
| 	peer.cpeer.sendAnnounce(announce) | ||||
| 	<-done // Wait syncing
 | ||||
| 
 | ||||
| 	// Ensure the bad peer is evicted | ||||
| 	if c.handler.backend.peers.len() != 0 { | ||||
| 		t.Fatalf("Failed to evict invalid peer") | ||||
| 	} | ||||
| } | ||||
| @ -31,7 +31,7 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/core/txpool" | ||||
| 	"github.com/ethereum/go-ethereum/core/types" | ||||
| 	"github.com/ethereum/go-ethereum/crypto" | ||||
| 	"github.com/ethereum/go-ethereum/les/downloader" | ||||
| 	"github.com/ethereum/go-ethereum/eth/downloader" | ||||
| 	"github.com/ethereum/go-ethereum/light" | ||||
| 	"github.com/ethereum/go-ethereum/p2p" | ||||
| 	"github.com/ethereum/go-ethereum/params" | ||||
|  | ||||
| @ -16,6 +16,10 @@ | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| // Note: these tests are disabled for now because they cannot work with the old sync | ||||
| // mechanism removed, but they will be useful again once the PoS ultra-light mode is implemented. | ||||
| 
 | ||||
| /* | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| @ -451,3 +455,4 @@ func randomHash() common.Hash { | ||||
| 	} | ||||
| 	return hash | ||||
| } | ||||
| */ | ||||
|  | ||||
							
								
								
									
les/peer.go
							| @ -998,9 +998,6 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge | ||||
| 			recentTx -= blockSafetyMargin - txIndexRecentOffset | ||||
| 		} | ||||
| 	} | ||||
| 	if server.config.UltraLightOnlyAnnounce { | ||||
| 		recentTx = txIndexDisabled | ||||
| 	} | ||||
| 	if recentTx != txIndexUnlimited && p.version < lpv4 { | ||||
| 		return errors.New("Cannot serve old clients without a complete tx index") | ||||
| 	} | ||||
| @ -1009,20 +1006,18 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge | ||||
| 	p.headInfo = blockInfo{Hash: head, Number: headNum, Td: td} | ||||
| 	return p.handshake(td, head, headNum, genesis, forkID, forkFilter, func(lists *keyValueList) { | ||||
| 		// Add some information about which services the server can offer. | ||||
| 		if !server.config.UltraLightOnlyAnnounce { | ||||
| 			*lists = (*lists).add("serveHeaders", nil) | ||||
| 			*lists = (*lists).add("serveChainSince", uint64(0)) | ||||
| 			*lists = (*lists).add("serveStateSince", uint64(0)) | ||||
| 		*lists = (*lists).add("serveHeaders", nil) | ||||
| 		*lists = (*lists).add("serveChainSince", uint64(0)) | ||||
| 		*lists = (*lists).add("serveStateSince", uint64(0)) | ||||
| 
 | ||||
| 			// If the local ethereum node is running in archive mode, advertise that we have | ||||
| 			// state data for all blocks. Otherwise only recent state is available. | ||||
| 			stateRecent := uint64(core.TriesInMemory - blockSafetyMargin) | ||||
| 			if server.archiveMode { | ||||
| 				stateRecent = 0 | ||||
| 			} | ||||
| 			*lists = (*lists).add("serveRecentState", stateRecent) | ||||
| 			*lists = (*lists).add("txRelay", nil) | ||||
| 		// If the local ethereum node is running in archive mode, advertise that we have | ||||
| 		// state data for all blocks. Otherwise only recent state is available. | ||||
| 		stateRecent := uint64(core.TriesInMemory - blockSafetyMargin) | ||||
| 		if server.archiveMode { | ||||
| 			stateRecent = 0 | ||||
| 		} | ||||
| 		*lists = (*lists).add("serveRecentState", stateRecent) | ||||
| 		*lists = (*lists).add("txRelay", nil) | ||||
| 		if p.version >= lpv4 { | ||||
| 			*lists = (*lists).add("recentTxLookup", recentTx) | ||||
| 		} | ||||
|  | ||||
| @ -1,99 +0,0 @@ | ||||
| // Copyright 2020 The go-ethereum Authors | ||||
| // This file is part of the go-ethereum library. | ||||
| // | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify | ||||
| // it under the terms of the GNU Lesser General Public License as published by | ||||
| // the Free Software Foundation, either version 3 of the License, or | ||||
| // (at your option) any later version. | ||||
| // | ||||
| // The go-ethereum library is distributed in the hope that it will be useful, | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
| // GNU Lesser General Public License for more details. | ||||
| // | ||||
| // You should have received a copy of the GNU Lesser General Public License | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| import ( | ||||
| 	"sync" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/common/math" | ||||
| 	"github.com/ethereum/go-ethereum/core" | ||||
| 	"github.com/ethereum/go-ethereum/ethdb" | ||||
| 	"github.com/ethereum/go-ethereum/log" | ||||
| ) | ||||
| 
 | ||||
| // pruner is responsible for pruning historical light chain data. | ||||
| type pruner struct { | ||||
| 	db       ethdb.Database | ||||
| 	indexers []*core.ChainIndexer | ||||
| 	closeCh  chan struct{} | ||||
| 	wg       sync.WaitGroup | ||||
| } | ||||
| 
 | ||||
| // newPruner returns a light chain pruner instance. | ||||
| func newPruner(db ethdb.Database, indexers ...*core.ChainIndexer) *pruner { | ||||
| 	pruner := &pruner{ | ||||
| 		db:       db, | ||||
| 		indexers: indexers, | ||||
| 		closeCh:  make(chan struct{}), | ||||
| 	} | ||||
| 	pruner.wg.Add(1) | ||||
| 	go pruner.loop() | ||||
| 	return pruner | ||||
| } | ||||
| 
 | ||||
| // close notifies all background goroutines belonging to the pruner to exit. | ||||
| func (p *pruner) close() { | ||||
| 	close(p.closeCh) | ||||
| 	p.wg.Wait() | ||||
| } | ||||
| 
 | ||||
| // loop periodically queries the status of chain indexers and prunes useless | ||||
| // historical chain data. Notably, whenever Geth restarts, it will iterate | ||||
| // over all historical sections even if they don't exist at all (below the | ||||
| // checkpoint), so that the light client can prune cached chain data that | ||||
| // was retrieved via ODR after that section was pruned. | ||||
| func (p *pruner) loop() { | ||||
| 	defer p.wg.Done() | ||||
| 
 | ||||
| 	// cleanTicker is the ticker used to trigger a history clean twice a day. | ||||
| 	var cleanTicker = time.NewTicker(12 * time.Hour) | ||||
| 	defer cleanTicker.Stop() | ||||
| 
 | ||||
| 	// pruning finds the sections that have been processed by all indexers | ||||
| 	// and deletes all historical chain data. | ||||
| 	// Note, if some indexers don't support pruning (e.g. eth.BloomIndexer), | ||||
| 	// pruning operations can be silently ignored. | ||||
| 	pruning := func() { | ||||
| 		min := uint64(math.MaxUint64) | ||||
| 		for _, indexer := range p.indexers { | ||||
| 			sections, _, _ := indexer.Sections() | ||||
| 			if sections < min { | ||||
| 				min = sections | ||||
| 			} | ||||
| 		} | ||||
| 		// Always keep the latest section data in the database. | ||||
| 		if min < 2 || len(p.indexers) == 0 { | ||||
| 			return | ||||
| 		} | ||||
| 		for _, indexer := range p.indexers { | ||||
| 			if err := indexer.Prune(min - 2); err != nil { | ||||
| 				log.Debug("Failed to prune historical data", "err", err) | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
| 		p.db.Compact(nil, nil) // Compact the entire database to ensure all removed data is deleted. | ||||
| 	} | ||||
| 	for { | ||||
| 		pruning() | ||||
| 		select { | ||||
| 		case <-cleanTicker.C: | ||||
| 		case <-p.closeCh: | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
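The heart of the pruning closure above is a min-over-indexers computation that always holds back the two most recent sections. A standalone sketch of that threshold logic:

```go
import "math"

// prunableThreshold returns the highest section index that may be pruned,
// always keeping the latest sections intact, and ok=false when nothing is
// prunable yet. sections holds the processed-section counts reported by
// each indexer.
func prunableThreshold(sections []uint64) (threshold uint64, ok bool) {
	if len(sections) == 0 {
		return 0, false
	}
	min := uint64(math.MaxUint64)
	for _, s := range sections {
		if s < min {
			min = s
		}
	}
	if min < 2 {
		return 0, false
	}
	return min - 2, true
}
```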
| @ -1,204 +0,0 @@ | ||||
| // Copyright 2020 The go-ethereum Authors | ||||
| // This file is part of the go-ethereum library. | ||||
| // | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify | ||||
| // it under the terms of the GNU Lesser General Public License as published by | ||||
| // the Free Software Foundation, either version 3 of the License, or | ||||
| // (at your option) any later version. | ||||
| // | ||||
| // The go-ethereum library is distributed in the hope that it will be useful, | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
| // GNU Lesser General Public License for more details. | ||||
| // | ||||
| // You should have received a copy of the GNU Lesser General Public License | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"encoding/binary" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/core" | ||||
| 	"github.com/ethereum/go-ethereum/light" | ||||
| ) | ||||
| 
 | ||||
| func TestLightPruner(t *testing.T) { | ||||
| 	var ( | ||||
| 		waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { | ||||
| 			for { | ||||
| 				cs, _, _ := cIndexer.Sections() | ||||
| 				bts, _, _ := btIndexer.Sections() | ||||
| 				if cs >= 3 && bts >= 3 { | ||||
| 					break | ||||
| 				} | ||||
| 				time.Sleep(10 * time.Millisecond) | ||||
| 			} | ||||
| 		} | ||||
| 		config    = light.TestClientIndexerConfig | ||||
| 		netconfig = testnetConfig{ | ||||
| 			blocks:   int(3*config.ChtSize + config.ChtConfirms), | ||||
| 			protocol: 3, | ||||
| 			indexFn:  waitIndexers, | ||||
| 			connect:  true, | ||||
| 		} | ||||
| 	) | ||||
| 	server, client, tearDown := newClientServerEnv(t, netconfig) | ||||
| 	defer tearDown() | ||||
| 
 | ||||
| 	// checkDB iterates the chain data with the given prefix, resolves the block | ||||
| 	// number with the given callback and ensures each entry exists (or not) as expected. | ||||
| 	checkDB := func(from, to uint64, prefix []byte, resolve func(key, value []byte) *uint64, exist bool) bool { | ||||
| 		it := client.db.NewIterator(prefix, nil) | ||||
| 		defer it.Release() | ||||
| 
 | ||||
| 		var next = from | ||||
| 		for it.Next() { | ||||
| 			number := resolve(it.Key(), it.Value()) | ||||
| 			if number == nil || *number < from { | ||||
| 				continue | ||||
| 			} else if *number > to { | ||||
| 				return true | ||||
| 			} | ||||
| 			if exist { | ||||
| 				if *number != next { | ||||
| 					return false | ||||
| 				} | ||||
| 				next++ | ||||
| 			} else { | ||||
| 				return false | ||||
| 			} | ||||
| 		} | ||||
| 		return true | ||||
| 	} | ||||
| 	// checkPruned checks and ensures the stale chain data has been pruned. | ||||
| 	checkPruned := func(from, to uint64) { | ||||
| 		// Iterate canonical hash | ||||
| 		if !checkDB(from, to, []byte("h"), func(key, value []byte) *uint64 { | ||||
| 			if len(key) == 1+8+1 && bytes.Equal(key[9:10], []byte("n")) { | ||||
| 				n := binary.BigEndian.Uint64(key[1:9]) | ||||
| 				return &n | ||||
| 			} | ||||
| 			return nil | ||||
| 		}, false) { | ||||
| 			t.Fatalf("canonical hash mappings are not properly pruned") | ||||
| 		} | ||||
| 		// Iterate header | ||||
| 		if !checkDB(from, to, []byte("h"), func(key, value []byte) *uint64 { | ||||
| 			if len(key) == 1+8+32 { | ||||
| 				n := binary.BigEndian.Uint64(key[1:9]) | ||||
| 				return &n | ||||
| 			} | ||||
| 			return nil | ||||
| 		}, false) { | ||||
| 			t.Fatalf("headers are not properly pruned") | ||||
| 		} | ||||
| 		// Iterate body | ||||
| 		if !checkDB(from, to, []byte("b"), func(key, value []byte) *uint64 { | ||||
| 			if len(key) == 1+8+32 { | ||||
| 				n := binary.BigEndian.Uint64(key[1:9]) | ||||
| 				return &n | ||||
| 			} | ||||
| 			return nil | ||||
| 		}, false) { | ||||
| 			t.Fatalf("block bodies are not properly pruned") | ||||
| 		} | ||||
| 		// Iterate receipts | ||||
| 		if !checkDB(from, to, []byte("r"), func(key, value []byte) *uint64 { | ||||
| 			if len(key) == 1+8+32 { | ||||
| 				n := binary.BigEndian.Uint64(key[1:9]) | ||||
| 				return &n | ||||
| 			} | ||||
| 			return nil | ||||
| 		}, false) { | ||||
| 			t.Fatalf("receipts are not properly pruned") | ||||
| 		} | ||||
| 		// Iterate td | ||||
| 		if !checkDB(from, to, []byte("h"), func(key, value []byte) *uint64 { | ||||
| 			if len(key) == 1+8+32+1 && bytes.Equal(key[41:42], []byte("t")) { | ||||
| 				n := binary.BigEndian.Uint64(key[1:9]) | ||||
| 				return &n | ||||
| 			} | ||||
| 			return nil | ||||
| 		}, false) { | ||||
| 			t.Fatalf("tds are not properly pruned") | ||||
| 		} | ||||
| 	} | ||||
| 	// Start the light pruner. | ||||
| 	time.Sleep(1500 * time.Millisecond) // Ensure the light client has finished syncing and indexing | ||||
| 	newPruner(client.db, client.chtIndexer, client.bloomTrieIndexer) | ||||
| 
 | ||||
| 	time.Sleep(1500 * time.Millisecond) // Ensure the pruner has enough time to prune data. | ||||
| 	checkPruned(1, config.ChtSize-1) | ||||
| 
 | ||||
| 	// Ensure all APIs still work after pruning. | ||||
| 	var cases = []struct { | ||||
| 		from, to   uint64 | ||||
| 		methodName string | ||||
| 		method     func(uint64) bool | ||||
| 	}{ | ||||
| 		{ | ||||
| 			1, 10, "GetHeaderByNumber", | ||||
| 			func(n uint64) bool { | ||||
| 				_, err := light.GetHeaderByNumber(context.Background(), client.handler.backend.odr, n) | ||||
| 				return err == nil | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			11, 20, "GetCanonicalHash", | ||||
| 			func(n uint64) bool { | ||||
| 				_, err := light.GetCanonicalHash(context.Background(), client.handler.backend.odr, n) | ||||
| 				return err == nil | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			21, 30, "GetTd", | ||||
| 			func(n uint64) bool { | ||||
| 				_, err := light.GetTd(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n) | ||||
| 				return err == nil | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			31, 40, "GetBodyRLP", | ||||
| 			func(n uint64) bool { | ||||
| 				_, err := light.GetBodyRLP(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n) | ||||
| 				return err == nil | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			41, 50, "GetBlock", | ||||
| 			func(n uint64) bool { | ||||
| 				_, err := light.GetBlock(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n) | ||||
| 				return err == nil | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			51, 60, "GetBlockReceipts", | ||||
| 			func(n uint64) bool { | ||||
| 				_, err := light.GetBlockReceipts(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n) | ||||
| 				return err == nil | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	for _, c := range cases { | ||||
| 		for i := c.from; i <= c.to; i++ { | ||||
| 			if !c.method(i) { | ||||
| 				t.Fatalf("rpc method %s failed, number %d", c.methodName, i) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	// Check GetBloomBits | ||||
| 	_, err := light.GetBloomBits(context.Background(), client.handler.backend.odr, 0, []uint64{0}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to retrieve bloombits of pruned section: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// Ensure the ODR cached data can be cleaned by the pruner. | ||||
| 	newPruner(client.db, client.chtIndexer, client.bloomTrieIndexer) | ||||
| 	time.Sleep(50 * time.Millisecond) // Ensure the pruner has enough time to prune data. | ||||
| 	checkPruned(1, config.ChtSize-1)  // Ensure all cached data (by ODR) is cleaned. | ||||
| } | ||||
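The resolver callbacks in the test above pattern-match go-ethereum's rawdb key layout by hand: a one-byte prefix, an 8-byte big-endian block number, then either an 'n' suffix (canonical hash mapping) or a 32-byte hash (headers/bodies/receipts; TD keys add a further 't' suffix). A sketch of the two shapes, for reference:

```go
import "encoding/binary"

// blockNumberFromKey extracts the block number from a chain-data key,
// returning ok=false for keys that match neither expected shape.
func blockNumberFromKey(key []byte) (number uint64, ok bool) {
	switch {
	case len(key) == 1+8+1 && key[9] == 'n': // canonical hash: prefix + num + 'n'
		return binary.BigEndian.Uint64(key[1:9]), true
	case len(key) >= 1+8+32: // header/body/receipt/td: prefix + num + hash (+ suffix)
		return binary.BigEndian.Uint64(key[1:9]), true
	}
	return 0, false
}
```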
| @ -16,6 +16,10 @@ | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| // Note: these tests are disabled for now because they cannot work with the old sync | ||||
| // mechanism removed, but they will be useful again once the PoS ultra-light mode is implemented. | ||||
| 
 | ||||
| /* | ||||
| import ( | ||||
| 	"context" | ||||
| 	"testing" | ||||
| @ -122,3 +126,4 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) { | ||||
| 	} | ||||
| 	test(5) | ||||
| } | ||||
| */ | ||||
|  | ||||
| @ -153,15 +153,6 @@ func (rm *retrieveManager) sendReq(reqID uint64, req *distReq, val validatorFunc | ||||
| 	return r | ||||
| } | ||||
| 
 | ||||
| // requested reports whether the request with the given reqId was sent by the retriever. | ||||
| func (rm *retrieveManager) requested(reqId uint64) bool { | ||||
| 	rm.lock.RLock() | ||||
| 	defer rm.lock.RUnlock() | ||||
| 
 | ||||
| 	_, ok := rm.sentReqs[reqId] | ||||
| 	return ok | ||||
| } | ||||
| 
 | ||||
| // deliver is called by the LES protocol manager to deliver reply messages to waiting requests | ||||
| func (rm *retrieveManager) deliver(peer distPeer, msg *Msg) error { | ||||
| 	rm.lock.RLock() | ||||
|  | ||||
							
								
								
									
les/sync.go
							| @ -1,56 +0,0 @@ | ||||
| // Copyright 2016 The go-ethereum Authors | ||||
| // This file is part of the go-ethereum library. | ||||
| // | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify | ||||
| // it under the terms of the GNU Lesser General Public License as published by | ||||
| // the Free Software Foundation, either version 3 of the License, or | ||||
| // (at your option) any later version. | ||||
| // | ||||
| // The go-ethereum library is distributed in the hope that it will be useful, | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
| // GNU Lesser General Public License for more details. | ||||
| // | ||||
| // You should have received a copy of the GNU Lesser General Public License | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| import ( | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/common" | ||||
| 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||
| 	"github.com/ethereum/go-ethereum/les/downloader" | ||||
| 	"github.com/ethereum/go-ethereum/log" | ||||
| ) | ||||
| 
 | ||||
| // synchronise tries to sync up our local chain with a remote peer. | ||||
| func (h *clientHandler) synchronise(peer *serverPeer) { | ||||
| 	// Short circuit if the peer is nil. | ||||
| 	if peer == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	// Make sure the peer's TD is higher than our own. | ||||
| 	latest := h.backend.blockchain.CurrentHeader() | ||||
| 	currentTd := rawdb.ReadTd(h.backend.chainDb, latest.Hash(), latest.Number.Uint64()) | ||||
| 	if currentTd != nil && peer.Td().Cmp(currentTd) < 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	// Notify the testing framework if syncing has completed (for testing purposes). | ||||
| 	defer func() { | ||||
| 		if h.syncEnd != nil { | ||||
| 			h.syncEnd(h.backend.blockchain.CurrentHeader()) | ||||
| 		} | ||||
| 	}() | ||||
| 	start := time.Now() | ||||
| 	if h.syncStart != nil { | ||||
| 		h.syncStart(h.backend.blockchain.CurrentHeader()) | ||||
| 	} | ||||
| 	// Fetch the remaining block headers based on the current chain header. | ||||
| 	if err := h.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync); err != nil { | ||||
| 		log.Debug("Synchronise failed", "reason", err) | ||||
| 		return | ||||
| 	} | ||||
| 	log.Debug("Synchronise finished", "elapsed", common.PrettyDuration(time.Since(start))) | ||||
| } | ||||
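The early return in synchronise is just a total-difficulty gate: syncing proceeds only when the peer advertises at least as heavy a chain as the local head, or when the local TD is unknown. As a standalone sketch:

```go
import "math/big"

// shouldSync reports whether a remote peer's advertised total difficulty
// makes it worth syncing against, mirroring the gate in synchronise above.
func shouldSync(localTd, peerTd *big.Int) bool {
	return localTd == nil || peerTd.Cmp(localTd) >= 0
}
```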
| @ -1,83 +0,0 @@ | ||||
| // Copyright 2019 The go-ethereum Authors | ||||
| // This file is part of the go-ethereum library. | ||||
| // | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify | ||||
| // it under the terms of the GNU Lesser General Public License as published by | ||||
| // the Free Software Foundation, either version 3 of the License, or | ||||
| // (at your option) any later version. | ||||
| // | ||||
| // The go-ethereum library is distributed in the hope that it will be useful, | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
| // GNU Lesser General Public License for more details. | ||||
| // | ||||
| // You should have received a copy of the GNU Lesser General Public License | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/core" | ||||
| 	"github.com/ethereum/go-ethereum/core/types" | ||||
| 	"github.com/ethereum/go-ethereum/light" | ||||
| ) | ||||
| 
 | ||||
| // Test light syncing which will download all headers from genesis. | ||||
| func TestLightSyncingLes3(t *testing.T) { testSyncing(t, lpv3) } | ||||
| 
 | ||||
| func testSyncing(t *testing.T, protocol int) { | ||||
| 	config := light.TestServerIndexerConfig | ||||
| 
 | ||||
| 	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { | ||||
| 		for { | ||||
| 			cs, _, _ := cIndexer.Sections() | ||||
| 			bts, _, _ := btIndexer.Sections() | ||||
| 			if cs >= 1 && bts >= 1 { | ||||
| 				break | ||||
| 			} | ||||
| 			time.Sleep(10 * time.Millisecond) | ||||
| 		} | ||||
| 	} | ||||
| 	// Generate 128+1 blocks (one full CHT section in total) | ||||
| 	netconfig := testnetConfig{ | ||||
| 		blocks:    int(config.ChtSize + config.ChtConfirms), | ||||
| 		protocol:  protocol, | ||||
| 		indexFn:   waitIndexers, | ||||
| 		nopruning: true, | ||||
| 	} | ||||
| 	server, client, tearDown := newClientServerEnv(t, netconfig) | ||||
| 	defer tearDown() | ||||
| 
 | ||||
| 	expected := config.ChtSize + config.ChtConfirms | ||||
| 
 | ||||
| 	done := make(chan error) | ||||
| 	client.handler.syncEnd = func(header *types.Header) { | ||||
| 		if header.Number.Uint64() == expected { | ||||
| 			done <- nil | ||||
| 		} else { | ||||
| 			done <- fmt.Errorf("blockchain length mismatch, want %d, got %d", expected, header.Number) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Create a connected peer pair. | ||||
| 	peer1, peer2, err := newTestPeerPair("peer", protocol, server.handler, client.handler, false) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to connect testing peers %v", err) | ||||
| 	} | ||||
| 	defer peer1.close() | ||||
| 	defer peer2.close() | ||||
| 
 | ||||
| 	select { | ||||
| 	case err := <-done: | ||||
| 		if err != nil { | ||||
| 			t.Error("sync failed", err) | ||||
| 		} | ||||
| 		return | ||||
| 	case <-time.NewTimer(10 * time.Second).C: | ||||
| 		t.Error("checkpoint syncing timeout") | ||||
| 	} | ||||
| } | ||||
| @ -177,7 +177,7 @@ func testIndexers(db ethdb.Database, odr light.OdrBackend, config *light.Indexer | ||||
| 	return indexers[:] | ||||
| } | ||||
| 
 | ||||
| func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, indexers []*core.ChainIndexer, db ethdb.Database, peers *serverPeerSet, ulcServers []string, ulcFraction int) (*clientHandler, func()) { | ||||
| func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, indexers []*core.ChainIndexer, db ethdb.Database, peers *serverPeerSet) (*clientHandler, func()) { | ||||
| 	var ( | ||||
| 		evmux  = new(event.TypeMux) | ||||
| 		engine = ethash.NewFaker() | ||||
| @ -210,9 +210,8 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index | ||||
| 		eventMux:   evmux, | ||||
| 		merger:     consensus.NewMerger(rawdb.NewMemoryDatabase()), | ||||
| 	} | ||||
| 	client.handler = newClientHandler(ulcServers, ulcFraction, client) | ||||
| 	client.handler = newClientHandler(client) | ||||
| 
 | ||||
| 	client.handler.start() | ||||
| 	return client.handler, func() { | ||||
| 		client.handler.stop() | ||||
| 	} | ||||
| @ -307,7 +306,8 @@ func (p *testPeer) handshakeWithServer(t *testing.T, td *big.Int, head common.Ha | ||||
| } | ||||
| 
 | ||||
| // handshakeWithClient executes the handshake with the remote client peer. | ||||
| func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, costList RequestCostList, recentTxLookup uint64) { | ||||
| // (used by temporarily disabled tests) | ||||
| /*func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, costList RequestCostList, recentTxLookup uint64) { | ||||
| 	// It only works for the simulated client peer | ||||
| 	if p.speer == nil { | ||||
| 		t.Fatal("handshake for server peer only") | ||||
| @ -337,7 +337,7 @@ func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Ha | ||||
| 	if err := p2p.Send(p.app, StatusMsg, &sendList); err != nil { | ||||
| 		t.Fatalf("status send: %v", err) | ||||
| 	} | ||||
| } | ||||
| }*/ | ||||
| 
 | ||||
| // close terminates the local side of the peer, notifying the remote protocol | ||||
| // manager of termination. | ||||
| @ -405,7 +405,8 @@ type testClient struct { | ||||
| } | ||||
| 
 | ||||
| // newRawPeer creates a new server peer, connects to the server and does the handshake. | ||||
| func (client *testClient) newRawPeer(t *testing.T, name string, version int, recentTxLookup uint64) (*testPeer, func(), <-chan error) { | ||||
| // (used by temporarily disabled tests) | ||||
| /*func (client *testClient) newRawPeer(t *testing.T, name string, version int, recentTxLookup uint64) (*testPeer, func(), <-chan error) { | ||||
| 	// Create a message pipe to communicate through | ||||
| 	app, net := p2p.MsgPipe() | ||||
| 
 | ||||
| @ -453,7 +454,7 @@ func (client *testClient) newRawPeer(t *testing.T, name string, version int, rec | ||||
| 		tp.close() | ||||
| 	} | ||||
| 	return tp, closePeer, errCh | ||||
| } | ||||
| }*/ | ||||
| 
 | ||||
| // testServer represents a server object for testing with necessary auxiliary fields. | ||||
| type testServer struct { | ||||
| @ -521,14 +522,12 @@ func (server *testServer) newRawPeer(t *testing.T, name string, version int) (*t | ||||
| 
 | ||||
| // testnetConfig wraps all the configurations for the testing network. | ||||
| type testnetConfig struct { | ||||
| 	blocks      int | ||||
| 	protocol    int | ||||
| 	indexFn     indexerCallback | ||||
| 	ulcServers  []string | ||||
| 	ulcFraction int | ||||
| 	simClock    bool | ||||
| 	connect     bool | ||||
| 	nopruning   bool | ||||
| 	blocks    int | ||||
| 	protocol  int | ||||
| 	indexFn   indexerCallback | ||||
| 	simClock  bool | ||||
| 	connect   bool | ||||
| 	nopruning bool | ||||
| } | ||||
| 
 | ||||
| func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testClient, func()) { | ||||
| @ -553,7 +552,7 @@ func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testC | ||||
| 	odr.SetIndexers(ccIndexer, cbIndexer, cbtIndexer) | ||||
| 
 | ||||
| 	server, b, serverClose := newTestServerHandler(config.blocks, sindexers, sdb, clock) | ||||
| 	client, clientClose := newTestClientHandler(b, odr, cIndexers, cdb, speers, config.ulcServers, config.ulcFraction) | ||||
| 	client, clientClose := newTestClientHandler(b, odr, cIndexers, cdb, speers) | ||||
| 
 | ||||
| 	scIndexer.Start(server.blockchain) | ||||
| 	sbIndexer.Start(server.blockchain) | ||||
| @ -569,7 +568,6 @@ func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testC | ||||
| 	) | ||||
| 	if config.connect { | ||||
| 		done := make(chan struct{}) | ||||
| 		client.syncEnd = func(_ *types.Header) { close(done) } | ||||
| 		cpeer, speer, err = newTestPeerPair("peer", config.protocol, server, client, false) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to connect testing peers %v", err) | ||||
|  | ||||
							
								
								
									
les/ulc.go
							| @ -1,54 +0,0 @@ | ||||
| // Copyright 2019 The go-ethereum Authors | ||||
| // This file is part of the go-ethereum library. | ||||
| // | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify | ||||
| // it under the terms of the GNU Lesser General Public License as published by | ||||
| // the Free Software Foundation, either version 3 of the License, or | ||||
| // (at your option) any later version. | ||||
| // | ||||
| // The go-ethereum library is distributed in the hope that it will be useful, | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
| // GNU Lesser General Public License for more details. | ||||
| // | ||||
| // You should have received a copy of the GNU Lesser General Public License | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/log" | ||||
| 	"github.com/ethereum/go-ethereum/p2p/enode" | ||||
| ) | ||||
| 
 | ||||
| type ulc struct { | ||||
| 	keys     map[string]bool | ||||
| 	fraction int | ||||
| } | ||||
| 
 | ||||
| // newULC creates and returns an ultra light client instance. | ||||
| func newULC(servers []string, fraction int) (*ulc, error) { | ||||
| 	keys := make(map[string]bool) | ||||
| 	for _, id := range servers { | ||||
| 		node, err := enode.Parse(enode.ValidSchemes, id) | ||||
| 		if err != nil { | ||||
| 			log.Warn("Failed to parse trusted server", "id", id, "err", err) | ||||
| 			continue | ||||
| 		} | ||||
| 		keys[node.ID().String()] = true | ||||
| 	} | ||||
| 	if len(keys) == 0 { | ||||
| 		return nil, errors.New("no trusted servers") | ||||
| 	} | ||||
| 	return &ulc{ | ||||
| 		keys:     keys, | ||||
| 		fraction: fraction, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
| // trusted reports whether the specified peer is trusted. | ||||
| func (u *ulc) trusted(p enode.ID) bool { | ||||
| 	return u.keys[p.String()] | ||||
| } | ||||
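The fraction field drives the announce threshold that the deleted ulc_test.go below exercised: a head is accepted once at least that percentage of the trusted servers have announced it. A minimal sketch of the acceptance rule, under that assumption:

```go
// agreed reports whether enough trusted servers have announced the same
// head for an ultra-light client with the given fraction (in percent)
// to act on it.
func agreed(announcedBy, trustedServers, fraction int) bool {
	if trustedServers == 0 {
		return false
	}
	return announcedBy*100 >= fraction*trustedServers
}
```

Checked against the test table below: with heights {1, 2, 3} and a 60% threshold, only the head at height 2 (announced by two of three servers) clears the bar, matching the expected chain height of 2.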
							
								
								
									
les/ulc_test.go
							| @ -1,162 +0,0 @@ | ||||
| // Copyright 2018 The go-ethereum Authors | ||||
| // This file is part of the go-ethereum library. | ||||
| // | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify | ||||
| // it under the terms of the GNU Lesser General Public License as published by | ||||
| // the Free Software Foundation, either version 3 of the License, or | ||||
| // (at your option) any later version. | ||||
| // | ||||
| // The go-ethereum library is distributed in the hope that it will be useful, | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
| // GNU Lesser General Public License for more details. | ||||
| // | ||||
| // You should have received a copy of the GNU Lesser General Public License | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| import ( | ||||
| 	"crypto/rand" | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/crypto" | ||||
| 	"github.com/ethereum/go-ethereum/p2p" | ||||
| 	"github.com/ethereum/go-ethereum/p2p/enode" | ||||
| ) | ||||
| 
 | ||||
| func TestULCAnnounceThresholdLes2(t *testing.T) { testULCAnnounceThreshold(t, 2) } | ||||
| func TestULCAnnounceThresholdLes3(t *testing.T) { testULCAnnounceThreshold(t, 3) } | ||||
| 
 | ||||
| func testULCAnnounceThreshold(t *testing.T, protocol int) { | ||||
| 	// TODO: figure out why it takes the fetcher so long to fetch the announced header. | ||||
| 	t.Skip("Sometimes it can fail") | ||||
| 
 | ||||
| 	// newTestLightPeer creates a node with light sync mode | ||||
| 	newTestLightPeer := func(t *testing.T, protocol int, ulcServers []string, ulcFraction int) (*testClient, func()) { | ||||
| 		netconfig := testnetConfig{ | ||||
| 			protocol:    protocol, | ||||
| 			ulcServers:  ulcServers, | ||||
| 			ulcFraction: ulcFraction, | ||||
| 			nopruning:   true, | ||||
| 		} | ||||
| 		_, c, teardown := newClientServerEnv(t, netconfig) | ||||
| 		return c, teardown | ||||
| 	} | ||||
| 
 | ||||
| 	var cases = []struct { | ||||
| 		height    []int | ||||
| 		threshold int | ||||
| 		expect    uint64 | ||||
| 	}{ | ||||
| 		{[]int{1}, 100, 1}, | ||||
| 		{[]int{0, 0, 0}, 100, 0}, | ||||
| 		{[]int{1, 2, 3}, 30, 3}, | ||||
| 		{[]int{1, 2, 3}, 60, 2}, | ||||
| 		{[]int{3, 2, 1}, 67, 1}, | ||||
| 		{[]int{3, 2, 1}, 100, 1}, | ||||
| 	} | ||||
| 	for _, testcase := range cases { | ||||
| 		var ( | ||||
| 			servers   []*testServer | ||||
| 			teardowns []func() | ||||
| 			nodes     []*enode.Node | ||||
| 			ids       []string | ||||
| 		) | ||||
| 		for i := 0; i < len(testcase.height); i++ { | ||||
| 			s, n, teardown := newTestServerPeer(t, 0, protocol, nil) | ||||
| 
 | ||||
| 			servers = append(servers, s) | ||||
| 			nodes = append(nodes, n) | ||||
| 			teardowns = append(teardowns, teardown) | ||||
| 			ids = append(ids, n.String()) | ||||
| 		} | ||||
| 		c, teardown := newTestLightPeer(t, protocol, ids, testcase.threshold) | ||||
| 
 | ||||
| 		// Connect all servers. | ||||
| 		for i := 0; i < len(servers); i++ { | ||||
| 			connect(servers[i].handler, nodes[i].ID(), c.handler, protocol, false) | ||||
| 		} | ||||
| 		for i := 0; i < len(servers); i++ { | ||||
| 			for j := 0; j < testcase.height[i]; j++ { | ||||
| 				servers[i].backend.Commit() | ||||
| 			} | ||||
| 		} | ||||
| 		time.Sleep(1500 * time.Millisecond) // Ensure the fetcher has done its work. | ||||
| 		head := c.handler.backend.blockchain.CurrentHeader().Number.Uint64() | ||||
| 		if head != testcase.expect { | ||||
| 			t.Fatalf("chain height mismatch, want %d, got %d", testcase.expect, head) | ||||
| 		} | ||||
| 
 | ||||
| 		// Release all servers and client resources. | ||||
| 		teardown() | ||||
| 		for i := 0; i < len(teardowns); i++ { | ||||
| 			teardowns[i]() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func connect(server *serverHandler, serverId enode.ID, client *clientHandler, protocol int, noInitAnnounce bool) (*serverPeer, *clientPeer, error) { | ||||
| 	// Create a message pipe to communicate through | ||||
| 	app, net := p2p.MsgPipe() | ||||
| 
 | ||||
| 	var id enode.ID | ||||
| 	rand.Read(id[:]) | ||||
| 
 | ||||
| 	peer1 := newServerPeer(protocol, NetworkId, true, p2p.NewPeer(serverId, "", nil), net) // Mark server as trusted | ||||
| 	peer2 := newClientPeer(protocol, NetworkId, p2p.NewPeer(id, "", nil), app) | ||||
| 
 | ||||
| 	// Start serving both peers on separate goroutines | ||||
| 	errc1 := make(chan error, 1) | ||||
| 	errc2 := make(chan error, 1) | ||||
| 	go func() { | ||||
| 		select { | ||||
| 		case <-server.closeCh: | ||||
| 			errc1 <- p2p.DiscQuitting | ||||
| 		case errc1 <- server.handle(peer2): | ||||
| 		} | ||||
| 	}() | ||||
| 	go func() { | ||||
| 		select { | ||||
| 		case <-client.closeCh: | ||||
| 			errc1 <- p2p.DiscQuitting | ||||
| 		case errc1 <- client.handle(peer1, noInitAnnounce): | ||||
| 		} | ||||
| 	}() | ||||
| 	// Ensure the connection is established, or exit when any error occurs | ||||
| 	for { | ||||
| 		select { | ||||
| 		case err := <-errc1: | ||||
| 			return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err) | ||||
| 		case err := <-errc2: | ||||
| 			return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err) | ||||
| 		default: | ||||
| 		} | ||||
| 		if peer1.serving.Load() && peer2.serving.Load() { | ||||
| 			break | ||||
| 		} | ||||
| 		time.Sleep(50 * time.Millisecond) | ||||
| 	} | ||||
| 	return peer1, peer2, nil | ||||
| } | ||||
| 
 | ||||
| // newTestServerPeer creates a server peer. | ||||
| func newTestServerPeer(t *testing.T, blocks int, protocol int, indexFn indexerCallback) (*testServer, *enode.Node, func()) { | ||||
| 	netconfig := testnetConfig{ | ||||
| 		blocks:    blocks, | ||||
| 		protocol:  protocol, | ||||
| 		indexFn:   indexFn, | ||||
| 		nopruning: true, | ||||
| 	} | ||||
| 	s, _, teardown := newClientServerEnv(t, netconfig) | ||||
| 	key, err := crypto.GenerateKey() | ||||
| 	if err != nil { | ||||
| 		t.Fatal("generate key err:", err) | ||||
| 	} | ||||
| 	s.handler.server.privateKey = key | ||||
| 	n := enode.NewV4(&key.PublicKey, net.ParseIP("127.0.0.1"), 35000, 35000) | ||||
| 	return s, n, teardown | ||||
| } | ||||