les: implement new les fetcher (#20692)
* cmd, consensus, eth, les: implement light fetcher * les: address comment * les: address comment * les: address comments * les: check td after delivery * les: add linearExpiredValue for error counter * les: fix import * les: fix dead lock * les: order announces by td * les: encapsulate invalid counter * les: address comment * les: add more checks during the delivery * les: fix log * eth, les: fix lint * eth/fetcher: address comment
This commit is contained in:
		
							parent
							
								
									93da0cf8a1
								
							
						
					
					
						commit
						28c5a8a54b
					
				| @ -200,11 +200,11 @@ func (e *NoRewardEngine) Author(header *types.Header) (common.Address, error) { | ||||
| 	return e.inner.Author(header) | ||||
| } | ||||
| 
 | ||||
| func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error { | ||||
| func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { | ||||
| 	return e.inner.VerifyHeader(chain, header, seal) | ||||
| } | ||||
| 
 | ||||
| func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { | ||||
| func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { | ||||
| 	return e.inner.VerifyHeaders(chain, headers, seals) | ||||
| } | ||||
| 
 | ||||
| @ -212,11 +212,11 @@ func (e *NoRewardEngine) VerifyUncles(chain consensus.ChainReader, block *types. | ||||
| 	return e.inner.VerifyUncles(chain, block) | ||||
| } | ||||
| 
 | ||||
| func (e *NoRewardEngine) VerifySeal(chain consensus.ChainReader, header *types.Header) error { | ||||
| func (e *NoRewardEngine) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { | ||||
| 	return e.inner.VerifySeal(chain, header) | ||||
| } | ||||
| 
 | ||||
| func (e *NoRewardEngine) Prepare(chain consensus.ChainReader, header *types.Header) error { | ||||
| func (e *NoRewardEngine) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { | ||||
| 	return e.inner.Prepare(chain, header) | ||||
| } | ||||
| 
 | ||||
| @ -229,7 +229,7 @@ func (e *NoRewardEngine) accumulateRewards(config *params.ChainConfig, state *st | ||||
| 	state.AddBalance(header.Coinbase, reward) | ||||
| } | ||||
| 
 | ||||
| func (e *NoRewardEngine) Finalize(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction, | ||||
| func (e *NoRewardEngine) Finalize(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction, | ||||
| 	uncles []*types.Header) { | ||||
| 	if e.rewardsOn { | ||||
| 		e.inner.Finalize(chain, header, statedb, txs, uncles) | ||||
| @ -239,7 +239,7 @@ func (e *NoRewardEngine) Finalize(chain consensus.ChainReader, header *types.Hea | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction, | ||||
| func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction, | ||||
| 	uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { | ||||
| 	if e.rewardsOn { | ||||
| 		return e.inner.FinalizeAndAssemble(chain, header, statedb, txs, uncles, receipts) | ||||
| @ -252,7 +252,7 @@ func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainReader, header | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (e *NoRewardEngine) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { | ||||
| func (e *NoRewardEngine) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { | ||||
| 	return e.inner.Seal(chain, block, results, stop) | ||||
| } | ||||
| 
 | ||||
| @ -260,11 +260,11 @@ func (e *NoRewardEngine) SealHash(header *types.Header) common.Hash { | ||||
| 	return e.inner.SealHash(header) | ||||
| } | ||||
| 
 | ||||
| func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int { | ||||
| func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { | ||||
| 	return e.inner.CalcDifficulty(chain, time, parent) | ||||
| } | ||||
| 
 | ||||
| func (e *NoRewardEngine) APIs(chain consensus.ChainReader) []rpc.API { | ||||
| func (e *NoRewardEngine) APIs(chain consensus.ChainHeaderReader) []rpc.API { | ||||
| 	return e.inner.APIs(chain) | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -28,7 +28,7 @@ import ( | ||||
| // API is a user facing RPC API to allow controlling the signer and voting
 | ||||
| // mechanisms of the proof-of-authority scheme.
 | ||||
| type API struct { | ||||
| 	chain  consensus.ChainReader | ||||
| 	chain  consensus.ChainHeaderReader | ||||
| 	clique *Clique | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -213,14 +213,14 @@ func (c *Clique) Author(header *types.Header) (common.Address, error) { | ||||
| } | ||||
| 
 | ||||
| // VerifyHeader checks whether a header conforms to the consensus rules.
 | ||||
| func (c *Clique) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error { | ||||
| func (c *Clique) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { | ||||
| 	return c.verifyHeader(chain, header, nil) | ||||
| } | ||||
| 
 | ||||
| // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The
 | ||||
| // method returns a quit channel to abort the operations and a results channel to
 | ||||
| // retrieve the async verifications (the order is that of the input slice).
 | ||||
| func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { | ||||
| func (c *Clique) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { | ||||
| 	abort := make(chan struct{}) | ||||
| 	results := make(chan error, len(headers)) | ||||
| 
 | ||||
| @ -242,7 +242,7 @@ func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Hea | ||||
| // caller may optionally pass in a batch of parents (ascending order) to avoid
 | ||||
| // looking those up from the database. This is useful for concurrently verifying
 | ||||
| // a batch of new headers.
 | ||||
| func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error { | ||||
| func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error { | ||||
| 	if header.Number == nil { | ||||
| 		return errUnknownBlock | ||||
| 	} | ||||
| @ -305,7 +305,7 @@ func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, | ||||
| // rather depend on a batch of previous headers. The caller may optionally pass
 | ||||
| // in a batch of parents (ascending order) to avoid looking those up from the
 | ||||
| // database. This is useful for concurrently verifying a batch of new headers.
 | ||||
| func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error { | ||||
| func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error { | ||||
| 	// The genesis block is the always valid dead-end
 | ||||
| 	number := header.Number.Uint64() | ||||
| 	if number == 0 { | ||||
| @ -345,7 +345,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type | ||||
| } | ||||
| 
 | ||||
| // snapshot retrieves the authorization snapshot at a given point in time.
 | ||||
| func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) { | ||||
| func (c *Clique) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) { | ||||
| 	// Search for a snapshot in memory or on disk for checkpoints
 | ||||
| 	var ( | ||||
| 		headers []*types.Header | ||||
| @ -436,7 +436,7 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e | ||||
| 
 | ||||
| // VerifySeal implements consensus.Engine, checking whether the signature contained
 | ||||
| // in the header satisfies the consensus protocol requirements.
 | ||||
| func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) error { | ||||
| func (c *Clique) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { | ||||
| 	return c.verifySeal(chain, header, nil) | ||||
| } | ||||
| 
 | ||||
| @ -444,7 +444,7 @@ func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) e | ||||
| // consensus protocol requirements. The method accepts an optional list of parent
 | ||||
| // headers that aren't yet part of the local blockchain to generate the snapshots
 | ||||
| // from.
 | ||||
| func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error { | ||||
| func (c *Clique) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, parents []*types.Header) error { | ||||
| 	// Verifying the genesis block is not supported
 | ||||
| 	number := header.Number.Uint64() | ||||
| 	if number == 0 { | ||||
| @ -487,7 +487,7 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p | ||||
| 
 | ||||
| // Prepare implements consensus.Engine, preparing all the consensus fields of the
 | ||||
| // header for running the transactions on top.
 | ||||
| func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error { | ||||
| func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { | ||||
| 	// If the block isn't a checkpoint, cast a random vote (good enough for now)
 | ||||
| 	header.Coinbase = common.Address{} | ||||
| 	header.Nonce = types.BlockNonce{} | ||||
| @ -552,7 +552,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro | ||||
| 
 | ||||
| // Finalize implements consensus.Engine, ensuring no uncles are set, nor block
 | ||||
| // rewards given.
 | ||||
| func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { | ||||
| func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { | ||||
| 	// No block rewards in PoA, so the state remains as is and uncles are dropped
 | ||||
| 	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) | ||||
| 	header.UncleHash = types.CalcUncleHash(nil) | ||||
| @ -560,7 +560,7 @@ func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, sta | ||||
| 
 | ||||
| // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
 | ||||
| // nor block rewards given, and returns the final block.
 | ||||
| func (c *Clique) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { | ||||
| func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { | ||||
| 	// No block rewards in PoA, so the state remains as is and uncles are dropped
 | ||||
| 	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) | ||||
| 	header.UncleHash = types.CalcUncleHash(nil) | ||||
| @ -581,7 +581,7 @@ func (c *Clique) Authorize(signer common.Address, signFn SignerFn) { | ||||
| 
 | ||||
| // Seal implements consensus.Engine, attempting to create a sealed block using
 | ||||
| // the local signing credentials.
 | ||||
| func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { | ||||
| func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { | ||||
| 	header := block.Header() | ||||
| 
 | ||||
| 	// Sealing the genesis block is not supported
 | ||||
| @ -654,7 +654,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results c | ||||
| // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
 | ||||
| // that a new block should have based on the previous blocks in the chain and the
 | ||||
| // current signer.
 | ||||
| func (c *Clique) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int { | ||||
| func (c *Clique) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { | ||||
| 	snap, err := c.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil) | ||||
| 	if err != nil { | ||||
| 		return nil | ||||
| @ -684,7 +684,7 @@ func (c *Clique) Close() error { | ||||
| 
 | ||||
| // APIs implements consensus.Engine, returning the user facing RPC API to allow
 | ||||
| // controlling the signer voting.
 | ||||
| func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API { | ||||
| func (c *Clique) APIs(chain consensus.ChainHeaderReader) []rpc.API { | ||||
| 	return []rpc.API{{ | ||||
| 		Namespace: "clique", | ||||
| 		Version:   "1.0", | ||||
|  | ||||
| @ -27,9 +27,9 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/rpc" | ||||
| ) | ||||
| 
 | ||||
| // ChainReader defines a small collection of methods needed to access the local
 | ||||
| // blockchain during header and/or uncle verification.
 | ||||
| type ChainReader interface { | ||||
| // ChainHeaderReader defines a small collection of methods needed to access the local
 | ||||
| // blockchain during header verification.
 | ||||
| type ChainHeaderReader interface { | ||||
| 	// Config retrieves the blockchain's chain configuration.
 | ||||
| 	Config() *params.ChainConfig | ||||
| 
 | ||||
| @ -44,6 +44,12 @@ type ChainReader interface { | ||||
| 
 | ||||
| 	// GetHeaderByHash retrieves a block header from the database by its hash.
 | ||||
| 	GetHeaderByHash(hash common.Hash) *types.Header | ||||
| } | ||||
| 
 | ||||
| // ChainReader defines a small collection of methods needed to access the local
 | ||||
| // blockchain during header and/or uncle verification.
 | ||||
| type ChainReader interface { | ||||
| 	ChainHeaderReader | ||||
| 
 | ||||
| 	// GetBlock retrieves a block from the database by hash and number.
 | ||||
| 	GetBlock(hash common.Hash, number uint64) *types.Block | ||||
| @ -59,13 +65,13 @@ type Engine interface { | ||||
| 	// VerifyHeader checks whether a header conforms to the consensus rules of a
 | ||||
| 	// given engine. Verifying the seal may be done optionally here, or explicitly
 | ||||
| 	// via the VerifySeal method.
 | ||||
| 	VerifyHeader(chain ChainReader, header *types.Header, seal bool) error | ||||
| 	VerifyHeader(chain ChainHeaderReader, header *types.Header, seal bool) error | ||||
| 
 | ||||
| 	// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
 | ||||
| 	// concurrently. The method returns a quit channel to abort the operations and
 | ||||
| 	// a results channel to retrieve the async verifications (the order is that of
 | ||||
| 	// the input slice).
 | ||||
| 	VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) | ||||
| 	VerifyHeaders(chain ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) | ||||
| 
 | ||||
| 	// VerifyUncles verifies that the given block's uncles conform to the consensus
 | ||||
| 	// rules of a given engine.
 | ||||
| @ -73,18 +79,18 @@ type Engine interface { | ||||
| 
 | ||||
| 	// VerifySeal checks whether the crypto seal on a header is valid according to
 | ||||
| 	// the consensus rules of the given engine.
 | ||||
| 	VerifySeal(chain ChainReader, header *types.Header) error | ||||
| 	VerifySeal(chain ChainHeaderReader, header *types.Header) error | ||||
| 
 | ||||
| 	// Prepare initializes the consensus fields of a block header according to the
 | ||||
| 	// rules of a particular engine. The changes are executed inline.
 | ||||
| 	Prepare(chain ChainReader, header *types.Header) error | ||||
| 	Prepare(chain ChainHeaderReader, header *types.Header) error | ||||
| 
 | ||||
| 	// Finalize runs any post-transaction state modifications (e.g. block rewards)
 | ||||
| 	// but does not assemble the block.
 | ||||
| 	//
 | ||||
| 	// Note: The block header and state database might be updated to reflect any
 | ||||
| 	// consensus rules that happen at finalization (e.g. block rewards).
 | ||||
| 	Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, | ||||
| 	Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, | ||||
| 		uncles []*types.Header) | ||||
| 
 | ||||
| 	// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
 | ||||
| @ -92,7 +98,7 @@ type Engine interface { | ||||
| 	//
 | ||||
| 	// Note: The block header and state database might be updated to reflect any
 | ||||
| 	// consensus rules that happen at finalization (e.g. block rewards).
 | ||||
| 	FinalizeAndAssemble(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, | ||||
| 	FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, | ||||
| 		uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) | ||||
| 
 | ||||
| 	// Seal generates a new sealing request for the given input block and pushes
 | ||||
| @ -100,17 +106,17 @@ type Engine interface { | ||||
| 	//
 | ||||
| 	// Note, the method returns immediately and will send the result async. More
 | ||||
| 	// than one result may also be returned depending on the consensus algorithm.
 | ||||
| 	Seal(chain ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error | ||||
| 	Seal(chain ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error | ||||
| 
 | ||||
| 	// SealHash returns the hash of a block prior to it being sealed.
 | ||||
| 	SealHash(header *types.Header) common.Hash | ||||
| 
 | ||||
| 	// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
 | ||||
| 	// that a new block should have.
 | ||||
| 	CalcDifficulty(chain ChainReader, time uint64, parent *types.Header) *big.Int | ||||
| 	CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int | ||||
| 
 | ||||
| 	// APIs returns the RPC APIs this consensus engine provides.
 | ||||
| 	APIs(chain ChainReader) []rpc.API | ||||
| 	APIs(chain ChainHeaderReader) []rpc.API | ||||
| 
 | ||||
| 	// Close terminates any background threads maintained by the consensus engine.
 | ||||
| 	Close() error | ||||
|  | ||||
| @ -86,7 +86,7 @@ func (ethash *Ethash) Author(header *types.Header) (common.Address, error) { | ||||
| 
 | ||||
| // VerifyHeader checks whether a header conforms to the consensus rules of the
 | ||||
| // stock Ethereum ethash engine.
 | ||||
| func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error { | ||||
| func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { | ||||
| 	// If we're running a full engine faking, accept any input as valid
 | ||||
| 	if ethash.config.PowMode == ModeFullFake { | ||||
| 		return nil | ||||
| @ -107,7 +107,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He | ||||
| // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
 | ||||
| // concurrently. The method returns a quit channel to abort the operations and
 | ||||
| // a results channel to retrieve the async verifications.
 | ||||
| func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { | ||||
| func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { | ||||
| 	// If we're running a full engine faking, accept any input as valid
 | ||||
| 	if ethash.config.PowMode == ModeFullFake || len(headers) == 0 { | ||||
| 		abort, results := make(chan struct{}), make(chan error, len(headers)) | ||||
| @ -169,7 +169,7 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*type | ||||
| 	return abort, errorsOut | ||||
| } | ||||
| 
 | ||||
| func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainReader, headers []*types.Header, seals []bool, index int) error { | ||||
| func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error { | ||||
| 	var parent *types.Header | ||||
| 	if index == 0 { | ||||
| 		parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1) | ||||
| @ -243,7 +243,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo | ||||
| // verifyHeader checks whether a header conforms to the consensus rules of the
 | ||||
| // stock Ethereum ethash engine.
 | ||||
| // See YP section 4.3.4. "Block Header Validity"
 | ||||
| func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error { | ||||
| func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error { | ||||
| 	// Ensure that the header's extra-data section is of a reasonable size
 | ||||
| 	if uint64(len(header.Extra)) > params.MaximumExtraDataSize { | ||||
| 		return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) | ||||
| @ -306,7 +306,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent * | ||||
| // CalcDifficulty is the difficulty adjustment algorithm. It returns
 | ||||
| // the difficulty that a new block should have when created at time
 | ||||
| // given the parent block's time and difficulty.
 | ||||
| func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int { | ||||
| func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { | ||||
| 	return CalcDifficulty(chain.Config(), time, parent) | ||||
| } | ||||
| 
 | ||||
| @ -486,14 +486,14 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int { | ||||
| 
 | ||||
| // VerifySeal implements consensus.Engine, checking whether the given block satisfies
 | ||||
| // the PoW difficulty requirements.
 | ||||
| func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error { | ||||
| func (ethash *Ethash) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { | ||||
| 	return ethash.verifySeal(chain, header, false) | ||||
| } | ||||
| 
 | ||||
| // verifySeal checks whether a block satisfies the PoW difficulty requirements,
 | ||||
| // either using the usual ethash cache for it, or alternatively using a full DAG
 | ||||
| // to make remote mining fast.
 | ||||
| func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error { | ||||
| func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, fulldag bool) error { | ||||
| 	// If we're running a fake PoW, accept any seal as valid
 | ||||
| 	if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake { | ||||
| 		time.Sleep(ethash.fakeDelay) | ||||
| @ -558,7 +558,7 @@ func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Head | ||||
| 
 | ||||
| // Prepare implements consensus.Engine, initializing the difficulty field of a
 | ||||
| // header to conform to the ethash protocol. The changes are done inline.
 | ||||
| func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) error { | ||||
| func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { | ||||
| 	parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) | ||||
| 	if parent == nil { | ||||
| 		return consensus.ErrUnknownAncestor | ||||
| @ -569,7 +569,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) | ||||
| 
 | ||||
| // Finalize implements consensus.Engine, accumulating the block and uncle rewards,
 | ||||
| // setting the final state on the header
 | ||||
| func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { | ||||
| func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { | ||||
| 	// Accumulate any block and uncle rewards and commit the final state root
 | ||||
| 	accumulateRewards(chain.Config(), state, header, uncles) | ||||
| 	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) | ||||
| @ -577,7 +577,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header | ||||
| 
 | ||||
| // FinalizeAndAssemble implements consensus.Engine, accumulating the block and
 | ||||
| // uncle rewards, setting the final state and assembling the block.
 | ||||
| func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { | ||||
| func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { | ||||
| 	// Accumulate any block and uncle rewards and commit the final state root
 | ||||
| 	accumulateRewards(chain.Config(), state, header, uncles) | ||||
| 	header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) | ||||
|  | ||||
| @ -656,7 +656,7 @@ func (ethash *Ethash) Hashrate() float64 { | ||||
| } | ||||
| 
 | ||||
| // APIs implements consensus.Engine, returning the user facing RPC APIs.
 | ||||
| func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API { | ||||
| func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API { | ||||
| 	// In order to ensure backward compatibility, we exposes ethash RPC APIs
 | ||||
| 	// to both eth and ethash namespaces.
 | ||||
| 	return []rpc.API{ | ||||
|  | ||||
| @ -48,7 +48,7 @@ var ( | ||||
| 
 | ||||
| // Seal implements consensus.Engine, attempting to find a nonce that satisfies
 | ||||
| // the block's difficulty requirements.
 | ||||
| func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { | ||||
| func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { | ||||
| 	// If we're running a fake PoW, simply return a 0 nonce immediately
 | ||||
| 	if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake { | ||||
| 		header := block.Header() | ||||
|  | ||||
| @ -14,7 +14,7 @@ | ||||
| // You should have received a copy of the GNU Lesser General Public License
 | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||
| 
 | ||||
| // Package fetcher contains the announcement based blocks or transaction synchronisation.
 | ||||
| // Package fetcher contains the announcement based header, blocks or transaction synchronisation.
 | ||||
| package fetcher | ||||
| 
 | ||||
| import ( | ||||
| @ -31,6 +31,7 @@ import ( | ||||
| ) | ||||
| 
 | ||||
| const ( | ||||
| 	lightTimeout  = time.Millisecond       // Time allowance before an announced header is explicitly requested
 | ||||
| 	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
 | ||||
| 	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
 | ||||
| 	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
 | ||||
| @ -39,7 +40,7 @@ const ( | ||||
| const ( | ||||
| 	maxUncleDist = 7   // Maximum allowed backward distance from the chain head
 | ||||
| 	maxQueueDist = 32  // Maximum allowed distance from the chain head to queue
 | ||||
| 	hashLimit    = 256 // Maximum number of unique blocks a peer may have announced
 | ||||
| 	hashLimit    = 256 // Maximum number of unique blocks or headers a peer may have announced
 | ||||
| 	blockLimit   = 64  // Maximum number of unique blocks a peer may have delivered
 | ||||
| ) | ||||
| 
 | ||||
| @ -63,9 +64,10 @@ var ( | ||||
| 	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil) | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	errTerminated = errors.New("terminated") | ||||
| ) | ||||
| var errTerminated = errors.New("terminated") | ||||
| 
 | ||||
| // HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
 | ||||
| type HeaderRetrievalFn func(common.Hash) *types.Header | ||||
| 
 | ||||
| // blockRetrievalFn is a callback type for retrieving a block from the local chain.
 | ||||
| type blockRetrievalFn func(common.Hash) *types.Block | ||||
| @ -85,6 +87,9 @@ type blockBroadcasterFn func(block *types.Block, propagate bool) | ||||
| // chainHeightFn is a callback type to retrieve the current chain height.
 | ||||
| type chainHeightFn func() uint64 | ||||
| 
 | ||||
| // headersInsertFn is a callback type to insert a batch of headers into the local chain.
 | ||||
| type headersInsertFn func(headers []*types.Header) (int, error) | ||||
| 
 | ||||
| // chainInsertFn is a callback type to insert a batch of blocks into the local chain.
 | ||||
| type chainInsertFn func(types.Blocks) (int, error) | ||||
| 
 | ||||
| @ -121,18 +126,38 @@ type bodyFilterTask struct { | ||||
| 	time         time.Time              // Arrival time of the blocks' contents
 | ||||
| } | ||||
| 
 | ||||
| // blockInject represents a schedules import operation.
 | ||||
| type blockInject struct { | ||||
| // blockOrHeaderInject represents a scheduled import operation.
 | ||||
| type blockOrHeaderInject struct { | ||||
| 	origin string | ||||
| 	block  *types.Block | ||||
| 
 | ||||
| 	header *types.Header // Used for light mode fetcher which only cares about header.
 | ||||
| 	block  *types.Block  // Used for normal mode fetcher which imports full block.
 | ||||
| } | ||||
| 
 | ||||
| // number returns the block number of the injected object.
 | ||||
| func (inject *blockOrHeaderInject) number() uint64 { | ||||
| 	if inject.header != nil { | ||||
| 		return inject.header.Number.Uint64() | ||||
| 	} | ||||
| 	return inject.block.NumberU64() | ||||
| } | ||||
| 
 | ||||
| // hash returns the block hash of the injected object.
 | ||||
| func (inject *blockOrHeaderInject) hash() common.Hash { | ||||
| 	if inject.header != nil { | ||||
| 		return inject.header.Hash() | ||||
| 	} | ||||
| 	return inject.block.Hash() | ||||
| } | ||||
| 
 | ||||
| // BlockFetcher is responsible for accumulating block announcements from various peers
 | ||||
| // and scheduling them for retrieval.
 | ||||
| type BlockFetcher struct { | ||||
| 	light bool // The indicator whether it's a light fetcher or normal one.
 | ||||
| 
 | ||||
| 	// Various event channels
 | ||||
| 	notify chan *blockAnnounce | ||||
| 	inject chan *blockInject | ||||
| 	inject chan *blockOrHeaderInject | ||||
| 
 | ||||
| 	headerFilter chan chan *headerFilterTask | ||||
| 	bodyFilter   chan chan *bodyFilterTask | ||||
| @ -148,31 +173,34 @@ type BlockFetcher struct { | ||||
| 	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing
 | ||||
| 
 | ||||
| 	// Block cache
 | ||||
| 	queue  *prque.Prque                 // Queue containing the import operations (block number sorted)
 | ||||
| 	queues map[string]int               // Per peer block counts to prevent memory exhaustion
 | ||||
| 	queued map[common.Hash]*blockInject // Set of already queued blocks (to dedupe imports)
 | ||||
| 	queue  *prque.Prque                         // Queue containing the import operations (block number sorted)
 | ||||
| 	queues map[string]int                       // Per peer block counts to prevent memory exhaustion
 | ||||
| 	queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports)
 | ||||
| 
 | ||||
| 	// Callbacks
 | ||||
| 	getHeader      HeaderRetrievalFn  // Retrieves a header from the local chain
 | ||||
| 	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
 | ||||
| 	verifyHeader   headerVerifierFn   // Checks if a block's headers have a valid proof of work
 | ||||
| 	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
 | ||||
| 	chainHeight    chainHeightFn      // Retrieves the current chain's height
 | ||||
| 	insertHeaders  headersInsertFn    // Injects a batch of headers into the chain
 | ||||
| 	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
 | ||||
| 	dropPeer       peerDropFn         // Drops a peer for misbehaving
 | ||||
| 
 | ||||
| 	// Testing hooks
 | ||||
| 	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
 | ||||
| 	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
 | ||||
| 	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
 | ||||
| 	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
 | ||||
| 	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
 | ||||
| 	announceChangeHook func(common.Hash, bool)           // Method to call upon adding or deleting a hash from the blockAnnounce list
 | ||||
| 	queueChangeHook    func(common.Hash, bool)           // Method to call upon adding or deleting a block from the import queue
 | ||||
| 	fetchingHook       func([]common.Hash)               // Method to call upon starting a block (eth/61) or header (eth/62) fetch
 | ||||
| 	completingHook     func([]common.Hash)               // Method to call upon starting a block body fetch (eth/62)
 | ||||
| 	importedHook       func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62)
 | ||||
| } | ||||
| 
 | ||||
| // NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
 | ||||
| func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher { | ||||
| func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher { | ||||
| 	return &BlockFetcher{ | ||||
| 		light:          light, | ||||
| 		notify:         make(chan *blockAnnounce), | ||||
| 		inject:         make(chan *blockInject), | ||||
| 		inject:         make(chan *blockOrHeaderInject), | ||||
| 		headerFilter:   make(chan chan *headerFilterTask), | ||||
| 		bodyFilter:     make(chan chan *bodyFilterTask), | ||||
| 		done:           make(chan common.Hash), | ||||
| @ -184,11 +212,13 @@ func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, b | ||||
| 		completing:     make(map[common.Hash]*blockAnnounce), | ||||
| 		queue:          prque.New(nil), | ||||
| 		queues:         make(map[string]int), | ||||
| 		queued:         make(map[common.Hash]*blockInject), | ||||
| 		queued:         make(map[common.Hash]*blockOrHeaderInject), | ||||
| 		getHeader:      getHeader, | ||||
| 		getBlock:       getBlock, | ||||
| 		verifyHeader:   verifyHeader, | ||||
| 		broadcastBlock: broadcastBlock, | ||||
| 		chainHeight:    chainHeight, | ||||
| 		insertHeaders:  insertHeaders, | ||||
| 		insertChain:    insertChain, | ||||
| 		dropPeer:       dropPeer, | ||||
| 	} | ||||
| @ -228,7 +258,7 @@ func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time | ||||
| 
 | ||||
| // Enqueue tries to fill gaps the fetcher's future import queue.
 | ||||
| func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error { | ||||
| 	op := &blockInject{ | ||||
| 	op := &blockOrHeaderInject{ | ||||
| 		origin: peer, | ||||
| 		block:  block, | ||||
| 	} | ||||
| @ -315,13 +345,13 @@ func (f *BlockFetcher) loop() { | ||||
| 		// Import any queued blocks that could potentially fit
 | ||||
| 		height := f.chainHeight() | ||||
| 		for !f.queue.Empty() { | ||||
| 			op := f.queue.PopItem().(*blockInject) | ||||
| 			hash := op.block.Hash() | ||||
| 			op := f.queue.PopItem().(*blockOrHeaderInject) | ||||
| 			hash := op.hash() | ||||
| 			if f.queueChangeHook != nil { | ||||
| 				f.queueChangeHook(hash, false) | ||||
| 			} | ||||
| 			// If too high up the chain or phase, continue later
 | ||||
| 			number := op.block.NumberU64() | ||||
| 			number := op.number() | ||||
| 			if number > height+1 { | ||||
| 				f.queue.Push(op, -int64(number)) | ||||
| 				if f.queueChangeHook != nil { | ||||
| @ -330,11 +360,15 @@ func (f *BlockFetcher) loop() { | ||||
| 				break | ||||
| 			} | ||||
| 			// Otherwise if fresh and still unknown, try and import
 | ||||
| 			if number+maxUncleDist < height || f.getBlock(hash) != nil { | ||||
| 			if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) { | ||||
| 				f.forgetBlock(hash) | ||||
| 				continue | ||||
| 			} | ||||
| 			f.insert(op.origin, op.block) | ||||
| 			if f.light { | ||||
| 				f.importHeaders(op.origin, op.header) | ||||
| 			} else { | ||||
| 				f.importBlocks(op.origin, op.block) | ||||
| 			} | ||||
| 		} | ||||
| 		// Wait for an outside event to occur
 | ||||
| 		select { | ||||
| @ -379,7 +413,13 @@ func (f *BlockFetcher) loop() { | ||||
| 		case op := <-f.inject: | ||||
| 			// A direct block insertion was requested, try and fill any pending gaps
 | ||||
| 			blockBroadcastInMeter.Mark(1) | ||||
| 			f.enqueue(op.origin, op.block) | ||||
| 
 | ||||
| 			// Now only direct block injection is allowed, drop the header injection
 | ||||
| 			// here silently if we receive.
 | ||||
| 			if f.light { | ||||
| 				continue | ||||
| 			} | ||||
| 			f.enqueue(op.origin, nil, op.block) | ||||
| 
 | ||||
| 		case hash := <-f.done: | ||||
| 			// A pending import finished, remove all traces of the notification
 | ||||
| @ -391,13 +431,19 @@ func (f *BlockFetcher) loop() { | ||||
| 			request := make(map[string][]common.Hash) | ||||
| 
 | ||||
| 			for hash, announces := range f.announced { | ||||
| 				if time.Since(announces[0].time) > arriveTimeout-gatherSlack { | ||||
| 				// In current LES protocol(les2/les3), only header announce is
 | ||||
| 				// available, no need to wait too much time for header broadcast.
 | ||||
| 				timeout := arriveTimeout - gatherSlack | ||||
| 				if f.light { | ||||
| 					timeout = 0 | ||||
| 				} | ||||
| 				if time.Since(announces[0].time) > timeout { | ||||
| 					// Pick a random peer to retrieve from, reset all others
 | ||||
| 					announce := announces[rand.Intn(len(announces))] | ||||
| 					f.forgetHash(hash) | ||||
| 
 | ||||
| 					// If the block still didn't arrive, queue for fetching
 | ||||
| 					if f.getBlock(hash) == nil { | ||||
| 					if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) { | ||||
| 						request[announce.origin] = append(request[announce.origin], hash) | ||||
| 						f.fetching[hash] = announce | ||||
| 					} | ||||
| @ -465,7 +511,7 @@ func (f *BlockFetcher) loop() { | ||||
| 
 | ||||
| 			// Split the batch of headers into unknown ones (to return to the caller),
 | ||||
| 			// known incomplete ones (requiring body retrievals) and completed blocks.
 | ||||
| 			unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{} | ||||
| 			unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{} | ||||
| 			for _, header := range task.headers { | ||||
| 				hash := header.Hash() | ||||
| 
 | ||||
| @ -478,6 +524,16 @@ func (f *BlockFetcher) loop() { | ||||
| 						f.forgetHash(hash) | ||||
| 						continue | ||||
| 					} | ||||
| 					// Collect all headers only if we are running in light
 | ||||
| 					// mode and the headers are not imported by other means.
 | ||||
| 					if f.light { | ||||
| 						if f.getHeader(hash) == nil { | ||||
| 							announce.header = header | ||||
| 							lightHeaders = append(lightHeaders, announce) | ||||
| 						} | ||||
| 						f.forgetHash(hash) | ||||
| 						continue | ||||
| 					} | ||||
| 					// Only keep if not imported by other means
 | ||||
| 					if f.getBlock(hash) == nil { | ||||
| 						announce.header = header | ||||
| @ -522,10 +578,14 @@ func (f *BlockFetcher) loop() { | ||||
| 					f.rescheduleComplete(completeTimer) | ||||
| 				} | ||||
| 			} | ||||
| 			// Schedule the header for light fetcher import
 | ||||
| 			for _, announce := range lightHeaders { | ||||
| 				f.enqueue(announce.origin, announce.header, nil) | ||||
| 			} | ||||
| 			// Schedule the header-only blocks for import
 | ||||
| 			for _, block := range complete { | ||||
| 				if announce := f.completing[block.Hash()]; announce != nil { | ||||
| 					f.enqueue(announce.origin, block) | ||||
| 					f.enqueue(announce.origin, nil, block) | ||||
| 				} | ||||
| 			} | ||||
| 
 | ||||
| @ -592,7 +652,7 @@ func (f *BlockFetcher) loop() { | ||||
| 			// Schedule the retrieved blocks for ordered import
 | ||||
| 			for _, block := range blocks { | ||||
| 				if announce := f.completing[block.Hash()]; announce != nil { | ||||
| 					f.enqueue(announce.origin, block) | ||||
| 					f.enqueue(announce.origin, nil, block) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| @ -605,6 +665,12 @@ func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) { | ||||
| 	if len(f.announced) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	// Schedule announcement retrieval quickly for light mode
 | ||||
| 	// since server won't send any headers to client.
 | ||||
| 	if f.light { | ||||
| 		fetch.Reset(lightTimeout) | ||||
| 		return | ||||
| 	} | ||||
| 	// Otherwise find the earliest expiring announcement
 | ||||
| 	earliest := time.Now() | ||||
| 	for _, announces := range f.announced { | ||||
| @ -631,46 +697,88 @@ func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) { | ||||
| 	complete.Reset(gatherSlack - time.Since(earliest)) | ||||
| } | ||||
| 
 | ||||
| // enqueue schedules a new future import operation, if the block to be imported
 | ||||
| // has not yet been seen.
 | ||||
| func (f *BlockFetcher) enqueue(peer string, block *types.Block) { | ||||
| 	hash := block.Hash() | ||||
| 
 | ||||
| // enqueue schedules a new header or block import operation, if the component
 | ||||
| // to be imported has not yet been seen.
 | ||||
| func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) { | ||||
| 	var ( | ||||
| 		hash   common.Hash | ||||
| 		number uint64 | ||||
| 	) | ||||
| 	if header != nil { | ||||
| 		hash, number = header.Hash(), header.Number.Uint64() | ||||
| 	} else { | ||||
| 		hash, number = block.Hash(), block.NumberU64() | ||||
| 	} | ||||
| 	// Ensure the peer isn't DOSing us
 | ||||
| 	count := f.queues[peer] + 1 | ||||
| 	if count > blockLimit { | ||||
| 		log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit) | ||||
| 		log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit) | ||||
| 		blockBroadcastDOSMeter.Mark(1) | ||||
| 		f.forgetHash(hash) | ||||
| 		return | ||||
| 	} | ||||
| 	// Discard any past or too distant blocks
 | ||||
| 	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { | ||||
| 		log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist) | ||||
| 	if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { | ||||
| 		log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist) | ||||
| 		blockBroadcastDropMeter.Mark(1) | ||||
| 		f.forgetHash(hash) | ||||
| 		return | ||||
| 	} | ||||
| 	// Schedule the block for future importing
 | ||||
| 	if _, ok := f.queued[hash]; !ok { | ||||
| 		op := &blockInject{ | ||||
| 			origin: peer, | ||||
| 			block:  block, | ||||
| 		op := &blockOrHeaderInject{origin: peer} | ||||
| 		if header != nil { | ||||
| 			op.header = header | ||||
| 		} else { | ||||
| 			op.block = block | ||||
| 		} | ||||
| 		f.queues[peer] = count | ||||
| 		f.queued[hash] = op | ||||
| 		f.queue.Push(op, -int64(block.NumberU64())) | ||||
| 		f.queue.Push(op, -int64(number)) | ||||
| 		if f.queueChangeHook != nil { | ||||
| 			f.queueChangeHook(op.block.Hash(), true) | ||||
| 			f.queueChangeHook(hash, true) | ||||
| 		} | ||||
| 		log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size()) | ||||
| 		log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size()) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // insert spawns a new goroutine to run a block insertion into the chain. If the
 | ||||
| // importHeaders spawns a new goroutine to run a header insertion into the chain.
 | ||||
| // If the header's number is at the same height as the current import phase, it
 | ||||
| // updates the phase states accordingly.
 | ||||
| func (f *BlockFetcher) importHeaders(peer string, header *types.Header) { | ||||
| 	hash := header.Hash() | ||||
| 	log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash) | ||||
| 
 | ||||
| 	go func() { | ||||
| 		defer func() { f.done <- hash }() | ||||
| 		// If the parent's unknown, abort insertion
 | ||||
| 		parent := f.getHeader(header.ParentHash) | ||||
| 		if parent == nil { | ||||
| 			log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash) | ||||
| 			return | ||||
| 		} | ||||
| 		// Validate the header and if something went wrong, drop the peer
 | ||||
| 		if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock { | ||||
| 			log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err) | ||||
| 			f.dropPeer(peer) | ||||
| 			return | ||||
| 		} | ||||
| 		// Run the actual import and log any issues
 | ||||
| 		if _, err := f.insertHeaders([]*types.Header{header}); err != nil { | ||||
| 			log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err) | ||||
| 			return | ||||
| 		} | ||||
| 		// Invoke the testing hook if needed
 | ||||
| 		if f.importedHook != nil { | ||||
| 			f.importedHook(header, nil) | ||||
| 		} | ||||
| 	}() | ||||
| } | ||||
| 
 | ||||
| // importBlocks spawns a new goroutine to run a block insertion into the chain. If the
 | ||||
| // block's number is at the same height as the current import phase, it updates
 | ||||
| // the phase states accordingly.
 | ||||
| func (f *BlockFetcher) insert(peer string, block *types.Block) { | ||||
| func (f *BlockFetcher) importBlocks(peer string, block *types.Block) { | ||||
| 	hash := block.Hash() | ||||
| 
 | ||||
| 	// Run the import on a new thread
 | ||||
| @ -711,7 +819,7 @@ func (f *BlockFetcher) insert(peer string, block *types.Block) { | ||||
| 
 | ||||
| 		// Invoke the testing hook if needed
 | ||||
| 		if f.importedHook != nil { | ||||
| 			f.importedHook(block) | ||||
| 			f.importedHook(nil, block) | ||||
| 		} | ||||
| 	}() | ||||
| } | ||||
|  | ||||
| @ -78,26 +78,36 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common | ||||
| type fetcherTester struct { | ||||
| 	fetcher *BlockFetcher | ||||
| 
 | ||||
| 	hashes []common.Hash                // Hash chain belonging to the tester
 | ||||
| 	blocks map[common.Hash]*types.Block // Blocks belonging to the tester
 | ||||
| 	drops  map[string]bool              // Map of peers dropped by the fetcher
 | ||||
| 	hashes  []common.Hash                 // Hash chain belonging to the tester
 | ||||
| 	headers map[common.Hash]*types.Header // Headers belonging to the tester
 | ||||
| 	blocks  map[common.Hash]*types.Block  // Blocks belonging to the tester
 | ||||
| 	drops   map[string]bool               // Map of peers dropped by the fetcher
 | ||||
| 
 | ||||
| 	lock sync.RWMutex | ||||
| } | ||||
| 
 | ||||
| // newTester creates a new fetcher test mocker.
 | ||||
| func newTester() *fetcherTester { | ||||
| func newTester(light bool) *fetcherTester { | ||||
| 	tester := &fetcherTester{ | ||||
| 		hashes: []common.Hash{genesis.Hash()}, | ||||
| 		blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, | ||||
| 		drops:  make(map[string]bool), | ||||
| 		hashes:  []common.Hash{genesis.Hash()}, | ||||
| 		headers: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, | ||||
| 		blocks:  map[common.Hash]*types.Block{genesis.Hash(): genesis}, | ||||
| 		drops:   make(map[string]bool), | ||||
| 	} | ||||
| 	tester.fetcher = NewBlockFetcher(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer) | ||||
| 	tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer) | ||||
| 	tester.fetcher.Start() | ||||
| 
 | ||||
| 	return tester | ||||
| } | ||||
| 
 | ||||
| // getHeader retrieves a header from the tester's block chain.
 | ||||
| func (f *fetcherTester) getHeader(hash common.Hash) *types.Header { | ||||
| 	f.lock.RLock() | ||||
| 	defer f.lock.RUnlock() | ||||
| 
 | ||||
| 	return f.headers[hash] | ||||
| } | ||||
| 
 | ||||
| // getBlock retrieves a block from the tester's block chain.
 | ||||
| func (f *fetcherTester) getBlock(hash common.Hash) *types.Block { | ||||
| 	f.lock.RLock() | ||||
| @ -120,9 +130,33 @@ func (f *fetcherTester) chainHeight() uint64 { | ||||
| 	f.lock.RLock() | ||||
| 	defer f.lock.RUnlock() | ||||
| 
 | ||||
| 	if f.fetcher.light { | ||||
| 		return f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() | ||||
| 	} | ||||
| 	return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() | ||||
| } | ||||
| 
 | ||||
| // insertChain injects a new headers into the simulated chain.
 | ||||
| func (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) { | ||||
| 	f.lock.Lock() | ||||
| 	defer f.lock.Unlock() | ||||
| 
 | ||||
| 	for i, header := range headers { | ||||
| 		// Make sure the parent in known
 | ||||
| 		if _, ok := f.headers[header.ParentHash]; !ok { | ||||
| 			return i, errors.New("unknown parent") | ||||
| 		} | ||||
| 		// Discard any new blocks if the same height already exists
 | ||||
| 		if header.Number.Uint64() <= f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() { | ||||
| 			return i, nil | ||||
| 		} | ||||
| 		// Otherwise build our current chain
 | ||||
| 		f.hashes = append(f.hashes, header.Hash()) | ||||
| 		f.headers[header.Hash()] = header | ||||
| 	} | ||||
| 	return 0, nil | ||||
| } | ||||
| 
 | ||||
| // insertChain injects a new blocks into the simulated chain.
 | ||||
| func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) { | ||||
| 	f.lock.Lock() | ||||
| @ -233,7 +267,7 @@ func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive b | ||||
| } | ||||
| 
 | ||||
| // verifyImportEvent verifies that one single event arrive on an import channel.
 | ||||
| func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) { | ||||
| func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { | ||||
| 	if arrive { | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| @ -251,7 +285,7 @@ func verifyImportEvent(t *testing.T, imported chan *types.Block, arrive bool) { | ||||
| 
 | ||||
| // verifyImportCount verifies that exactly count number of events arrive on an
 | ||||
| // import hook channel.
 | ||||
| func verifyImportCount(t *testing.T, imported chan *types.Block, count int) { | ||||
| func verifyImportCount(t *testing.T, imported chan interface{}, count int) { | ||||
| 	for i := 0; i < count; i++ { | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| @ -263,7 +297,7 @@ func verifyImportCount(t *testing.T, imported chan *types.Block, count int) { | ||||
| } | ||||
| 
 | ||||
| // verifyImportDone verifies that no more events are arriving on an import channel.
 | ||||
| func verifyImportDone(t *testing.T, imported chan *types.Block) { | ||||
| func verifyImportDone(t *testing.T, imported chan interface{}) { | ||||
| 	select { | ||||
| 	case <-imported: | ||||
| 		t.Fatalf("extra block imported") | ||||
| @ -271,45 +305,62 @@ func verifyImportDone(t *testing.T, imported chan *types.Block) { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Tests that a fetcher accepts block announcements and initiates retrievals for
 | ||||
| // them, successfully importing into the local chain.
 | ||||
| func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) } | ||||
| func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) } | ||||
| func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) } | ||||
| // verifyChainHeight verifies the chain height is as expected.
 | ||||
| func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) { | ||||
| 	if fetcher.chainHeight() != height { | ||||
| 		t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func testSequentialAnnouncements(t *testing.T, protocol int) { | ||||
| // Tests that a fetcher accepts block/header announcements and initiates retrievals
 | ||||
| // for them, successfully importing into the local chain.
 | ||||
| func TestFullSequentialAnnouncements(t *testing.T)  { testSequentialAnnouncements(t, false) } | ||||
| func TestLightSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, true) } | ||||
| 
 | ||||
| func testSequentialAnnouncements(t *testing.T, light bool) { | ||||
| 	// Create a chain of blocks to import
 | ||||
| 	targetBlocks := 4 * hashLimit | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
| 
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(light) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
| 
 | ||||
| 	// Iteratively announce blocks until all are imported
 | ||||
| 	imported := make(chan *types.Block) | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 
 | ||||
| 	imported := make(chan interface{}) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
| 	for i := len(hashes) - 2; i >= 0; i-- { | ||||
| 		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 		verifyImportEvent(t, imported, true) | ||||
| 	} | ||||
| 	verifyImportDone(t, imported) | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
| 
 | ||||
| // Tests that if blocks are announced by multiple peers (or even the same buggy
 | ||||
| // peer), they will only get downloaded at most once.
 | ||||
| func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) } | ||||
| func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) } | ||||
| func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) } | ||||
| func TestFullConcurrentAnnouncements(t *testing.T)  { testConcurrentAnnouncements(t, false) } | ||||
| func TestLightConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, true) } | ||||
| 
 | ||||
| func testConcurrentAnnouncements(t *testing.T, protocol int) { | ||||
| func testConcurrentAnnouncements(t *testing.T, light bool) { | ||||
| 	// Create a chain of blocks to import
 | ||||
| 	targetBlocks := 4 * hashLimit | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
| 
 | ||||
| 	// Assemble a tester with a built in counter for the requests
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(light) | ||||
| 	firstHeaderFetcher := tester.makeHeaderFetcher("first", blocks, -gatherSlack) | ||||
| 	firstBodyFetcher := tester.makeBodyFetcher("first", blocks, 0) | ||||
| 	secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack) | ||||
| @ -325,9 +376,20 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) { | ||||
| 		return secondHeaderFetcher(hash) | ||||
| 	} | ||||
| 	// Iteratively announce blocks until all are imported
 | ||||
| 	imported := make(chan *types.Block) | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 
 | ||||
| 	imported := make(chan interface{}) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
| 	for i := len(hashes) - 2; i >= 0; i-- { | ||||
| 		tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), firstHeaderWrapper, firstBodyFetcher) | ||||
| 		tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), secondHeaderWrapper, secondBodyFetcher) | ||||
| @ -340,30 +402,42 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) { | ||||
| 	if int(counter) != targetBlocks { | ||||
| 		t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks) | ||||
| 	} | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
| 
 | ||||
| // Tests that announcements arriving while a previous is being fetched still
 | ||||
| // results in a valid import.
 | ||||
| func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) } | ||||
| func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) } | ||||
| func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) } | ||||
| func TestFullOverlappingAnnouncements(t *testing.T)  { testOverlappingAnnouncements(t, false) } | ||||
| func TestLightOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, true) } | ||||
| 
 | ||||
| func testOverlappingAnnouncements(t *testing.T, protocol int) { | ||||
| func testOverlappingAnnouncements(t *testing.T, light bool) { | ||||
| 	// Create a chain of blocks to import
 | ||||
| 	targetBlocks := 4 * hashLimit | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
| 
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(light) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
| 
 | ||||
| 	// Iteratively announce blocks, but overlap them continuously
 | ||||
| 	overlap := 16 | ||||
| 	imported := make(chan *types.Block, len(hashes)-1) | ||||
| 	imported := make(chan interface{}, len(hashes)-1) | ||||
| 	for i := 0; i < overlap; i++ { | ||||
| 		imported <- nil | ||||
| 	} | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	for i := len(hashes) - 2; i >= 0; i-- { | ||||
| 		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| @ -375,19 +449,19 @@ func testOverlappingAnnouncements(t *testing.T, protocol int) { | ||||
| 	} | ||||
| 	// Wait for all the imports to complete and check count
 | ||||
| 	verifyImportCount(t, imported, overlap) | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
| 
 | ||||
| // Tests that announces already being retrieved will not be duplicated.
 | ||||
| func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) } | ||||
| func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) } | ||||
| func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) } | ||||
| func TestFullPendingDeduplication(t *testing.T)  { testPendingDeduplication(t, false) } | ||||
| func TestLightPendingDeduplication(t *testing.T) { testPendingDeduplication(t, true) } | ||||
| 
 | ||||
| func testPendingDeduplication(t *testing.T, protocol int) { | ||||
| func testPendingDeduplication(t *testing.T, light bool) { | ||||
| 	// Create a hash and corresponding block
 | ||||
| 	hashes, blocks := makeChain(1, 0, genesis) | ||||
| 
 | ||||
| 	// Assemble a tester with a built in counter and delayed fetcher
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(light) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("repeater", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0) | ||||
| 
 | ||||
| @ -403,42 +477,58 @@ func testPendingDeduplication(t *testing.T, protocol int) { | ||||
| 		}() | ||||
| 		return nil | ||||
| 	} | ||||
| 	checkNonExist := func() bool { | ||||
| 		return tester.getBlock(hashes[0]) == nil | ||||
| 	} | ||||
| 	if light { | ||||
| 		checkNonExist = func() bool { | ||||
| 			return tester.getHeader(hashes[0]) == nil | ||||
| 		} | ||||
| 	} | ||||
| 	// Announce the same block many times until it's fetched (wait for any pending ops)
 | ||||
| 	for tester.getBlock(hashes[0]) == nil { | ||||
| 	for checkNonExist() { | ||||
| 		tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher) | ||||
| 		time.Sleep(time.Millisecond) | ||||
| 	} | ||||
| 	time.Sleep(delay) | ||||
| 
 | ||||
| 	// Check that all blocks were imported and none fetched twice
 | ||||
| 	if imported := len(tester.blocks); imported != 2 { | ||||
| 		t.Fatalf("synchronised block mismatch: have %v, want %v", imported, 2) | ||||
| 	} | ||||
| 	if int(counter) != 1 { | ||||
| 		t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1) | ||||
| 	} | ||||
| 	verifyChainHeight(t, tester, 1) | ||||
| } | ||||
| 
 | ||||
| // Tests that announcements retrieved in a random order are cached and eventually
 | ||||
| // imported when all the gaps are filled in.
 | ||||
| func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) } | ||||
| func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) } | ||||
| func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) } | ||||
| func TestFullRandomArrivalImport(t *testing.T)  { testRandomArrivalImport(t, false) } | ||||
| func TestLightRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, true) } | ||||
| 
 | ||||
| func testRandomArrivalImport(t *testing.T, protocol int) { | ||||
| func testRandomArrivalImport(t *testing.T, light bool) { | ||||
| 	// Create a chain of blocks to import, and choose one to delay
 | ||||
| 	targetBlocks := maxQueueDist | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
| 	skip := targetBlocks / 2 | ||||
| 
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(light) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
| 
 | ||||
| 	// Iteratively announce blocks, skipping one entry
 | ||||
| 	imported := make(chan *types.Block, len(hashes)-1) | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 
 | ||||
| 	imported := make(chan interface{}, len(hashes)-1) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
| 	for i := len(hashes) - 1; i >= 0; i-- { | ||||
| 		if i != skip { | ||||
| 			tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| @ -448,27 +538,24 @@ func testRandomArrivalImport(t *testing.T, protocol int) { | ||||
| 	// Finally announce the skipped entry and check full import
 | ||||
| 	tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| 	verifyImportCount(t, imported, len(hashes)-1) | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
| 
 | ||||
| // Tests that direct block enqueues (due to block propagation vs. hash announce)
 | ||||
| // are correctly schedule, filling and import queue gaps.
 | ||||
| func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) } | ||||
| func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) } | ||||
| func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) } | ||||
| 
 | ||||
| func testQueueGapFill(t *testing.T, protocol int) { | ||||
| func TestQueueGapFill(t *testing.T) { | ||||
| 	// Create a chain of blocks to import, and choose one to not announce at all
 | ||||
| 	targetBlocks := maxQueueDist | ||||
| 	hashes, blocks := makeChain(targetBlocks, 0, genesis) | ||||
| 	skip := targetBlocks / 2 | ||||
| 
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(false) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
| 
 | ||||
| 	// Iteratively announce blocks, skipping one entry
 | ||||
| 	imported := make(chan *types.Block, len(hashes)-1) | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 	imported := make(chan interface{}, len(hashes)-1) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } | ||||
| 
 | ||||
| 	for i := len(hashes) - 1; i >= 0; i-- { | ||||
| 		if i != skip { | ||||
| @ -479,20 +566,17 @@ func testQueueGapFill(t *testing.T, protocol int) { | ||||
| 	// Fill the missing block directly as if propagated
 | ||||
| 	tester.fetcher.Enqueue("valid", blocks[hashes[skip]]) | ||||
| 	verifyImportCount(t, imported, len(hashes)-1) | ||||
| 	verifyChainHeight(t, tester, uint64(len(hashes)-1)) | ||||
| } | ||||
| 
 | ||||
| // Tests that blocks arriving from various sources (multiple propagations, hash
 | ||||
| // announces, etc) do not get scheduled for import multiple times.
 | ||||
| func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) } | ||||
| func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) } | ||||
| func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) } | ||||
| 
 | ||||
| func testImportDeduplication(t *testing.T, protocol int) { | ||||
| func TestImportDeduplication(t *testing.T) { | ||||
| 	// Create two blocks to import (one for duplication, the other for stalling)
 | ||||
| 	hashes, blocks := makeChain(2, 0, genesis) | ||||
| 
 | ||||
| 	// Create the tester and wrap the importer with a counter
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(false) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
| 
 | ||||
| @ -503,9 +587,9 @@ func testImportDeduplication(t *testing.T, protocol int) { | ||||
| 	} | ||||
| 	// Instrument the fetching and imported events
 | ||||
| 	fetching := make(chan []common.Hash) | ||||
| 	imported := make(chan *types.Block, len(hashes)-1) | ||||
| 	imported := make(chan interface{}, len(hashes)-1) | ||||
| 	tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes } | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } | ||||
| 
 | ||||
| 	// Announce the duplicating block, wait for retrieval, and also propagate directly
 | ||||
| 	tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| @ -534,7 +618,7 @@ func TestDistantPropagationDiscarding(t *testing.T) { | ||||
| 	low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1 | ||||
| 
 | ||||
| 	// Create a tester and simulate a head block being the middle of the above chain
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(false) | ||||
| 
 | ||||
| 	tester.lock.Lock() | ||||
| 	tester.hashes = []common.Hash{head} | ||||
| @ -558,11 +642,10 @@ func TestDistantPropagationDiscarding(t *testing.T) { | ||||
| // Tests that announcements with numbers much lower or higher than out current
 | ||||
| // head get discarded to prevent wasting resources on useless blocks from faulty
 | ||||
| // peers.
 | ||||
| func TestDistantAnnouncementDiscarding62(t *testing.T) { testDistantAnnouncementDiscarding(t, 62) } | ||||
| func TestDistantAnnouncementDiscarding63(t *testing.T) { testDistantAnnouncementDiscarding(t, 63) } | ||||
| func TestDistantAnnouncementDiscarding64(t *testing.T) { testDistantAnnouncementDiscarding(t, 64) } | ||||
| func TestFullDistantAnnouncementDiscarding(t *testing.T)  { testDistantAnnouncementDiscarding(t, false) } | ||||
| func TestLightDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, true) } | ||||
| 
 | ||||
| func testDistantAnnouncementDiscarding(t *testing.T, protocol int) { | ||||
| func testDistantAnnouncementDiscarding(t *testing.T, light bool) { | ||||
| 	// Create a long chain to import and define the discard boundaries
 | ||||
| 	hashes, blocks := makeChain(3*maxQueueDist, 0, genesis) | ||||
| 	head := hashes[len(hashes)/2] | ||||
| @ -570,10 +653,11 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) { | ||||
| 	low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1 | ||||
| 
 | ||||
| 	// Create a tester and simulate a head block being the middle of the above chain
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(light) | ||||
| 
 | ||||
| 	tester.lock.Lock() | ||||
| 	tester.hashes = []common.Hash{head} | ||||
| 	tester.headers = map[common.Hash]*types.Header{head: blocks[head].Header()} | ||||
| 	tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} | ||||
| 	tester.lock.Unlock() | ||||
| 
 | ||||
| @ -601,21 +685,31 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) { | ||||
| 
 | ||||
| // Tests that peers announcing blocks with invalid numbers (i.e. not matching
 | ||||
| // the headers provided afterwards) get dropped as malicious.
 | ||||
| func TestInvalidNumberAnnouncement62(t *testing.T) { testInvalidNumberAnnouncement(t, 62) } | ||||
| func TestInvalidNumberAnnouncement63(t *testing.T) { testInvalidNumberAnnouncement(t, 63) } | ||||
| func TestInvalidNumberAnnouncement64(t *testing.T) { testInvalidNumberAnnouncement(t, 64) } | ||||
| func TestFullInvalidNumberAnnouncement(t *testing.T)  { testInvalidNumberAnnouncement(t, false) } | ||||
| func TestLightInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, true) } | ||||
| 
 | ||||
| func testInvalidNumberAnnouncement(t *testing.T, protocol int) { | ||||
| func testInvalidNumberAnnouncement(t *testing.T, light bool) { | ||||
| 	// Create a single block to import and check numbers against
 | ||||
| 	hashes, blocks := makeChain(1, 0, genesis) | ||||
| 
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(light) | ||||
| 	badHeaderFetcher := tester.makeHeaderFetcher("bad", blocks, -gatherSlack) | ||||
| 	badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0) | ||||
| 
 | ||||
| 	imported := make(chan *types.Block) | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 
 | ||||
| 	imported := make(chan interface{}) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if light { | ||||
| 			if header == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty header") | ||||
| 			} | ||||
| 			imported <- header | ||||
| 		} else { | ||||
| 			if block == nil { | ||||
| 				t.Fatalf("Fetcher try to import empty block") | ||||
| 			} | ||||
| 			imported <- block | ||||
| 		} | ||||
| 	} | ||||
| 	// Announce a block with a bad number, check for immediate drop
 | ||||
| 	tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher) | ||||
| 	verifyImportEvent(t, imported, false) | ||||
| @ -646,15 +740,11 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) { | ||||
| 
 | ||||
| // Tests that if a block is empty (i.e. header only), no body request should be
 | ||||
| // made, and instead the header should be assembled into a whole block in itself.
 | ||||
| func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) } | ||||
| func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) } | ||||
| func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) } | ||||
| 
 | ||||
| func testEmptyBlockShortCircuit(t *testing.T, protocol int) { | ||||
| func TestEmptyBlockShortCircuit(t *testing.T) { | ||||
| 	// Create a chain of blocks to import
 | ||||
| 	hashes, blocks := makeChain(32, 0, genesis) | ||||
| 
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(false) | ||||
| 	headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) | ||||
| 	bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) | ||||
| 
 | ||||
| @ -665,9 +755,13 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) { | ||||
| 	completing := make(chan []common.Hash) | ||||
| 	tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes } | ||||
| 
 | ||||
| 	imported := make(chan *types.Block) | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 
 | ||||
| 	imported := make(chan interface{}) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { | ||||
| 		if block == nil { | ||||
| 			t.Fatalf("Fetcher try to import empty block") | ||||
| 		} | ||||
| 		imported <- block | ||||
| 	} | ||||
| 	// Iteratively announce blocks until all are imported
 | ||||
| 	for i := len(hashes) - 2; i >= 0; i-- { | ||||
| 		tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) | ||||
| @ -687,16 +781,12 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) { | ||||
| // Tests that a peer is unable to use unbounded memory with sending infinite
 | ||||
| // block announcements to a node, but that even in the face of such an attack,
 | ||||
| // the fetcher remains operational.
 | ||||
| func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) } | ||||
| func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) } | ||||
| func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) } | ||||
| 
 | ||||
| func testHashMemoryExhaustionAttack(t *testing.T, protocol int) { | ||||
| func TestHashMemoryExhaustionAttack(t *testing.T) { | ||||
| 	// Create a tester with instrumented import hooks
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(false) | ||||
| 
 | ||||
| 	imported, announces := make(chan *types.Block), int32(0) | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 	imported, announces := make(chan interface{}), int32(0) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } | ||||
| 	tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) { | ||||
| 		if added { | ||||
| 			atomic.AddInt32(&announces, 1) | ||||
| @ -740,10 +830,10 @@ func testHashMemoryExhaustionAttack(t *testing.T, protocol int) { | ||||
| // system memory.
 | ||||
| func TestBlockMemoryExhaustionAttack(t *testing.T) { | ||||
| 	// Create a tester with instrumented import hooks
 | ||||
| 	tester := newTester() | ||||
| 	tester := newTester(false) | ||||
| 
 | ||||
| 	imported, enqueued := make(chan *types.Block), int32(0) | ||||
| 	tester.fetcher.importedHook = func(block *types.Block) { imported <- block } | ||||
| 	imported, enqueued := make(chan interface{}), int32(0) | ||||
| 	tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } | ||||
| 	tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) { | ||||
| 		if added { | ||||
| 			atomic.AddInt32(&enqueued, 1) | ||||
|  | ||||
| @ -188,7 +188,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh | ||||
| 		} | ||||
| 		return n, err | ||||
| 	} | ||||
| 	manager.blockFetcher = fetcher.NewBlockFetcher(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer) | ||||
| 	manager.blockFetcher = fetcher.NewBlockFetcher(false, nil, blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, nil, inserter, manager.removePeer) | ||||
| 
 | ||||
| 	fetchTx := func(peer string, hashes []common.Hash) error { | ||||
| 		p := manager.peers.Peer(peer) | ||||
|  | ||||
| @ -269,7 +269,7 @@ func (s *LightEthereum) EventMux() *event.TypeMux           { return s.eventMux | ||||
| // network protocols to start.
 | ||||
| func (s *LightEthereum) Protocols() []p2p.Protocol { | ||||
| 	return s.makeProtocols(ClientProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} { | ||||
| 		if p := s.peers.peer(peerIdToString(id)); p != nil { | ||||
| 		if p := s.peers.peer(id.String()); p != nil { | ||||
| 			return p.Info() | ||||
| 		} | ||||
| 		return nil | ||||
| @ -285,6 +285,7 @@ func (s *LightEthereum) Start(srvr *p2p.Server) error { | ||||
| 	// Start bloom request workers.
 | ||||
| 	s.wg.Add(bloomServiceThreads) | ||||
| 	s.startBloomHandlers(params.BloomBitsBlocksClient) | ||||
| 	s.handler.start() | ||||
| 
 | ||||
| 	s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.config.NetworkId) | ||||
| 	return nil | ||||
|  | ||||
| @ -64,16 +64,20 @@ func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.T | ||||
| 	if checkpoint != nil { | ||||
| 		height = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1 | ||||
| 	} | ||||
| 	handler.fetcher = newLightFetcher(handler, backend.serverPool.getTimeout) | ||||
| 	handler.fetcher = newLightFetcher(backend.blockchain, backend.engine, backend.peers, handler.ulc, backend.chainDb, backend.reqDist, handler.synchronise) | ||||
| 	handler.downloader = downloader.New(height, backend.chainDb, nil, backend.eventMux, nil, backend.blockchain, handler.removePeer) | ||||
| 	handler.backend.peers.subscribe((*downloaderPeerNotify)(handler)) | ||||
| 	return handler | ||||
| } | ||||
| 
 | ||||
| func (h *clientHandler) start() { | ||||
| 	h.fetcher.start() | ||||
| } | ||||
| 
 | ||||
| func (h *clientHandler) stop() { | ||||
| 	close(h.closeCh) | ||||
| 	h.downloader.Terminate() | ||||
| 	h.fetcher.close() | ||||
| 	h.fetcher.stop() | ||||
| 	h.wg.Wait() | ||||
| } | ||||
| 
 | ||||
| @ -121,7 +125,6 @@ func (h *clientHandler) handle(p *serverPeer) error { | ||||
| 		connectionTimer.Update(time.Duration(mclock.Now() - connectedAt)) | ||||
| 		serverConnectionGauge.Update(int64(h.backend.peers.len())) | ||||
| 	}() | ||||
| 
 | ||||
| 	h.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td}) | ||||
| 
 | ||||
| 	// Mark the peer starts to be served.
 | ||||
| @ -185,6 +188,9 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { | ||||
| 				p.Log().Trace("Valid announcement signature") | ||||
| 			} | ||||
| 			p.Log().Trace("Announce message content", "number", req.Number, "hash", req.Hash, "td", req.Td, "reorg", req.ReorgDepth) | ||||
| 
 | ||||
| 			// Update peer head information first and then notify the announcement
 | ||||
| 			p.updateHead(req.Hash, req.Number, req.Td) | ||||
| 			h.fetcher.announce(p, &req) | ||||
| 		} | ||||
| 	case BlockHeadersMsg: | ||||
| @ -196,12 +202,17 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { | ||||
| 		if err := msg.Decode(&resp); err != nil { | ||||
| 			return errResp(ErrDecode, "msg %v: %v", msg, err) | ||||
| 		} | ||||
| 		headers := resp.Headers | ||||
| 		p.fcServer.ReceivedReply(resp.ReqID, resp.BV) | ||||
| 		p.answeredRequest(resp.ReqID) | ||||
| 		if h.fetcher.requestedID(resp.ReqID) { | ||||
| 			h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers) | ||||
| 		} else { | ||||
| 			if err := h.downloader.DeliverHeaders(p.id, resp.Headers); err != nil { | ||||
| 
 | ||||
| 		// Filter out any explicitly requested headers, deliver the rest to the downloader
 | ||||
| 		filter := len(headers) == 1 | ||||
| 		if filter { | ||||
| 			headers = h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers) | ||||
| 		} | ||||
| 		if len(headers) != 0 || !filter { | ||||
| 			if err := h.downloader.DeliverHeaders(p.id, headers); err != nil { | ||||
| 				log.Debug("Failed to deliver headers", "err", err) | ||||
| 			} | ||||
| 		} | ||||
| @ -320,8 +331,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { | ||||
| 	// Deliver the received response to retriever.
 | ||||
| 	if deliverMsg != nil { | ||||
| 		if err := h.backend.retriever.deliver(p, deliverMsg); err != nil { | ||||
| 			p.errCount++ | ||||
| 			if p.errCount > maxResponseErrors { | ||||
| 			if val := p.errCount.Add(1, mclock.Now()); val > maxResponseErrors { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| @ -212,7 +212,7 @@ func (f *clientPool) connect(peer clientPoolPeer, capacity uint64) bool { | ||||
| 	id, freeID := peer.ID(), peer.freeClientId() | ||||
| 	if _, ok := f.connectedMap[id]; ok { | ||||
| 		clientRejectedMeter.Mark(1) | ||||
| 		log.Debug("Client already connected", "address", freeID, "id", peerIdToString(id)) | ||||
| 		log.Debug("Client already connected", "address", freeID, "id", id.String()) | ||||
| 		return false | ||||
| 	} | ||||
| 	// Create a clientInfo but do not add it yet
 | ||||
| @ -277,7 +277,7 @@ func (f *clientPool) connect(peer clientPoolPeer, capacity uint64) bool { | ||||
| 				f.connectedQueue.Push(c) | ||||
| 			} | ||||
| 			clientRejectedMeter.Mark(1) | ||||
| 			log.Debug("Client rejected", "address", freeID, "id", peerIdToString(id)) | ||||
| 			log.Debug("Client rejected", "address", freeID, "id", id.String()) | ||||
| 			return false | ||||
| 		} | ||||
| 		// accept new client, drop old ones
 | ||||
| @ -322,7 +322,7 @@ func (f *clientPool) disconnect(p clientPoolPeer) { | ||||
| 	// Short circuit if the peer hasn't been registered.
 | ||||
| 	e := f.connectedMap[p.ID()] | ||||
| 	if e == nil { | ||||
| 		log.Debug("Client not connected", "address", p.freeClientId(), "id", peerIdToString(p.ID())) | ||||
| 		log.Debug("Client not connected", "address", p.freeClientId(), "id", p.ID().String()) | ||||
| 		return | ||||
| 	} | ||||
| 	f.dropClient(e, f.clock.Now(), false) | ||||
|  | ||||
							
								
								
									
										1301
									
								
								les/fetcher.go
									
									
									
									
									
								
							
							
						
						
									
										1301
									
								
								les/fetcher.go
									
									
									
									
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										268
									
								
								les/fetcher_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										268
									
								
								les/fetcher_test.go
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,268 @@ | ||||
| // Copyright 2020 The go-ethereum Authors
 | ||||
| // This file is part of the go-ethereum library.
 | ||||
| //
 | ||||
| // The go-ethereum library is free software: you can redistribute it and/or modify
 | ||||
| // it under the terms of the GNU Lesser General Public License as published by
 | ||||
| // the Free Software Foundation, either version 3 of the License, or
 | ||||
| // (at your option) any later version.
 | ||||
| //
 | ||||
| // The go-ethereum library is distributed in the hope that it will be useful,
 | ||||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of
 | ||||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 | ||||
| // GNU Lesser General Public License for more details.
 | ||||
| //
 | ||||
| // You should have received a copy of the GNU Lesser General Public License
 | ||||
| // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 | ||||
| 
 | ||||
| package les | ||||
| 
 | ||||
| import ( | ||||
| 	"math/big" | ||||
| 	"testing" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/consensus/ethash" | ||||
| 	"github.com/ethereum/go-ethereum/core" | ||||
| 	"github.com/ethereum/go-ethereum/core/rawdb" | ||||
| 	"github.com/ethereum/go-ethereum/core/types" | ||||
| 	"github.com/ethereum/go-ethereum/p2p/enode" | ||||
| ) | ||||
| 
 | ||||
| // verifyImportEvent verifies that one single event arrive on an import channel.
 | ||||
| func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { | ||||
| 	if arrive { | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| 		case <-time.After(time.Second): | ||||
| 			t.Fatalf("import timeout") | ||||
| 		} | ||||
| 	} else { | ||||
| 		select { | ||||
| 		case <-imported: | ||||
| 			t.Fatalf("import invoked") | ||||
| 		case <-time.After(20 * time.Millisecond): | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // verifyImportDone verifies that no more events are arriving on an import channel.
 | ||||
| func verifyImportDone(t *testing.T, imported chan interface{}) { | ||||
| 	select { | ||||
| 	case <-imported: | ||||
| 		t.Fatalf("extra block imported") | ||||
| 	case <-time.After(50 * time.Millisecond): | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // verifyChainHeight verifies the chain height is as expected.
 | ||||
| func verifyChainHeight(t *testing.T, fetcher *lightFetcher, height uint64) { | ||||
| 	local := fetcher.chain.CurrentHeader().Number.Uint64() | ||||
| 	if local != height { | ||||
| 		t.Fatalf("chain height mismatch, got %d, want %d", local, height) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestSequentialAnnouncementsLes2(t *testing.T) { testSequentialAnnouncements(t, 2) } | ||||
| func TestSequentialAnnouncementsLes3(t *testing.T) { testSequentialAnnouncements(t, 3) } | ||||
| 
 | ||||
| func testSequentialAnnouncements(t *testing.T, protocol int) { | ||||
| 	s, c, teardown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, false) | ||||
| 	defer teardown() | ||||
| 
 | ||||
| 	// Create connected peer pair.
 | ||||
| 	c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
 | ||||
| 	p1, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create peer pair %v", err) | ||||
| 	} | ||||
| 	c.handler.fetcher.noAnnounce = false | ||||
| 
 | ||||
| 	importCh := make(chan interface{}) | ||||
| 	c.handler.fetcher.newHeadHook = func(header *types.Header) { | ||||
| 		importCh <- header | ||||
| 	} | ||||
| 	for i := uint64(1); i <= s.backend.Blockchain().CurrentHeader().Number.Uint64(); i++ { | ||||
| 		header := s.backend.Blockchain().GetHeaderByNumber(i) | ||||
| 		hash, number := header.Hash(), header.Number.Uint64() | ||||
| 		td := rawdb.ReadTd(s.db, hash, number) | ||||
| 
 | ||||
| 		announce := announceData{hash, number, td, 0, nil} | ||||
| 		if p1.cpeer.announceType == announceTypeSigned { | ||||
| 			announce.sign(s.handler.server.privateKey) | ||||
| 		} | ||||
| 		p1.cpeer.sendAnnounce(announce) | ||||
| 		verifyImportEvent(t, importCh, true) | ||||
| 	} | ||||
| 	verifyImportDone(t, importCh) | ||||
| 	verifyChainHeight(t, c.handler.fetcher, 4) | ||||
| } | ||||
| 
 | ||||
| func TestGappedAnnouncementsLes2(t *testing.T) { testGappedAnnouncements(t, 2) } | ||||
| func TestGappedAnnouncementsLes3(t *testing.T) { testGappedAnnouncements(t, 3) } | ||||
| 
 | ||||
| func testGappedAnnouncements(t *testing.T, protocol int) { | ||||
| 	s, c, teardown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, false) | ||||
| 	defer teardown() | ||||
| 
 | ||||
| 	// Create connected peer pair.
 | ||||
| 	c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
 | ||||
| 	peer, _, err := newTestPeerPair("peer", protocol, s.handler, c.handler) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create peer pair %v", err) | ||||
| 	} | ||||
| 	c.handler.fetcher.noAnnounce = false | ||||
| 
 | ||||
| 	done := make(chan *types.Header, 1) | ||||
| 	c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header } | ||||
| 
 | ||||
| 	// Prepare announcement by latest header.
 | ||||
| 	latest := s.backend.Blockchain().CurrentHeader() | ||||
| 	hash, number := latest.Hash(), latest.Number.Uint64() | ||||
| 	td := rawdb.ReadTd(s.db, hash, number) | ||||
| 
 | ||||
| 	// Sign the announcement if necessary.
 | ||||
| 	announce := announceData{hash, number, td, 0, nil} | ||||
| 	if peer.cpeer.announceType == announceTypeSigned { | ||||
| 		announce.sign(s.handler.server.privateKey) | ||||
| 	} | ||||
| 	peer.cpeer.sendAnnounce(announce) | ||||
| 
 | ||||
| 	<-done // Wait syncing
 | ||||
| 	verifyChainHeight(t, c.handler.fetcher, 4) | ||||
| 
 | ||||
| 	// Send a reorged announcement
 | ||||
| 	var newAnno = make(chan struct{}, 1) | ||||
| 	c.handler.fetcher.noAnnounce = true | ||||
| 	c.handler.fetcher.newAnnounce = func(*serverPeer, *announceData) { | ||||
| 		newAnno <- struct{}{} | ||||
| 	} | ||||
| 	blocks, _ := core.GenerateChain(rawdb.ReadChainConfig(s.db, s.backend.Blockchain().Genesis().Hash()), s.backend.Blockchain().GetBlockByNumber(3), | ||||
| 		ethash.NewFaker(), s.db, 2, func(i int, gen *core.BlockGen) { | ||||
| 			gen.OffsetTime(-9) // higher block difficulty
 | ||||
| 		}) | ||||
| 	s.backend.Blockchain().InsertChain(blocks) | ||||
| 	<-newAnno | ||||
| 	c.handler.fetcher.noAnnounce = false | ||||
| 	c.handler.fetcher.newAnnounce = nil | ||||
| 
 | ||||
| 	latest = blocks[len(blocks)-1].Header() | ||||
| 	hash, number = latest.Hash(), latest.Number.Uint64() | ||||
| 	td = rawdb.ReadTd(s.db, hash, number) | ||||
| 
 | ||||
| 	announce = announceData{hash, number, td, 1, nil} | ||||
| 	if peer.cpeer.announceType == announceTypeSigned { | ||||
| 		announce.sign(s.handler.server.privateKey) | ||||
| 	} | ||||
| 	peer.cpeer.sendAnnounce(announce) | ||||
| 
 | ||||
| 	<-done // Wait syncing
 | ||||
| 	verifyChainHeight(t, c.handler.fetcher, 5) | ||||
| } | ||||
| 
 | ||||
| func TestTrustedAnnouncementsLes2(t *testing.T) { testTrustedAnnouncement(t, 2) } | ||||
| func TestTrustedAnnouncementsLes3(t *testing.T) { testTrustedAnnouncement(t, 3) } | ||||
| 
 | ||||
| func testTrustedAnnouncement(t *testing.T, protocol int) { | ||||
| 	var ( | ||||
| 		servers   []*testServer | ||||
| 		teardowns []func() | ||||
| 		nodes     []*enode.Node | ||||
| 		ids       []string | ||||
| 		cpeers    []*clientPeer | ||||
| 		speers    []*serverPeer | ||||
| 	) | ||||
| 	for i := 0; i < 10; i++ { | ||||
| 		s, n, teardown := newTestServerPeer(t, 10, protocol) | ||||
| 
 | ||||
| 		servers = append(servers, s) | ||||
| 		nodes = append(nodes, n) | ||||
| 		teardowns = append(teardowns, teardown) | ||||
| 
 | ||||
| 		// A half of them are trusted servers.
 | ||||
| 		if i < 5 { | ||||
| 			ids = append(ids, n.String()) | ||||
| 		} | ||||
| 	} | ||||
| 	_, c, teardown := newClientServerEnv(t, 0, protocol, nil, ids, 60, false, false) | ||||
| 	defer teardown() | ||||
| 	defer func() { | ||||
| 		for i := 0; i < len(teardowns); i++ { | ||||
| 			teardowns[i]() | ||||
| 		} | ||||
| 	}() | ||||
| 
 | ||||
| 	c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
 | ||||
| 
 | ||||
| 	// Connect all server instances.
 | ||||
| 	for i := 0; i < len(servers); i++ { | ||||
| 		sp, cp, err := connect(servers[i].handler, nodes[i].ID(), c.handler, protocol) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("connect server and client failed, err %s", err) | ||||
| 		} | ||||
| 		cpeers = append(cpeers, cp) | ||||
| 		speers = append(speers, sp) | ||||
| 	} | ||||
| 	c.handler.fetcher.noAnnounce = false | ||||
| 
 | ||||
| 	newHead := make(chan *types.Header, 1) | ||||
| 	c.handler.fetcher.newHeadHook = func(header *types.Header) { newHead <- header } | ||||
| 
 | ||||
| 	check := func(height []uint64, expected uint64, callback func()) { | ||||
| 		for i := 0; i < len(height); i++ { | ||||
| 			for j := 0; j < len(servers); j++ { | ||||
| 				h := servers[j].backend.Blockchain().GetHeaderByNumber(height[i]) | ||||
| 				hash, number := h.Hash(), h.Number.Uint64() | ||||
| 				td := rawdb.ReadTd(servers[j].db, hash, number) | ||||
| 
 | ||||
| 				// Sign the announcement if necessary.
 | ||||
| 				announce := announceData{hash, number, td, 0, nil} | ||||
| 				p := cpeers[j] | ||||
| 				if p.announceType == announceTypeSigned { | ||||
| 					announce.sign(servers[j].handler.server.privateKey) | ||||
| 				} | ||||
| 				p.sendAnnounce(announce) | ||||
| 			} | ||||
| 		} | ||||
| 		if callback != nil { | ||||
| 			callback() | ||||
| 		} | ||||
| 		verifyChainHeight(t, c.handler.fetcher, expected) | ||||
| 	} | ||||
| 	check([]uint64{1}, 1, func() { <-newHead })   // Sequential announcements
 | ||||
| 	check([]uint64{4}, 4, func() { <-newHead })   // ULC-style light syncing, rollback untrusted headers
 | ||||
| 	check([]uint64{10}, 10, func() { <-newHead }) // Sync the whole chain.
 | ||||
| } | ||||
| 
 | ||||
| func TestInvalidAnnounces(t *testing.T) { | ||||
| 	s, c, teardown := newClientServerEnv(t, 4, lpv3, nil, nil, 0, false, false) | ||||
| 	defer teardown() | ||||
| 
 | ||||
| 	// Create connected peer pair.
 | ||||
| 	c.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.
 | ||||
| 	peer, _, err := newTestPeerPair("peer", lpv3, s.handler, c.handler) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create peer pair %v", err) | ||||
| 	} | ||||
| 	c.handler.fetcher.noAnnounce = false | ||||
| 
 | ||||
| 	done := make(chan *types.Header, 1) | ||||
| 	c.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header } | ||||
| 
 | ||||
| 	// Prepare announcement by latest header.
 | ||||
| 	headerOne := s.backend.Blockchain().GetHeaderByNumber(1) | ||||
| 	hash, number := headerOne.Hash(), headerOne.Number.Uint64() | ||||
| 	td := big.NewInt(200) // bad td
 | ||||
| 
 | ||||
| 	// Sign the announcement if necessary.
 | ||||
| 	announce := announceData{hash, number, td, 0, nil} | ||||
| 	if peer.cpeer.announceType == announceTypeSigned { | ||||
| 		announce.sign(s.handler.server.privateKey) | ||||
| 	} | ||||
| 	peer.cpeer.sendAnnounce(announce) | ||||
| 	<-done // Wait syncing
 | ||||
| 
 | ||||
| 	// Ensure the bad peer is evicited
 | ||||
| 	if c.handler.backend.peers.len() != 0 { | ||||
| 		t.Fatalf("Failed to evict invalid peer") | ||||
| 	} | ||||
| } | ||||
| @ -222,13 +222,13 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od | ||||
| 
 | ||||
| 	// expect retrievals to fail (except genesis block) without a les peer
 | ||||
| 	client.handler.backend.peers.lock.Lock() | ||||
| 	client.peer.speer.hasBlock = func(common.Hash, uint64, bool) bool { return false } | ||||
| 	client.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return false } | ||||
| 	client.handler.backend.peers.lock.Unlock() | ||||
| 	test(expFail) | ||||
| 
 | ||||
| 	// expect all retrievals to pass
 | ||||
| 	client.handler.backend.peers.lock.Lock() | ||||
| 	client.peer.speer.hasBlock = func(common.Hash, uint64, bool) bool { return true } | ||||
| 	client.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return true } | ||||
| 	client.handler.backend.peers.lock.Unlock() | ||||
| 	test(5) | ||||
| 
 | ||||
|  | ||||
							
								
								
									
										64
									
								
								les/peer.go
									
									
									
									
									
								
							
							
						
						
									
										64
									
								
								les/peer.go
									
									
									
									
									
								
							| @ -36,7 +36,6 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/les/utils" | ||||
| 	"github.com/ethereum/go-ethereum/light" | ||||
| 	"github.com/ethereum/go-ethereum/p2p" | ||||
| 	"github.com/ethereum/go-ethereum/p2p/enode" | ||||
| 	"github.com/ethereum/go-ethereum/params" | ||||
| 	"github.com/ethereum/go-ethereum/rlp" | ||||
| ) | ||||
| @ -115,11 +114,6 @@ func (m keyValueMap) get(key string, val interface{}) error { | ||||
| 	return rlp.DecodeBytes(enc, val) | ||||
| } | ||||
| 
 | ||||
| // peerIdToString converts enode.ID to a string form
 | ||||
| func peerIdToString(id enode.ID) string { | ||||
| 	return fmt.Sprintf("%x", id.Bytes()) | ||||
| } | ||||
| 
 | ||||
| // peerCommons contains fields needed by both server peer and client peer.
 | ||||
| type peerCommons struct { | ||||
| 	*p2p.Peer | ||||
| @ -343,12 +337,12 @@ type serverPeer struct { | ||||
| 	sentReqs         map[uint64]sentReqEntry | ||||
| 
 | ||||
| 	// Statistics
 | ||||
| 	errCount    int // Counter the invalid responses server has replied
 | ||||
| 	errCount    utils.LinearExpiredValue // Counter the invalid responses server has replied
 | ||||
| 	updateCount uint64 | ||||
| 	updateTime  mclock.AbsTime | ||||
| 
 | ||||
| 	// Callbacks
 | ||||
| 	hasBlock func(common.Hash, uint64, bool) bool // Used to determine whether the server has the specified block.
 | ||||
| 	// Test callback hooks
 | ||||
| 	hasBlockHook func(common.Hash, uint64, bool) bool // Used to determine whether the server has the specified block.
 | ||||
| } | ||||
| 
 | ||||
| func newServerPeer(version int, network uint64, trusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *serverPeer { | ||||
| @ -356,13 +350,14 @@ func newServerPeer(version int, network uint64, trusted bool, p *p2p.Peer, rw p2 | ||||
| 		peerCommons: peerCommons{ | ||||
| 			Peer:      p, | ||||
| 			rw:        rw, | ||||
| 			id:        peerIdToString(p.ID()), | ||||
| 			id:        p.ID().String(), | ||||
| 			version:   version, | ||||
| 			network:   network, | ||||
| 			sendQueue: utils.NewExecQueue(100), | ||||
| 			closeCh:   make(chan struct{}), | ||||
| 		}, | ||||
| 		trusted: trusted, | ||||
| 		trusted:  trusted, | ||||
| 		errCount: utils.LinearExpiredValue{Rate: mclock.AbsTime(time.Hour)}, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| @ -524,7 +519,11 @@ func (p *serverPeer) getTxRelayCost(amount, size int) uint64 { | ||||
| // HasBlock checks if the peer has a given block
 | ||||
| func (p *serverPeer) HasBlock(hash common.Hash, number uint64, hasState bool) bool { | ||||
| 	p.lock.RLock() | ||||
| 	defer p.lock.RUnlock() | ||||
| 
 | ||||
| 	if p.hasBlockHook != nil { | ||||
| 		return p.hasBlockHook(hash, number, hasState) | ||||
| 	} | ||||
| 	head := p.headInfo.Number | ||||
| 	var since, recent uint64 | ||||
| 	if hasState { | ||||
| @ -534,10 +533,7 @@ func (p *serverPeer) HasBlock(hash common.Hash, number uint64, hasState bool) bo | ||||
| 		since = p.chainSince | ||||
| 		recent = p.chainRecent | ||||
| 	} | ||||
| 	hasBlock := p.hasBlock | ||||
| 	p.lock.RUnlock() | ||||
| 
 | ||||
| 	return head >= number && number >= since && (recent == 0 || number+recent+4 > head) && hasBlock != nil && hasBlock(hash, number, hasState) | ||||
| 	return head >= number && number >= since && (recent == 0 || number+recent+4 > head) | ||||
| } | ||||
| 
 | ||||
| // updateFlowControl updates the flow control parameters belonging to the server
 | ||||
| @ -562,6 +558,15 @@ func (p *serverPeer) updateFlowControl(update keyValueMap) { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // updateHead updates the head information based on the announcement from
 | ||||
| // the peer.
 | ||||
| func (p *serverPeer) updateHead(hash common.Hash, number uint64, td *big.Int) { | ||||
| 	p.lock.Lock() | ||||
| 	defer p.lock.Unlock() | ||||
| 
 | ||||
| 	p.headInfo = blockInfo{Hash: hash, Number: number, Td: td} | ||||
| } | ||||
| 
 | ||||
| // Handshake executes the les protocol handshake, negotiating version number,
 | ||||
| // network IDs, difficulties, head and genesis blocks.
 | ||||
| func (p *serverPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error { | ||||
| @ -712,11 +717,15 @@ type clientPeer struct { | ||||
| 	// responseLock ensures that responses are queued in the same order as
 | ||||
| 	// RequestProcessed is called
 | ||||
| 	responseLock  sync.Mutex | ||||
| 	server        bool | ||||
| 	invalidCount  uint32 // Counter the invalid request the client peer has made.
 | ||||
| 	responseCount uint64 // Counter to generate an unique id for request processing.
 | ||||
| 	errCh         chan error | ||||
| 	fcClient      *flowcontrol.ClientNode // Server side mirror token bucket.
 | ||||
| 
 | ||||
| 	// invalidLock is used for protecting invalidCount.
 | ||||
| 	invalidLock  sync.RWMutex | ||||
| 	invalidCount utils.LinearExpiredValue // Counter the invalid request the client peer has made.
 | ||||
| 
 | ||||
| 	server   bool | ||||
| 	errCh    chan error | ||||
| 	fcClient *flowcontrol.ClientNode // Server side mirror token bucket.
 | ||||
| } | ||||
| 
 | ||||
| func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *clientPeer { | ||||
| @ -724,13 +733,14 @@ func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWrite | ||||
| 		peerCommons: peerCommons{ | ||||
| 			Peer:      p, | ||||
| 			rw:        rw, | ||||
| 			id:        peerIdToString(p.ID()), | ||||
| 			id:        p.ID().String(), | ||||
| 			version:   version, | ||||
| 			network:   network, | ||||
| 			sendQueue: utils.NewExecQueue(100), | ||||
| 			closeCh:   make(chan struct{}), | ||||
| 		}, | ||||
| 		errCh: make(chan error, 1), | ||||
| 		invalidCount: utils.LinearExpiredValue{Rate: mclock.AbsTime(time.Hour)}, | ||||
| 		errCh:        make(chan error, 1), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| @ -970,6 +980,18 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
| func (p *clientPeer) bumpInvalid() { | ||||
| 	p.invalidLock.Lock() | ||||
| 	p.invalidCount.Add(1, mclock.Now()) | ||||
| 	p.invalidLock.Unlock() | ||||
| } | ||||
| 
 | ||||
| func (p *clientPeer) getInvalid() uint64 { | ||||
| 	p.invalidLock.RLock() | ||||
| 	defer p.invalidLock.RUnlock() | ||||
| 	return p.invalidCount.Value(mclock.Now()) | ||||
| } | ||||
| 
 | ||||
| // serverPeerSubscriber is an interface to notify services about added or
 | ||||
| // removed server peers
 | ||||
| type serverPeerSubscriber interface { | ||||
|  | ||||
| @ -116,7 +116,7 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) { | ||||
| 		srv.maxCapacity = totalRecharge | ||||
| 	} | ||||
| 	srv.fcManager.SetCapacityLimits(srv.freeCapacity, srv.maxCapacity, srv.freeCapacity*2) | ||||
| 	srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(peerIdToString(id)) }) | ||||
| 	srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.unregister(id.String()) }) | ||||
| 	srv.clientPool.setDefaultFactors(priceFactors{0, 1, 1}, priceFactors{0, 1, 1}) | ||||
| 
 | ||||
| 	checkpoint := srv.latestLocalCheckpoint() | ||||
| @ -153,7 +153,7 @@ func (s *LesServer) APIs() []rpc.API { | ||||
| 
 | ||||
| func (s *LesServer) Protocols() []p2p.Protocol { | ||||
| 	ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} { | ||||
| 		if p := s.peers.peer(peerIdToString(id)); p != nil { | ||||
| 		if p := s.peers.peer(id.String()); p != nil { | ||||
| 			return p.Info() | ||||
| 		} | ||||
| 		return nil | ||||
|  | ||||
| @ -322,7 +322,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 						origin = h.blockchain.GetHeaderByNumber(query.Origin.Number) | ||||
| 					} | ||||
| 					if origin == nil { | ||||
| 						atomic.AddUint32(&p.invalidCount, 1) | ||||
| 						p.bumpInvalid() | ||||
| 						break | ||||
| 					} | ||||
| 					headers = append(headers, origin) | ||||
| @ -419,7 +419,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 					} | ||||
| 					body := h.blockchain.GetBodyRLP(hash) | ||||
| 					if body == nil { | ||||
| 						atomic.AddUint32(&p.invalidCount, 1) | ||||
| 						p.bumpInvalid() | ||||
| 						continue | ||||
| 					} | ||||
| 					bodies = append(bodies, body) | ||||
| @ -467,7 +467,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 					header := h.blockchain.GetHeaderByHash(request.BHash) | ||||
| 					if header == nil { | ||||
| 						p.Log().Warn("Failed to retrieve associate header for code", "hash", request.BHash) | ||||
| 						atomic.AddUint32(&p.invalidCount, 1) | ||||
| 						p.bumpInvalid() | ||||
| 						continue | ||||
| 					} | ||||
| 					// Refuse to search stale state data in the database since looking for
 | ||||
| @ -475,7 +475,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 					local := h.blockchain.CurrentHeader().Number.Uint64() | ||||
| 					if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local { | ||||
| 						p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local) | ||||
| 						atomic.AddUint32(&p.invalidCount, 1) | ||||
| 						p.bumpInvalid() | ||||
| 						continue | ||||
| 					} | ||||
| 					triedb := h.blockchain.StateCache().TrieDB() | ||||
| @ -483,7 +483,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 					account, err := h.getAccount(triedb, header.Root, common.BytesToHash(request.AccKey)) | ||||
| 					if err != nil { | ||||
| 						p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) | ||||
| 						atomic.AddUint32(&p.invalidCount, 1) | ||||
| 						p.bumpInvalid() | ||||
| 						continue | ||||
| 					} | ||||
| 					code, err := triedb.Node(common.BytesToHash(account.CodeHash)) | ||||
| @ -542,7 +542,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 					results := h.blockchain.GetReceiptsByHash(hash) | ||||
| 					if results == nil { | ||||
| 						if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { | ||||
| 							atomic.AddUint32(&p.invalidCount, 1) | ||||
| 							p.bumpInvalid() | ||||
| 							continue | ||||
| 						} | ||||
| 					} | ||||
| @ -605,7 +605,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 
 | ||||
| 						if header = h.blockchain.GetHeaderByHash(request.BHash); header == nil { | ||||
| 							p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash) | ||||
| 							atomic.AddUint32(&p.invalidCount, 1) | ||||
| 							p.bumpInvalid() | ||||
| 							continue | ||||
| 						} | ||||
| 						// Refuse to search stale state data in the database since looking for
 | ||||
| @ -613,14 +613,14 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 						local := h.blockchain.CurrentHeader().Number.Uint64() | ||||
| 						if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local { | ||||
| 							p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local) | ||||
| 							atomic.AddUint32(&p.invalidCount, 1) | ||||
| 							p.bumpInvalid() | ||||
| 							continue | ||||
| 						} | ||||
| 						root = header.Root | ||||
| 					} | ||||
| 					// If a header lookup failed (non existent), ignore subsequent requests for the same header
 | ||||
| 					if root == (common.Hash{}) { | ||||
| 						atomic.AddUint32(&p.invalidCount, 1) | ||||
| 						p.bumpInvalid() | ||||
| 						continue | ||||
| 					} | ||||
| 					// Open the account or storage trie for the request
 | ||||
| @ -639,7 +639,7 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 						account, err := h.getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey)) | ||||
| 						if err != nil { | ||||
| 							p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) | ||||
| 							atomic.AddUint32(&p.invalidCount, 1) | ||||
| 							p.bumpInvalid() | ||||
| 							continue | ||||
| 						} | ||||
| 						trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root) | ||||
| @ -833,9 +833,9 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { | ||||
| 		clientErrorMeter.Mark(1) | ||||
| 		return errResp(ErrInvalidMsgCode, "%v", msg.Code) | ||||
| 	} | ||||
| 	// If the client has made too much invalid request(e.g. request a non-exist data),
 | ||||
| 	// If the client has made too much invalid request(e.g. request a non-existent data),
 | ||||
| 	// reject them to prevent SPAM attack.
 | ||||
| 	if atomic.LoadUint32(&p.invalidCount) > maxRequestErrors { | ||||
| 	if p.getInvalid() > maxRequestErrors { | ||||
| 		clientErrorMeter.Mark(1) | ||||
| 		return errTooManyInvalidRequest | ||||
| 	} | ||||
|  | ||||
| @ -223,6 +223,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index | ||||
| 	if client.oracle != nil { | ||||
| 		client.oracle.Start(backend) | ||||
| 	} | ||||
| 	client.handler.start() | ||||
| 	return client.handler | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -124,6 +124,50 @@ func (e *ExpiredValue) SubExp(a ExpiredValue) { | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // LinearExpiredValue is very similar with the expiredValue which the value
 | ||||
| // will continuously expired. But the different part is it's expired linearly.
 | ||||
| type LinearExpiredValue struct { | ||||
| 	Offset uint64         // The latest time offset
 | ||||
| 	Val    uint64         // The remaining value, can never be negative
 | ||||
| 	Rate   mclock.AbsTime `rlp:"-"` // Expiration rate(by nanosecond), will ignored by RLP
 | ||||
| } | ||||
| 
 | ||||
| // value calculates the value at the given moment. This function always has the
 | ||||
| // assumption that the given timestamp shouldn't less than the recorded one.
 | ||||
| func (e LinearExpiredValue) Value(now mclock.AbsTime) uint64 { | ||||
| 	offset := uint64(now / e.Rate) | ||||
| 	if e.Offset < offset { | ||||
| 		diff := offset - e.Offset | ||||
| 		if e.Val >= diff { | ||||
| 			e.Val -= diff | ||||
| 		} else { | ||||
| 			e.Val = 0 | ||||
| 		} | ||||
| 	} | ||||
| 	return e.Val | ||||
| } | ||||
| 
 | ||||
| // add adds a signed value at the given moment. This function always has the
 | ||||
| // assumption that the given timestamp shouldn't less than the recorded one.
 | ||||
| func (e *LinearExpiredValue) Add(amount int64, now mclock.AbsTime) uint64 { | ||||
| 	offset := uint64(now / e.Rate) | ||||
| 	if e.Offset < offset { | ||||
| 		diff := offset - e.Offset | ||||
| 		if e.Val >= diff { | ||||
| 			e.Val -= diff | ||||
| 		} else { | ||||
| 			e.Val = 0 | ||||
| 		} | ||||
| 		e.Offset = offset | ||||
| 	} | ||||
| 	if amount < 0 && uint64(-amount) > e.Val { | ||||
| 		e.Val = 0 | ||||
| 	} else { | ||||
| 		e.Val = uint64(int64(e.Val) + amount) | ||||
| 	} | ||||
| 	return e.Val | ||||
| } | ||||
| 
 | ||||
| // Expirer changes logOffset with a linear rate which can be changed during operation.
 | ||||
| // It is not thread safe, if access by multiple goroutines is needed then it should be
 | ||||
| // encapsulated into a locked structure.
 | ||||
|  | ||||
| @ -18,6 +18,8 @@ package utils | ||||
| 
 | ||||
| import ( | ||||
| 	"testing" | ||||
| 
 | ||||
| 	"github.com/ethereum/go-ethereum/common/mclock" | ||||
| ) | ||||
| 
 | ||||
| func TestValueExpiration(t *testing.T) { | ||||
| @ -116,3 +118,78 @@ func TestExpiredValueSubtraction(t *testing.T) { | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestLinearExpiredValue(t *testing.T) { | ||||
| 	var cases = []struct { | ||||
| 		value  LinearExpiredValue | ||||
| 		now    mclock.AbsTime | ||||
| 		expect uint64 | ||||
| 	}{ | ||||
| 		{LinearExpiredValue{ | ||||
| 			Offset: 0, | ||||
| 			Val:    0, | ||||
| 			Rate:   mclock.AbsTime(1), | ||||
| 		}, 0, 0}, | ||||
| 
 | ||||
| 		{LinearExpiredValue{ | ||||
| 			Offset: 1, | ||||
| 			Val:    1, | ||||
| 			Rate:   mclock.AbsTime(1), | ||||
| 		}, 0, 1}, | ||||
| 
 | ||||
| 		{LinearExpiredValue{ | ||||
| 			Offset: 1, | ||||
| 			Val:    1, | ||||
| 			Rate:   mclock.AbsTime(1), | ||||
| 		}, mclock.AbsTime(2), 0}, | ||||
| 
 | ||||
| 		{LinearExpiredValue{ | ||||
| 			Offset: 1, | ||||
| 			Val:    1, | ||||
| 			Rate:   mclock.AbsTime(1), | ||||
| 		}, mclock.AbsTime(3), 0}, | ||||
| 	} | ||||
| 	for _, c := range cases { | ||||
| 		if value := c.value.Value(c.now); value != c.expect { | ||||
| 			t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, value) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func TestLinearExpiredAddition(t *testing.T) { | ||||
| 	var cases = []struct { | ||||
| 		value  LinearExpiredValue | ||||
| 		amount int64 | ||||
| 		now    mclock.AbsTime | ||||
| 		expect uint64 | ||||
| 	}{ | ||||
| 		{LinearExpiredValue{ | ||||
| 			Offset: 0, | ||||
| 			Val:    0, | ||||
| 			Rate:   mclock.AbsTime(1), | ||||
| 		}, -1, 0, 0}, | ||||
| 
 | ||||
| 		{LinearExpiredValue{ | ||||
| 			Offset: 1, | ||||
| 			Val:    1, | ||||
| 			Rate:   mclock.AbsTime(1), | ||||
| 		}, -1, 0, 0}, | ||||
| 
 | ||||
| 		{LinearExpiredValue{ | ||||
| 			Offset: 1, | ||||
| 			Val:    2, | ||||
| 			Rate:   mclock.AbsTime(1), | ||||
| 		}, -1, mclock.AbsTime(2), 0}, | ||||
| 
 | ||||
| 		{LinearExpiredValue{ | ||||
| 			Offset: 1, | ||||
| 			Val:    2, | ||||
| 			Rate:   mclock.AbsTime(1), | ||||
| 		}, -2, mclock.AbsTime(2), 0}, | ||||
| 	} | ||||
| 	for _, c := range cases { | ||||
| 		if value := c.value.Add(c.amount, c.now); value != c.expect { | ||||
| 			t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, value) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user