eth: fix typos (#16414)

parent 2a4bd55b43
commit 7aad81f881
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -63,7 +63,7 @@ type Ethereum struct {
 	chainConfig *params.ChainConfig
 
 	// Channel for shutting down the service
-	shutdownChan  chan bool    // Channel for shutting down the ethereum
+	shutdownChan  chan bool    // Channel for shutting down the Ethereum
 	stopDbUpgrade func() error // stop chain db sequential key upgrade
 
 	// Handlers
@@ -351,7 +351,7 @@ func (s *Ethereum) StartMining(local bool) error {
 	if local {
 		// If local (CPU) mining is started, we can disable the transaction rejection
 		// mechanism introduced to speed sync times. CPU mining on mainnet is ludicrous
-		// so noone will ever hit this path, whereas marking sync done on CPU mining
+		// so none will ever hit this path, whereas marking sync done on CPU mining
 		// will ensure that private networks work in single miner mode too.
 		atomic.StoreUint32(&s.protocolManager.acceptTxs, 1)
 	}
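For context, the acceptTxs flag toggled above is what gates transaction ingestion while a node is still syncing. A minimal sketch of that gating idea, using a standalone atomic flag and a hypothetical handleTx helper rather than the real ProtocolManager internals:

```go
// Sketch: gate incoming transactions on an atomic "synced" flag.
// acceptTxs and handleTx are illustrative stand-ins, not the real API.
package main

import (
	"fmt"
	"sync/atomic"
)

var acceptTxs uint32 // 0 = still syncing, 1 = accept transactions

func handleTx(tx string) error {
	if atomic.LoadUint32(&acceptTxs) == 0 {
		return fmt.Errorf("rejected %s: node still syncing", tx)
	}
	fmt.Println("accepted", tx)
	return nil
}

func main() {
	_ = handleTx("tx1")               // rejected: flag not set yet
	atomic.StoreUint32(&acceptTxs, 1) // what StartMining does for local CPU mining
	_ = handleTx("tx2")               // accepted
}
```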
--- a/eth/db_upgrade.go
+++ b/eth/db_upgrade.go
@@ -62,7 +62,7 @@ func upgradeDeduplicateData(db ethdb.Database) func() error {
 		failed error
 	)
 	for failed == nil && it.Next() {
-		// Skip any entries that don't look like old transaction meta entires (<hash>0x01)
+		// Skip any entries that don't look like old transaction meta entries (<hash>0x01)
 		key := it.Key()
 		if len(key) != common.HashLength+1 || key[common.HashLength] != 0x01 {
 			continue
@@ -86,7 +86,7 @@ func upgradeDeduplicateData(db ethdb.Database) func() error {
 			}
 		}
 		// Convert the old metadata to a new lookup entry, delete duplicate data
-		if failed = db.Put(append([]byte("l"), hash...), it.Value()); failed == nil { // Write the new looku entry
+		if failed = db.Put(append([]byte("l"), hash...), it.Value()); failed == nil { // Write the new lookup entry
 			if failed = db.Delete(hash); failed == nil { // Delete the duplicate transaction data
 				if failed = db.Delete(append([]byte("receipts-"), hash...)); failed == nil { // Delete the duplicate receipt data
 					if failed = db.Delete(key); failed != nil { // Delete the old transaction metadata
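The migration in this hunk rewrites the database key scheme: an old metadata entry keyed <hash>0x01 becomes a lookup entry keyed "l"+hash, and the duplicate payloads stored under hash and "receipts-"+hash are dropped. A minimal sketch of that transformation, using a plain in-memory map as a stand-in for the real ethdb.Database:

```go
// Sketch of the deduplication upgrade's key rewrite. The map is a
// stand-in for ethdb.Database; keys and prefixes follow the hunk above.
package main

import "fmt"

func upgradeEntry(kv map[string][]byte, key []byte) {
	const hashLen = 32 // common.HashLength
	if len(key) != hashLen+1 || key[hashLen] != 0x01 {
		return // not an old transaction meta entry (<hash>0x01)
	}
	hash := key[:hashLen]
	kv["l"+string(hash)] = kv[string(key)] // write the new lookup entry
	delete(kv, string(hash))               // drop the duplicate transaction data
	delete(kv, "receipts-"+string(hash))   // drop the duplicate receipt data
	delete(kv, string(key))                // drop the old metadata entry
}

func main() {
	hash := make([]byte, 32)
	hash[0] = 0xab
	kv := map[string][]byte{
		string(append(hash, 0x01)): []byte("meta"),
		string(hash):               []byte("tx data"),
		"receipts-" + string(hash): []byte("receipt data"),
	}
	upgradeEntry(kv, append(hash, 0x01))
	for k, v := range kv {
		fmt.Printf("%q -> %q\n", k, v) // only the "l"-prefixed entry remains
	}
}
```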
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -47,7 +47,7 @@ var (
 
 	MaxForkAncestry  = 3 * params.EpochDuration // Maximum chain reorganisation
 	rttMinEstimate   = 2 * time.Second          // Minimum round-trip time to target for download requests
-	rttMaxEstimate   = 20 * time.Second         // Maximum rount-trip time to target for download requests
+	rttMaxEstimate   = 20 * time.Second         // Maximum round-trip time to target for download requests
 	rttMinConfidence = 0.1                      // Worse confidence factor in our estimated RTT value
 	ttlScaling       = 3                        // Constant scaling factor for RTT -> TTL conversion
 	ttlLimit         = time.Minute              // Maximum TTL allowance to prevent reaching crazy timeouts
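The comments on these constants describe how the downloader turns a measured round-trip time into a request timeout: the RTT estimate is kept between rttMinEstimate and rttMaxEstimate, scaled by ttlScaling, and capped at ttlLimit. A minimal sketch of that conversion, assuming the clamp-scale-cap order (the real downloader additionally weighs in a confidence factor):

```go
// Sketch of the RTT -> TTL conversion implied by the constants above.
// The exact composition is an assumption, not the downloader's code.
package main

import (
	"fmt"
	"time"
)

const (
	rttMinEstimate = 2 * time.Second
	rttMaxEstimate = 20 * time.Second
	ttlScaling     = 3
	ttlLimit       = time.Minute
)

func requestTTL(rtt time.Duration) time.Duration {
	// Clamp the estimate into a sane band first.
	if rtt < rttMinEstimate {
		rtt = rttMinEstimate
	}
	if rtt > rttMaxEstimate {
		rtt = rttMaxEstimate
	}
	// Scale RTT into a timeout, capped to avoid crazy values.
	ttl := rtt * ttlScaling
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}

func main() {
	fmt.Println(requestTTL(500 * time.Millisecond)) // 6s: clamped up to 2s, then *3
	fmt.Println(requestTTL(30 * time.Second))       // 1m0s: 20s * 3 hits the cap
}
```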
@@ -884,7 +884,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
 // immediately to the header processor to keep the rest of the pipeline full even
 // in the case of header stalls.
 //
-// The method returs the entire filled skeleton and also the number of headers
+// The method returns the entire filled skeleton and also the number of headers
 // already forwarded for processing.
 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
 	log.Debug("Filling up skeleton", "from", from)
@@ -1377,7 +1377,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
 		pivot = height - uint64(fsMinFullBlocks)
 	}
 	// To cater for moving pivot points, track the pivot block and subsequently
-	// accumulated download results separatey.
+	// accumulated download results separately.
 	var (
 		oldPivot *fetchResult   // Locked in pivot block, might change eventually
 		oldTail  []*fetchResult // Downloaded content after the pivot
@@ -1615,7 +1615,7 @@ func (d *Downloader) qosReduceConfidence() {
 //
 // Note, the returned RTT is .9 of the actually estimated RTT. The reason is that
 // the downloader tries to adapt queries to the RTT, so multiple RTT values can
-// be adapted to, but smaller ones are preffered (stabler download stream).
+// be adapted to, but smaller ones are preferred (stabler download stream).
 func (d *Downloader) requestRTT() time.Duration {
 	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
 }
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -159,7 +159,7 @@ func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentRec
 	// Create the common suffix
 	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
 
-	// Create the forks, making the second heavyer if non balanced forks were requested
+	// Create the forks, making the second heavier if non balanced forks were requested
 	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
 	hashes1 = append(hashes1, hashes[1:]...)
 
--- a/eth/downloader/fakepeer.go
+++ b/eth/downloader/fakepeer.go
@@ -27,7 +27,7 @@ import (
 
 // FakePeer is a mock downloader peer that operates on a local database instance
 // instead of being an actual live node. It's useful for testing and to implement
-// sync commands from an xisting local database.
+// sync commands from an existing local database.
 type FakePeer struct {
 	id string
 	db ethdb.Database
@@ -48,7 +48,7 @@ func (p *FakePeer) Head() (common.Hash, *big.Int) {
 }
 
 // RequestHeadersByHash implements downloader.Peer, returning a batch of headers
-// defined by the origin hash and the associaed query parameters.
+// defined by the origin hash and the associated query parameters.
 func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int, reverse bool) error {
 	var (
 		headers []*types.Header
@@ -92,7 +92,7 @@ func (p *FakePeer) RequestHeadersByHash(hash common.Hash, amount int, skip int,
 }
 
 // RequestHeadersByNumber implements downloader.Peer, returning a batch of headers
-// defined by the origin number and the associaed query parameters.
+// defined by the origin number and the associated query parameters.
 func (p *FakePeer) RequestHeadersByNumber(number uint64, amount int, skip int, reverse bool) error {
 	var (
 		headers []*types.Header
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -551,7 +551,7 @@ func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerC
 // medianRTT returns the median RTT of the peerset, considering only the tuning
 // peers if there are more peers available.
 func (ps *peerSet) medianRTT() time.Duration {
-	// Gather all the currnetly measured round trip times
+	// Gather all the currently measured round trip times
 	ps.lock.RLock()
 	defer ps.lock.RUnlock()
 
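The median step itself is simple to state: collect every peer's measured round-trip time, sort, and take the middle element. A minimal sketch of just that step, with the peerSet bookkeeping and locking elided:

```go
// Sketch of taking the median of measured round-trip times.
// Only the median computation is shown; peer tracking is elided.
package main

import (
	"fmt"
	"sort"
	"time"
)

func medianRTT(rtts []time.Duration) time.Duration {
	if len(rtts) == 0 {
		return 0 // no peers measured yet
	}
	sorted := append([]time.Duration(nil), rtts...) // don't mutate the caller's slice
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	return sorted[len(sorted)/2]
}

func main() {
	rtts := []time.Duration{120 * time.Millisecond, 80 * time.Millisecond, 400 * time.Millisecond}
	fmt.Println(medianRTT(rtts)) // 120ms
}
```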
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -275,7 +275,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
 	if q.headerResults != nil {
 		panic("skeleton assembly already in progress")
 	}
-	// Shedule all the header retrieval tasks for the skeleton assembly
+	// Schedule all the header retrieval tasks for the skeleton assembly
 	q.headerTaskPool = make(map[uint64]*types.Header)
 	q.headerTaskQueue = prque.New()
 	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
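Scheduling here means turning each skeleton header into a retrieval task keyed by the block number where its batch starts. A minimal sketch of that bookkeeping, using a plain slice in place of the priority queue and an assumed batchSize between skeleton headers:

```go
// Sketch of skeleton task scheduling: each skeleton header anchors a
// batch of headers to fetch. batchSize and the plain-slice "queue" are
// illustrative simplifications of the real header task pool and queue.
package main

import "fmt"

type header struct{ number uint64 }

func scheduleSkeleton(from uint64, skeleton []*header, batchSize uint64) (map[uint64]*header, []uint64) {
	taskPool := make(map[uint64]*header) // batch start -> skeleton header
	var taskQueue []uint64               // batch starts, in fetch order
	for i, h := range skeleton {
		index := from + uint64(i)*batchSize
		taskPool[index] = h
		taskQueue = append(taskQueue, index)
	}
	return taskPool, taskQueue
}

func main() {
	skeleton := []*header{{192}, {384}, {576}}
	pool, queue := scheduleSkeleton(1, skeleton, 192)
	fmt.Println("tasks:", len(pool), "order:", queue) // tasks: 3 order: [1 193 385]
}
```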
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -31,7 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )
 
-// stateReq represents a batch of state fetch requests groupped together into
+// stateReq represents a batch of state fetch requests grouped together into
 // a single data retrieval network packet.
 type stateReq struct {
 	items []common.Hash // Hashes of the state items to download
@@ -139,7 +139,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
 
 		// Handle incoming state packs:
 		case pack := <-d.stateCh:
-			// Discard any data not requested (or previsouly timed out)
+			// Discard any data not requested (or previously timed out)
 			req := active[pack.PeerId()]
 			if req == nil {
 				log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items())
@@ -182,7 +182,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
 		case req := <-d.trackStateReq:
 			// If an active request already exists for this peer, we have a problem. In
 			// theory the trie node schedule must never assign two requests to the same
-			// peer. In practive however, a peer might receive a request, disconnect and
+			// peer. In practice however, a peer might receive a request, disconnect and
 			// immediately reconnect before the previous times out. In this case the first
 			// request is never honored, alas we must not silently overwrite it, as that
 			// causes valid requests to go missing and sync to get stuck.
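The reasoning in this comment boils down to one rule: when a reconnected peer gets a new request, fail the old in-flight one explicitly instead of overwriting it, so its items get rescheduled rather than lost. A minimal sketch of that rule, with the request and failure handling reduced to stand-ins:

```go
// Sketch: tracking one active request per peer without silently
// dropping an in-flight one. stateReq and the "failing" step are
// illustrative stand-ins for the real timeout and reschedule logic.
package main

import "fmt"

type stateReq struct{ id int }

var active = make(map[string]*stateReq) // peer ID -> in-flight request

func track(peer string, req *stateReq) {
	if old := active[peer]; old != nil {
		// The peer reconnected before its previous request timed out.
		// Fail the old request explicitly instead of overwriting it.
		fmt.Printf("failing stale request %d from %s\n", old.id, peer)
	}
	active[peer] = req
}

func main() {
	track("peer-1", &stateReq{id: 1})
	track("peer-1", &stateReq{id: 2}) // reconnect: old request is failed, not lost
}
```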
@@ -228,7 +228,7 @@ type stateSync struct {
 	err error // Any error hit during sync (set before completion)
 }
 
-// stateTask represents a single trie node download taks, containing a set of
+// stateTask represents a single trie node download task, containing a set of
 // peers already attempted retrieval from to detect stalled syncs and abort.
 type stateTask struct {
 	attempts map[string]struct{}
@@ -333,7 +333,7 @@ func (s *stateSync) commit(force bool) error {
 	return nil
 }
 
-// assignTasks attempts to assing new tasks to all idle peers, either from the
+// assignTasks attempts to assign new tasks to all idle peers, either from the
 // batch currently being retried, or fetching new data from the trie sync itself.
 func (s *stateSync) assignTasks() {
 	// Iterate over all idle peers and try to assign them state fetches
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -127,7 +127,7 @@ type Fetcher struct {
 	// Block cache
 	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
 	queues map[string]int          // Per peer block counts to prevent memory exhaustion
-	queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports)
+	queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)
 
 	// Callbacks
 	getBlock blockRetrievalFn // Retrieves a block from the local chain
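The two maps above implement two guards: queued deduplicates blocks by hash, and queues caps how many blocks any single peer may have pending. A minimal sketch of both checks together, with the block type and the per-peer limit as assumptions:

```go
// Sketch of the fetcher's enqueue guards: dedupe by hash plus a
// per-peer pending cap. block and maxQueued are illustrative.
package main

import "fmt"

type block struct{ hash string }

const maxQueued = 2 // assumed per-peer limit

var (
	queues = make(map[string]int)    // per-peer pending counts
	queued = make(map[string]*block) // already-queued blocks, by hash
)

func enqueue(peer string, b *block) bool {
	if queues[peer] >= maxQueued {
		return false // peer already has too many blocks pending
	}
	if _, ok := queued[b.hash]; ok {
		return false // duplicate announcement, drop it
	}
	queues[peer]++
	queued[b.hash] = b
	return true
}

func main() {
	fmt.Println(enqueue("peer-1", &block{"0xaa"})) // true
	fmt.Println(enqueue("peer-1", &block{"0xaa"})) // false: duplicate hash
	fmt.Println(enqueue("peer-1", &block{"0xbb"})) // true
	fmt.Println(enqueue("peer-1", &block{"0xcc"})) // false: per-peer cap hit
}
```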
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -98,7 +98,7 @@ func (api *PublicFilterAPI) timeoutLoop() {
 // NewPendingTransactionFilter creates a filter that fetches pending transaction hashes
 // as transactions enter the pending state.
 //
-// It is part of the filter package because this filter can be used throug the
+// It is part of the filter package because this filter can be used through the
 // `eth_getFilterChanges` polling method that is also used for log filters.
 //
 // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter
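Usage-wise, this filter follows the standard install-then-poll pattern: create it with eth_newPendingTransactionFilter, then repeatedly call eth_getFilterChanges with the returned filter ID. A sketch of a client doing exactly that over go-ethereum's rpc package, assuming a node at a placeholder endpoint:

```go
// Sketch: polling a pending-transaction filter over JSON-RPC. The node
// endpoint is a placeholder; error handling is kept minimal.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	// Install the filter; the node returns a filter ID to poll with.
	var filterID string
	if err := client.Call(&filterID, "eth_newPendingTransactionFilter"); err != nil {
		log.Fatal(err)
	}
	// Poll for new pending transaction hashes, as the comment describes.
	for i := 0; i < 3; i++ {
		var hashes []string
		if err := client.Call(&hashes, "eth_getFilterChanges", filterID); err != nil {
			log.Fatal(err)
		}
		fmt.Println("pending tx hashes:", hashes)
		time.Sleep(2 * time.Second)
	}
}
```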
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -96,8 +96,8 @@ type ProtocolManager struct {
 	wg sync.WaitGroup
 }
 
-// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
-// with the ethereum network.
+// NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
+// with the Ethereum network.
 func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
 	// Create the protocol manager with the base fields
 	manager := &ProtocolManager{
@@ -498,20 +498,20 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
 			return errResp(ErrDecode, "msg %v: %v", msg, err)
 		}
 		// Deliver them all to the downloader for queuing
-		trasactions := make([][]*types.Transaction, len(request))
+		transactions := make([][]*types.Transaction, len(request))
 		uncles := make([][]*types.Header, len(request))
 
 		for i, body := range request {
-			trasactions[i] = body.Transactions
+			transactions[i] = body.Transactions
 			uncles[i] = body.Uncles
 		}
 		// Filter out any explicitly requested bodies, deliver the rest to the downloader
-		filter := len(trasactions) > 0 || len(uncles) > 0
+		filter := len(transactions) > 0 || len(uncles) > 0
 		if filter {
-			trasactions, uncles = pm.fetcher.FilterBodies(p.id, trasactions, uncles, time.Now())
+			transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
 		}
-		if len(trasactions) > 0 || len(uncles) > 0 || !filter {
-			err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
+		if len(transactions) > 0 || len(uncles) > 0 || !filter {
+			err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
 			if err != nil {
 				log.Debug("Failed to deliver bodies", "err", err)
 			}