eth, core: interrupt the chain processing on stop

Added an additional channel that is used to interrupt the chain manager
while it is processing blocks.
obscuren 2015-06-12 13:36:38 +02:00
parent e2c2d8e15e
commit 90c4493a10
2 changed files with 109 additions and 99 deletions
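The mechanism behind the change is the standard Go "close a channel to broadcast" idiom: Stop() closes a dedicated channel, and the block-processing loop polls it with a non-blocking select, bailing out early through a labeled break. The sketch below is a minimal, self-contained illustration of that pattern, not the actual go-ethereum code; only the quit/procInterupt names are taken from the diff, everything else is hypothetical.

package main

import "fmt"

// chainManager is a stripped-down stand-in for core.ChainManager.
type chainManager struct {
	quit         chan struct{} // general shutdown signal
	procInterupt chan struct{} // closed by Stop to abort in-flight block processing
}

func newChainManager() *chainManager {
	return &chainManager{
		quit:         make(chan struct{}),
		procInterupt: make(chan struct{}),
	}
}

func (cm *chainManager) Stop() {
	close(cm.quit)
	// Closing a channel makes every receive on it succeed immediately,
	// so all current and future selects observe the interrupt.
	close(cm.procInterupt)
}

func (cm *chainManager) insertChain(blocks []int) int {
	processed := 0
done:
	for _, b := range blocks {
		select {
		case <-cm.procInterupt:
			fmt.Println("premature abort during chain processing")
			break done // a plain break would only leave the select, not the loop
		default:
			_ = b // stand-in for the actual block processing
			processed++
		}
	}
	return processed
}

func main() {
	cm := newChainManager()
	cm.Stop() // stop before processing: the loop aborts on its first iteration
	fmt.Println("processed", cm.insertChain([]int{1, 2, 3}))
}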


@@ -100,8 +100,9 @@ type ChainManager struct {
 	cache        *BlockCache
 	futureBlocks *BlockCache
 	quit         chan struct{}
+	procInterupt chan struct{} // interupt signaler for block processing
 	wg           sync.WaitGroup
 
 	pow pow.PoW
 }
@@ -113,6 +114,7 @@ func NewChainManager(genesis *types.Block, blockDb, stateDb common.Database, pow
 		genesisBlock: GenesisBlock(42, stateDb),
 		eventMux:     mux,
 		quit:         make(chan struct{}),
+		procInterupt: make(chan struct{}),
 		cache:        NewBlockCache(blockCacheLimit),
 		pow:          pow,
 	}
@@ -516,6 +518,7 @@ func (self *ChainManager) CalcTotalDiff(block *types.Block) (*big.Int, error) {
 func (bc *ChainManager) Stop() {
 	close(bc.quit)
+	close(bc.procInterupt)
 
 	bc.wg.Wait()
@@ -568,119 +571,126 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
 	defer close(nonceQuit)
 
 	txcount := 0
+done:
 	for i, block := range chain {
+		select {
+		case <-self.procInterupt:
+			glog.V(logger.Debug).Infoln("Premature abort during chain processing")
+			break done
+		default:
 			bstart := time.Now()
 			// Wait for block i's nonce to be verified before processing
 			// its state transition.
 			for !nonceChecked[i] {
 				r := <-nonceDone
 				nonceChecked[r.i] = true
 				if !r.valid {
 					block := chain[r.i]
 					return r.i, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()}
 				}
 			}
 
 			if BadHashes[block.Hash()] {
 				err := fmt.Errorf("Found known bad hash in chain %x", block.Hash())
 				blockErr(block, err)
 				return i, err
 			}
 
 			// Setting block.Td regardless of error (known for example) prevents errors down the line
 			// in the protocol handler
 			block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash())))
 
 			// Call in to the block processor and check for errors. It's likely that if one block fails
 			// all others will fail too (unless a known block is returned).
 			logs, err := self.processor.Process(block)
 			if err != nil {
 				if IsKnownBlockErr(err) {
 					stats.ignored++
 					continue
 				}
 
 				if err == BlockFutureErr {
 					// Allow up to MaxFuture second in the future blocks. If this limit
 					// is exceeded the chain is discarded and processed at a later time
 					// if given.
 					if max := time.Now().Unix() + maxTimeFutureBlocks; block.Time() > max {
 						return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
 					}
 
 					block.SetQueued(true)
 					self.futureBlocks.Push(block)
 					stats.queued++
 					continue
 				}
 
 				if IsParentErr(err) && self.futureBlocks.Has(block.ParentHash()) {
 					block.SetQueued(true)
 					self.futureBlocks.Push(block)
 					stats.queued++
 					continue
 				}
 
 				blockErr(block, err)
 
 				return i, err
 			}
 
 			txcount += len(block.Transactions())
 
 			cblock := self.currentBlock
 			// Compare the TD of the last known block in the canonical chain to make sure it's greater.
 			// At this point it's possible that a different chain (fork) becomes the new canonical chain.
 			if block.Td.Cmp(self.Td()) > 0 {
 				// chain fork
 				if block.ParentHash() != cblock.Hash() {
 					// during split we merge two different chains and create the new canonical chain
 					err := self.merge(cblock, block)
 					if err != nil {
 						return i, err
 					}
 
 					queue[i] = ChainSplitEvent{block, logs}
 					queueEvent.splitCount++
 				}
 
 				self.mu.Lock()
 				self.setTotalDifficulty(block.Td)
 				self.insert(block)
 				self.mu.Unlock()
 
 				jsonlogger.LogJson(&logger.EthChainNewHead{
 					BlockHash:     block.Hash().Hex(),
 					BlockNumber:   block.Number(),
 					ChainHeadHash: cblock.Hash().Hex(),
 					BlockPrevHash: block.ParentHash().Hex(),
 				})
 
 				self.setTransState(state.New(block.Root(), self.stateDb))
 				self.txState.SetState(state.New(block.Root(), self.stateDb))
 
 				queue[i] = ChainEvent{block, block.Hash(), logs}
 				queueEvent.canonicalCount++
 
 				if glog.V(logger.Debug) {
 					glog.Infof("[%v] inserted block #%d (%d TXs %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
 				}
 			} else {
 				if glog.V(logger.Detail) {
 					glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart))
 				}
 
 				queue[i] = ChainSideEvent{block, logs}
 				queueEvent.sideCount++
 			}
 
 			// Write block to database. Eventually we'll have to improve on this and throw away blocks that are
 			// not in the canonical chain.
 			self.write(block)
 			// Delete from future blocks
 			self.futureBlocks.Delete(block.Hash())
 
 			stats.processed++
+		}
 	}
 
 	if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) {
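One detail worth calling out from the Stop hunk above: close(bc.procInterupt) is issued before bc.wg.Wait(). Assuming InsertChain registers itself on that WaitGroup with wg.Add/wg.Done (that code sits outside the hunk shown here), the interrupt is what lets Wait return promptly instead of blocking until a whole batch of blocks has finished importing. A rough sketch of that interplay, with hypothetical names:

package main

import (
	"fmt"
	"sync"
	"time"
)

type manager struct {
	procInterupt chan struct{}
	wg           sync.WaitGroup
}

func (m *manager) insertChain(blocks int) {
	m.wg.Add(1)
	defer m.wg.Done()
	for i := 0; i < blocks; i++ {
		select {
		case <-m.procInterupt:
			fmt.Println("aborting after", i, "blocks")
			return
		default:
			time.Sleep(10 * time.Millisecond) // stand-in for processing one block
		}
	}
}

func (m *manager) Stop() {
	close(m.procInterupt) // signal first ...
	m.wg.Wait()           // ... so Wait returns as soon as the loop notices
}

func main() {
	m := &manager{procInterupt: make(chan struct{})}
	go m.insertChain(1000000)
	time.Sleep(50 * time.Millisecond) // let the import get going
	m.Stop()
	fmt.Println("stopped")
}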


@@ -527,8 +527,8 @@ func (self *Ethereum) AddPeer(nodeURL string) error {
 func (s *Ethereum) Stop() {
 	s.net.Stop()
-	s.protocolManager.Stop()
 	s.chainManager.Stop()
+	s.protocolManager.Stop()
 	s.txPool.Stop()
 	s.eventMux.Stop()
 	if s.whisper != nil {
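The eth/backend.go change complements this: s.chainManager.Stop() now runs before s.protocolManager.Stop(), presumably so that any import the sync machinery is still blocked in gets interrupted (and the chain manager's WaitGroup drained) before the protocol manager itself is torn down, rather than having node shutdown wait for a long block-processing run to finish.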