core: separated processing mutex and getting mutex

obscuren 2015-04-28 17:48:46 +02:00
parent 9f32117457
commit 7ac24d551b
2 changed files with 51 additions and 45 deletions
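
What the diff below does, in short: InsertChain now serializes on a dedicated insertMu, and the pre-existing RWMutex mu is only taken around the short section that writes each block and moves the canonical head, so readers of the chain state are no longer blocked for the duration of an entire insert. A minimal sketch of that locking pattern, using hypothetical, simplified types rather than the real ChainManager:

package main

import (
    "math/big"
    "sync"
)

// chain is a hypothetical, stripped-down stand-in for ChainManager, used only
// to illustrate the two-mutex split: insertMu serializes processing, mu guards reads.
type chain struct {
    mu       sync.RWMutex // guards head/td; held only while the head is swapped
    insertMu sync.Mutex   // serializes whole InsertChain calls

    td   *big.Int
    head string
}

// CurrentTD takes only the read lock, so readers are blocked just for the
// brief moment a block is being written, not for a whole InsertChain run.
func (c *chain) CurrentTD() *big.Int {
    c.mu.RLock()
    defer c.mu.RUnlock()
    return new(big.Int).Set(c.td)
}

// InsertChain holds insertMu for the whole batch, but takes mu only around
// the per-block state update, mirroring the mu.Lock()/mu.Unlock() pair added
// inside the loop in the diff.
func (c *chain) InsertChain(blocks []string) {
    c.insertMu.Lock()
    defer c.insertMu.Unlock()

    for _, b := range blocks {
        // ... expensive validation/processing would happen here, without mu held ...

        c.mu.Lock()
        c.head = b
        c.td = new(big.Int).Add(c.td, big.NewInt(1)) // placeholder difficulty update
        c.mu.Unlock()
    }
}

func main() {
    c := &chain{td: big.NewInt(0)}
    c.InsertChain([]string{"a", "b"})
    _ = c.CurrentTD()
}

insertMu alone guarantees that inserts never interleave; mu keeps reads cheap because it is only contended for the short head/TD update, not for block processing.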

View File

@@ -21,7 +21,7 @@ import (
 const (
     // must be bumped when consensus algorithm is changed, this forces the upgradedb
     // command to be run (forces the blocks to be imported again using the new algorithm)
-    BlockChainVersion = 3
+    BlockChainVersion = 2
 )

 var statelogger = logger.NewLogger("BLOCK")

View File

@@ -74,8 +74,10 @@ type ChainManager struct {
     eventMux     *event.TypeMux
     genesisBlock *types.Block
     // Last known total difficulty
     mu       sync.RWMutex
     tsmu     sync.RWMutex
+    insertMu sync.Mutex
+
     td            *big.Int
     currentBlock  *types.Block
     lastBlockHash common.Hash
@@ -496,8 +498,8 @@ func (self *ChainManager) procFutureBlocks() {
 }

 func (self *ChainManager) InsertChain(chain types.Blocks) error {
-    self.mu.Lock()
-    defer self.mu.Unlock()
+    self.insertMu.Lock()
+    defer self.insertMu.Unlock()

     // A queued approach to delivering events. This is generally faster than direct delivery and requires much less mutex acquiring.
     var (
@@ -546,51 +548,55 @@ func (self *ChainManager) InsertChain(chain types.Blocks) error {
         block.Td = new(big.Int).Set(CalculateTD(block, self.GetBlock(block.ParentHash())))

-        cblock := self.currentBlock
-        // Write block to database. Eventually we'll have to improve on this and throw away blocks that are
-        // not in the canonical chain.
-        self.write(block)
-        // Compare the TD of the last known block in the canonical chain to make sure it's greater.
-        // At this point it's possible that a different chain (fork) becomes the new canonical chain.
-        if block.Td.Cmp(self.td) > 0 {
-            //if block.Header().Number.Cmp(new(big.Int).Add(cblock.Header().Number, common.Big1)) < 0 {
-            if block.Number().Cmp(cblock.Number()) <= 0 {
-                chash := cblock.Hash()
-                hash := block.Hash()
+        self.mu.Lock()
+        {
+            cblock := self.currentBlock
+            // Write block to database. Eventually we'll have to improve on this and throw away blocks that are
+            // not in the canonical chain.
+            self.write(block)
+            // Compare the TD of the last known block in the canonical chain to make sure it's greater.
+            // At this point it's possible that a different chain (fork) becomes the new canonical chain.
+            if block.Td.Cmp(self.td) > 0 {
+                //if block.Header().Number.Cmp(new(big.Int).Add(cblock.Header().Number, common.Big1)) < 0 {
+                if block.Number().Cmp(cblock.Number()) <= 0 {
+                    chash := cblock.Hash()
+                    hash := block.Hash()

-                if glog.V(logger.Info) {
-                    glog.Infof("Split detected. New head #%v (%x) TD=%v, was #%v (%x) TD=%v\n", block.Header().Number, hash[:4], block.Td, cblock.Header().Number, chash[:4], self.td)
-                }
-                // during split we merge two different chains and create the new canonical chain
-                self.merge(self.getBlockByNumber(block.NumberU64()), block)
+                    if glog.V(logger.Info) {
+                        glog.Infof("Split detected. New head #%v (%x) TD=%v, was #%v (%x) TD=%v\n", block.Header().Number, hash[:4], block.Td, cblock.Header().Number, chash[:4], self.td)
+                    }
+                    // during split we merge two different chains and create the new canonical chain
+                    self.merge(self.getBlockByNumber(block.NumberU64()), block)

-                queue[i] = ChainSplitEvent{block, logs}
-                queueEvent.splitCount++
-            }
+                    queue[i] = ChainSplitEvent{block, logs}
+                    queueEvent.splitCount++
+                }

-            self.setTotalDifficulty(block.Td)
-            self.insert(block)
+                self.setTotalDifficulty(block.Td)
+                self.insert(block)

-            jsonlogger.LogJson(&logger.EthChainNewHead{
-                BlockHash:     block.Hash().Hex(),
-                BlockNumber:   block.Number(),
-                ChainHeadHash: cblock.Hash().Hex(),
-                BlockPrevHash: block.ParentHash().Hex(),
-            })
+                jsonlogger.LogJson(&logger.EthChainNewHead{
+                    BlockHash:     block.Hash().Hex(),
+                    BlockNumber:   block.Number(),
+                    ChainHeadHash: cblock.Hash().Hex(),
+                    BlockPrevHash: block.ParentHash().Hex(),
+                })

-            self.setTransState(state.New(block.Root(), self.stateDb))
-            self.txState.SetState(state.New(block.Root(), self.stateDb))
+                self.setTransState(state.New(block.Root(), self.stateDb))
+                self.txState.SetState(state.New(block.Root(), self.stateDb))

-            queue[i] = ChainEvent{block, logs}
-            queueEvent.canonicalCount++
+                queue[i] = ChainEvent{block, logs}
+                queueEvent.canonicalCount++

-            if glog.V(logger.Debug) {
-                glog.Infof("inserted block #%d (%d TXs %d UNCs) (%x...)\n", block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4])
-            }
-        } else {
-            queue[i] = ChainSideEvent{block, logs}
-            queueEvent.sideCount++
-        }
+                if glog.V(logger.Debug) {
+                    glog.Infof("inserted block #%d (%d TXs %d UNCs) (%x...)\n", block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4])
+                }
+            } else {
+                queue[i] = ChainSideEvent{block, logs}
+                queueEvent.sideCount++
+            }
+        }
+        self.mu.Unlock()

         stats.processed++
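
The hunk is cut off above, but the queue and queueEvent values it fills are the "queued approach to delivering events" described by the comment in InsertChain: one event per processed block is stored in a pre-allocated slice, and the batch is delivered after processing instead of posting each event while chain locks are held. A rough sketch of that idea, with hypothetical, simplified types standing in for ChainEvent/ChainSideEvent/ChainSplitEvent and event.TypeMux:

package main

import (
    "fmt"
    "sync"
)

// event is a hypothetical stand-in for ChainEvent / ChainSideEvent / ChainSplitEvent.
type event struct {
    kind  string
    block string
}

// mux is a tiny stand-in for event.TypeMux: Post takes an internal lock, which
// is why batching posts after processing means far fewer lock acquisitions.
type mux struct {
    mu   sync.Mutex
    sent []event
}

func (m *mux) Post(ev event) {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.sent = append(m.sent, ev)
}

// insertChain fills a pre-sized queue with one event per block (like the
// queue[i] = ... assignments in the diff) and only delivers them once the
// whole batch has been processed.
func insertChain(m *mux, blocks []string) {
    queue := make([]event, len(blocks))
    for i, b := range blocks {
        // ... block processing ...
        queue[i] = event{kind: "canonical", block: b}
    }
    // Deliver after processing, outside the chain's own locks.
    for _, ev := range queue {
        m.Post(ev)
    }
}

func main() {
    m := &mux{}
    insertChain(m, []string{"b1", "b2", "b3"})
    fmt.Println(len(m.sent), "events delivered")
}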