core: support inserting pure header chains

Péter Szilágyi 2015-09-21 15:36:29 +03:00
parent 92f9a3e5fa
commit c33cc382b3
9 changed files with 653 additions and 348 deletions
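The headline change: a BlockChain can now be advanced with headers alone, without the corresponding block bodies. A minimal sketch of the intended call pattern, assuming an already-open chain database and a batch of contiguous headers obtained elsewhere (the wiring around the new API is illustrative, not part of this commit):

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)

// importHeaders inserts a pure header chain and reports the new header head.
func importHeaders(db ethdb.Database, headers []*types.Header) error {
	// FakePow skips the real PoW check here; a live node would pass its ethash instance.
	bc, err := core.NewBlockChain(db, core.FakePow{}, new(event.TypeMux))
	if err != nil {
		return err
	}
	// The second argument requests nonce verification of the inserted headers.
	if n, err := bc.InsertHeaderChain(headers, true); err != nil {
		return fmt.Errorf("header %d rejected: %v", n, err)
	}
	// After a pure header import the header head may sit above the block head.
	head := bc.CurrentHeader()
	fmt.Printf("header head #%v (TD %v), block head #%v\n",
		head.Number, bc.GetTd(head.Hash()), bc.CurrentBlock().Number())
	return nil
}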

View File

@ -369,13 +369,26 @@ func (sm *BlockProcessor) GetLogs(block *types.Block) (logs vm.Logs, err error)
return logs, nil
}
// ValidateHeader verifies the validity of a header, relying on the database and
// POW behind the block processor.
func (sm *BlockProcessor) ValidateHeader(header *types.Header, checkPow, uncle bool) error {
// Short circuit if the header's already known or its parent missing
if sm.bc.HasHeader(header.Hash()) {
return nil
}
if parent := sm.bc.GetHeader(header.ParentHash); parent == nil {
return ParentError(header.ParentHash)
} else {
return ValidateHeader(sm.Pow, header, parent, checkPow, uncle)
}
}
// See YP section 4.3.4. "Block Header Validity"
// Validates a header. Returns an error if the header is invalid.
func ValidateHeader(pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
if big.NewInt(int64(len(header.Extra))).Cmp(params.MaximumExtraDataSize) == 1 {
return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
}
if uncle {
if header.Time.Cmp(common.MaxBig) == 1 {
return BlockTSTooBigErr
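The validation logic is now split in two: the BlockProcessor method short-circuits on already-known headers and resolves the parent from the chain database, while the new package-level ValidateHeader validates against an explicitly supplied parent, which is what header-chain insertion needs. A hedged usage sketch of the standalone form (pow, header and parent are assumed to be in scope):

// Validate a detached header against its known parent; checkPow=true also
// verifies the PoW nonce, uncle=false applies the normal header rules.
if err := core.ValidateHeader(pow, header, parent, true, false); err != nil {
	return fmt.Errorf("header #%v invalid: %v", header.Number, err)
}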

View File

@ -67,9 +67,9 @@ type BlockChain struct {
chainmu sync.RWMutex
tsmu sync.RWMutex
td *big.Int
currentBlock *types.Block
currentGasLimit *big.Int
checkpoint int // checkpoint counts towards the new checkpoint
currentHeader *types.Header // Current head of the header chain (may be above the block chain!)
currentBlock *types.Block // Current head of the block chain
headerCache *lru.Cache // Cache for the most recent block headers
bodyCache *lru.Cache // Cache for the most recent block bodies
@ -120,20 +120,15 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
}
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
}
if err := bc.setLastState(); err != nil {
if err := bc.loadLastState(); err != nil {
return nil, err
}
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash, _ := range BadHashes {
if block := bc.GetBlock(hash); block != nil {
glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4])
block = bc.GetBlock(block.ParentHash())
if block == nil {
glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?")
}
bc.SetHead(block)
glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
if header := bc.GetHeader(hash); header != nil {
glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])
bc.SetHead(header.Number.Uint64() - 1)
glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation")
}
}
// Take ownership of this particular state
@ -141,30 +136,75 @@ func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*Bl
return bc, nil
}
func (bc *BlockChain) SetHead(head *types.Block) {
// loadLastState loads the last known chain state from the database. This method
// assumes that the chain manager mutex is held.
func (self *BlockChain) loadLastState() error {
// Restore the last known head block
head := GetHeadBlockHash(self.chainDb)
if head == (common.Hash{}) {
// Corrupt or empty database, init from scratch
self.Reset()
} else {
if block := self.GetBlock(head); block != nil {
// Block found, set as the current head
self.currentBlock = block
} else {
// Corrupt or empty database, init from scratch
self.Reset()
}
}
// Restore the last known head header
self.currentHeader = self.currentBlock.Header()
if head := GetHeadHeaderHash(self.chainDb); head != (common.Hash{}) {
if header := self.GetHeader(head); header != nil {
self.currentHeader = header
}
}
// Issue a status log and return
headerTd := self.GetTd(self.currentHeader.Hash())
blockTd := self.GetTd(self.currentBlock.Hash())
glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.currentHeader.Number, self.currentHeader.Hash(), headerTd)
glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash(), blockTd)
return nil
}
// SetHead rewinds the local chain to a new head entity. In the case of headers,
// everything above the new head will be deleted and the new one set. In the case
// of blocks though, the head may be further rewound if block bodies are missing
// (non-archive nodes after a fast sync).
func (bc *BlockChain) SetHead(head uint64) {
bc.mu.Lock()
defer bc.mu.Unlock()
for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
DeleteBlock(bc.chainDb, block.Hash())
// Delete everything from the current header head (is above block head)
for i := bc.currentHeader.Number.Uint64(); i > head; i-- {
if hash := GetCanonicalHash(bc.chainDb, i); hash != (common.Hash{}) {
DeleteCanonicalHash(bc.chainDb, i)
DeleteHeader(bc.chainDb, hash)
DeleteBody(bc.chainDb, hash)
DeleteTd(bc.chainDb, hash)
}
}
bc.currentHeader = GetHeader(bc.chainDb, GetCanonicalHash(bc.chainDb, head))
// Rewind the block chain until a whole block is found
for bc.GetBlockByNumber(head) == nil {
head--
}
bc.currentBlock = bc.GetBlockByNumber(head)
// Clear out any stale content from the caches
bc.headerCache.Purge()
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
bc.blockCache.Purge()
bc.futureBlocks.Purge()
bc.currentBlock = head
bc.setTotalDifficulty(bc.GetTd(head.Hash()))
bc.insert(head)
bc.setLastState()
}
func (self *BlockChain) Td() *big.Int {
self.mu.RLock()
defer self.mu.RUnlock()
return new(big.Int).Set(self.td)
// Update all computed fields to the new head
bc.insert(bc.currentBlock)
bc.loadLastState()
}
func (self *BlockChain) GasLimit() *big.Int {
@ -181,6 +221,19 @@ func (self *BlockChain) LastBlockHash() common.Hash {
return self.currentBlock.Hash()
}
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the chain manager's internal cache, involving no
// database operations.
func (self *BlockChain) CurrentHeader() *types.Header {
self.mu.RLock()
defer self.mu.RUnlock()
return self.currentHeader
}
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the chain manager's internal cache, involving no
// database operations.
func (self *BlockChain) CurrentBlock() *types.Block {
self.mu.RLock()
defer self.mu.RUnlock()
@ -192,7 +245,7 @@ func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesis
self.mu.RLock()
defer self.mu.RUnlock()
return new(big.Int).Set(self.td), self.currentBlock.Hash(), self.genesisBlock.Hash()
return self.GetTd(self.currentBlock.Hash()), self.currentBlock.Hash(), self.genesisBlock.Hash()
}
func (self *BlockChain) SetProcessor(proc types.BlockProcessor) {
@ -203,26 +256,6 @@ func (self *BlockChain) State() (*state.StateDB, error) {
return state.New(self.CurrentBlock().Root(), self.chainDb)
}
func (bc *BlockChain) setLastState() error {
head := GetHeadBlockHash(bc.chainDb)
if head != (common.Hash{}) {
block := bc.GetBlock(head)
if block != nil {
bc.currentBlock = block
}
} else {
bc.Reset()
}
bc.td = bc.GetTd(bc.currentBlock.Hash())
bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
if glog.V(logger.Info) {
glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
}
return nil
}
// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *BlockChain) Reset() {
bc.ResetWithGenesisBlock(bc.genesisBlock)
@ -238,6 +271,9 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
DeleteBlock(bc.chainDb, block.Hash())
}
for header := bc.currentHeader; header != nil; header = bc.GetHeader(header.ParentHash) {
DeleteBlock(bc.chainDb, header.Hash())
}
bc.headerCache.Purge()
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
@ -254,7 +290,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
bc.setTotalDifficulty(genesis.Difficulty())
bc.currentHeader = bc.genesisBlock.Header()
}
// Export writes the active chain to the given writer.
@ -290,17 +326,26 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
return nil
}
// insert injects a block into the current chain block chain. Note, this function
// assumes that the `mu` mutex is held!
// insert injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header to this very same block to prevent the headers from diverging on a
// different header chain.
//
// Note, this function assumes that the `mu` mutex is held!
func (bc *BlockChain) insert(block *types.Block) {
// Add the block to the canonical chain number scheme and mark as the head
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
glog.Fatalf("failed to insert block number: %v", err)
}
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to insert block number: %v", err)
glog.Fatalf("failed to insert head block hash: %v", err)
}
if err := WriteHeadHeaderHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to insert head header hash: %v", err)
}
// Update the internal state with the head block
bc.currentBlock = block
bc.currentHeader = block.Header()
}
// Accessors
@ -456,19 +501,15 @@ func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*ty
return
}
func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
uncles := []*types.Header{}
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
block = self.GetBlock(block.ParentHash())
}
return
}
// setTotalDifficulty updates the TD of the chain manager. Note, this function
// assumes that the `mu` mutex is held!
func (bc *BlockChain) setTotalDifficulty(td *big.Int) {
bc.td = new(big.Int).Set(td)
return uncles
}
func (bc *BlockChain) Stop() {
@ -504,6 +545,135 @@ const (
SideStatTy
)
// writeHeader writes a header into the local chain, given that its parent is
// already known. If the total difficulty of the newly inserted header becomes
// greater than the old known TD, the canonical chain is re-routed.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganizations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (self *BlockChain) writeHeader(header *types.Header) error {
self.wg.Add(1)
defer self.wg.Done()
// Calculate the total difficulty of the header
ptd := self.GetTd(header.ParentHash)
if ptd == nil {
return ParentError(header.ParentHash)
}
td := new(big.Int).Add(header.Difficulty, ptd)
// Make sure no inconsistent state is leaked during insertion
self.mu.Lock()
defer self.mu.Unlock()
// If the total difficulty is higher than our known, add it to the canonical chain
if td.Cmp(self.GetTd(self.currentHeader.Hash())) > 0 {
// Delete any canonical number assignments above the new head
for i := header.Number.Uint64() + 1; GetCanonicalHash(self.chainDb, i) != (common.Hash{}); i++ {
DeleteCanonicalHash(self.chainDb, i)
}
// Overwrite any stale canonical number assignments
head := self.GetHeader(header.ParentHash)
for GetCanonicalHash(self.chainDb, head.Number.Uint64()) != head.Hash() {
WriteCanonicalHash(self.chainDb, head.Hash(), head.Number.Uint64())
head = self.GetHeader(head.ParentHash)
}
// Extend the canonical chain with the new header
if err := WriteCanonicalHash(self.chainDb, header.Hash(), header.Number.Uint64()); err != nil {
glog.Fatalf("failed to insert header number: %v", err)
}
if err := WriteHeadHeaderHash(self.chainDb, header.Hash()); err != nil {
glog.Fatalf("failed to insert head header hash: %v", err)
}
self.currentHeader = types.CopyHeader(header)
}
// Irrelevant of the canonical status, write the header itself to the database
if err := WriteTd(self.chainDb, header.Hash(), td); err != nil {
glog.Fatalf("failed to write header total difficulty: %v", err)
}
if err := WriteHeader(self.chainDb, header); err != nil {
glog.Fatalf("filed to write header contents: %v", err)
}
return nil
}
// InsertHeaderChain will attempt to insert the given header chain into the
// local chain, possibly creating a fork. If an error is returned, it will
// return the index number of the failing header as well an error describing
// what went wrong.
//
// The verify parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is that some
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (self *BlockChain) InsertHeaderChain(chain []*types.Header, verify bool) (int, error) {
self.wg.Add(1)
defer self.wg.Done()
// Make sure only one thread manipulates the chain at once
self.chainmu.Lock()
defer self.chainmu.Unlock()
// Collect some import statistics to report on
stats := struct{ processed, ignored int }{}
start := time.Now()
// Start the parallel nonce verifier, with a fake nonce if not requested
verifier := self.pow
if !verify {
verifier = FakePow{}
}
nonceAbort, nonceResults := verifyNoncesFromHeaders(verifier, chain)
defer close(nonceAbort)
// Iterate over the headers, inserting any new ones
complete := make([]bool, len(chain))
for i, header := range chain {
// Short circuit insertion if shutting down
if atomic.LoadInt32(&self.procInterrupt) == 1 {
glog.V(logger.Debug).Infoln("Premature abort during header chain processing")
break
}
hash := header.Hash()
// Accumulate verification results until the next header is verified
for !complete[i] {
if res := <-nonceResults; res.valid {
complete[res.index] = true
} else {
header := chain[res.index]
return res.index, &BlockNonceErr{
Hash: header.Hash(),
Number: new(big.Int).Set(header.Number),
Nonce: header.Nonce.Uint64(),
}
}
}
if BadHashes[hash] {
glog.V(logger.Error).Infof("Bad header %d [%x…], known bad hash", header.Number, hash)
return i, BadHashError(hash)
}
// Write the header to the chain and get the status
if self.HasHeader(hash) {
stats.ignored++
continue
}
if err := self.writeHeader(header); err != nil {
return i, err
}
stats.processed++
}
// Report some public statistics so the user has a clue what's going on
first, last := chain[0], chain[len(chain)-1]
glog.V(logger.Info).Infof("imported %d header(s) (%d ignored) in %v. #%v [%x… / %x…]", stats.processed, stats.ignored,
time.Since(start), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
return 0, nil
}
// WriteBlock writes the block to the chain.
func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) {
self.wg.Add(1)
@ -522,7 +692,7 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
if td.Cmp(self.Td()) > 0 {
if td.Cmp(self.GetTd(self.currentBlock.Hash())) > 0 {
// chain fork
if block.ParentHash() != cblock.Hash() {
// during split we merge two different chains and create the new canonical chain
@ -534,7 +704,6 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err
status = CanonStatTy
self.mu.Lock()
self.setTotalDifficulty(td)
self.insert(block)
self.mu.Unlock()
} else {
@ -580,7 +749,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
txcount := 0
for i, block := range chain {
if atomic.LoadInt32(&self.procInterrupt) == 1 {
glog.V(logger.Debug).Infoln("Premature abort during chain processing")
glog.V(logger.Debug).Infoln("Premature abort during block chain processing")
break
}
@ -788,8 +957,7 @@ func (self *BlockChain) postChainEvents(events []interface{}) {
if event, ok := event.(ChainEvent); ok {
// We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long
// and in most cases isn't even necessary.
if self.currentBlock.Hash() == event.Hash {
self.currentGasLimit = CalcGasLimit(event.Block)
if self.LastBlockHash() == event.Hash {
self.eventMux.Post(ChainHeadEvent{event.Block})
}
}
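One behavioural consequence of the split heads shows up in SetHead: the header head is rewound to exactly the requested number, while the block head may drop further back to the most recent number that still has a stored body (e.g. a non-archive node after a fast sync). A small sketch of the observable effect, assuming a chain whose bodies are only present up to some earlier block:

bc.SetHead(1000)

// The header chain is truncated to exactly #1000...
fmt.Println(bc.CurrentHeader().Number) // 1000
// ...while the block head is #1000 only if its body exists locally,
// otherwise the highest earlier number with a full block.
fmt.Println(bc.CurrentBlock().Number())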

View File

@ -64,44 +64,58 @@ func theBlockChain(db ethdb.Database, t *testing.T) *BlockChain {
}
// Test fork of length N starting from block i
func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) {
// switch databases to process the new chain
db, err := ethdb.NewMemDatabase()
if err != nil {
t.Fatal("Failed to create db:", err)
}
// copy old chain up to i into new db with deterministic canonical
bman2, err := newCanonical(i, db)
func testFork(t *testing.T, processor *BlockProcessor, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
// Copy old chain up to #i into a new db
db, processor2, err := newCanonical(i, full)
if err != nil {
t.Fatal("could not make new canonical in testFork", err)
}
// assert the bmans have the same block at i
bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash()
bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash()
if bi1 != bi2 {
fmt.Printf("%+v\n%+v\n\n", bi1, bi2)
t.Fatal("chains do not have the same hash at height", i)
// Assert the chains have the same header/block at #i
var hash1, hash2 common.Hash
if full {
hash1 = processor.bc.GetBlockByNumber(uint64(i)).Hash()
hash2 = processor2.bc.GetBlockByNumber(uint64(i)).Hash()
} else {
hash1 = processor.bc.GetHeaderByNumber(uint64(i)).Hash()
hash2 = processor2.bc.GetHeaderByNumber(uint64(i)).Hash()
}
bman2.bc.SetProcessor(bman2)
// extend the fork
parent := bman2.bc.CurrentBlock()
chainB := makeChain(parent, N, db, forkSeed)
_, err = bman2.bc.InsertChain(chainB)
if err != nil {
t.Fatal("Insert chain error for fork:", err)
if hash1 != hash2 {
t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
}
tdpre := bman.bc.Td()
// Test the fork's blocks on the original chain
td, err := testChain(chainB, bman)
if err != nil {
t.Fatal("expected chainB not to give errors:", err)
// Extend the newly created chain
var (
blockChainB []*types.Block
headerChainB []*types.Header
)
if full {
blockChainB = makeBlockChain(processor2.bc.CurrentBlock(), n, db, forkSeed)
if _, err := processor2.bc.InsertChain(blockChainB); err != nil {
t.Fatalf("failed to insert forking chain: %v", err)
}
} else {
headerChainB = makeHeaderChain(processor2.bc.CurrentHeader(), n, db, forkSeed)
if _, err := processor2.bc.InsertHeaderChain(headerChainB, true); err != nil {
t.Fatalf("failed to insert forking chain: %v", err)
}
}
// Compare difficulties
f(tdpre, td)
// Sanity check that the forked chain can be imported into the original
var tdPre, tdPost *big.Int
// Loop over parents making sure reconstruction is done properly
if full {
tdPre = processor.bc.GetTd(processor.bc.CurrentBlock().Hash())
if err := testBlockChainImport(blockChainB, processor); err != nil {
t.Fatalf("failed to import forked block chain: %v", err)
}
tdPost = processor.bc.GetTd(blockChainB[len(blockChainB)-1].Hash())
} else {
tdPre = processor.bc.GetTd(processor.bc.CurrentHeader().Hash())
if err := testHeaderChainImport(headerChainB, processor); err != nil {
t.Fatalf("failed to import forked header chain: %v", err)
}
tdPost = processor.bc.GetTd(headerChainB[len(headerChainB)-1].Hash())
}
// Compare the total difficulties of the chains
comparator(tdPre, tdPost)
}
func printChain(bc *BlockChain) {
@ -111,22 +125,41 @@ func printChain(bc *BlockChain) {
}
}
// process blocks against a chain
func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) {
for _, block := range chainB {
_, _, err := bman.bc.processor.Process(block)
if err != nil {
// testBlockChainImport tries to process a chain of blocks, writing them into
// the database if successful.
func testBlockChainImport(chain []*types.Block, processor *BlockProcessor) error {
for _, block := range chain {
// Try and process the block
if _, _, err := processor.Process(block); err != nil {
if IsKnownBlockErr(err) {
continue
}
return nil, err
return err
}
bman.bc.mu.Lock()
WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash())))
WriteBlock(bman.bc.chainDb, block)
bman.bc.mu.Unlock()
// Manually insert the block into the database, but don't reorganize (allows subsequent testing)
processor.bc.mu.Lock()
WriteTd(processor.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), processor.bc.GetTd(block.ParentHash())))
WriteBlock(processor.chainDb, block)
processor.bc.mu.Unlock()
}
return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil
return nil
}
// testHeaderChainImport tries to process a chain of headers, writing them into
// the database if successful.
func testHeaderChainImport(chain []*types.Header, processor *BlockProcessor) error {
for _, header := range chain {
// Try and validate the header
if err := processor.ValidateHeader(header, false, false); err != nil {
return err
}
// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
processor.bc.mu.Lock()
WriteTd(processor.chainDb, header.Hash(), new(big.Int).Add(header.Difficulty, processor.bc.GetTd(header.ParentHash)))
WriteHeader(processor.chainDb, header)
processor.bc.mu.Unlock()
}
return nil
}
func loadChain(fn string, t *testing.T) (types.Blocks, error) {
@ -154,139 +187,147 @@ func insertChain(done chan bool, blockchain *BlockChain, chain types.Blocks, t *
}
func TestLastBlock(t *testing.T) {
db, err := ethdb.NewMemDatabase()
if err != nil {
t.Fatal("Failed to create db:", err)
}
db, _ := ethdb.NewMemDatabase()
bchain := theBlockChain(db, t)
block := makeChain(bchain.CurrentBlock(), 1, db, 0)[0]
block := makeBlockChain(bchain.CurrentBlock(), 1, db, 0)[0]
bchain.insert(block)
if block.Hash() != GetHeadBlockHash(db) {
t.Errorf("Write/Get HeadBlockHash failed")
}
}
func TestExtendCanonical(t *testing.T) {
CanonicalLength := 5
db, err := ethdb.NewMemDatabase()
// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
func TestExtendCanonicalBlocks(t *testing.T) { testExtendCanonical(t, true) }
func testExtendCanonical(t *testing.T, full bool) {
length := 5
// Make first chain starting from genesis
_, processor, err := newCanonical(length, full)
if err != nil {
t.Fatal("Failed to create db:", err)
t.Fatalf("failed to make new canonical chain: %v", err)
}
// make first chain starting from genesis
bman, err := newCanonical(CanonicalLength, db)
if err != nil {
t.Fatal("Could not make new canonical chain:", err)
}
f := func(td1, td2 *big.Int) {
// Define the difficulty comparator
better := func(td1, td2 *big.Int) {
if td2.Cmp(td1) <= 0 {
t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1)
t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
}
}
// Start fork from current height (CanonicalLength)
testFork(t, bman, CanonicalLength, 1, f)
testFork(t, bman, CanonicalLength, 2, f)
testFork(t, bman, CanonicalLength, 5, f)
testFork(t, bman, CanonicalLength, 10, f)
// Start fork from current height
testFork(t, processor, length, 1, full, better)
testFork(t, processor, length, 2, full, better)
testFork(t, processor, length, 5, full, better)
testFork(t, processor, length, 10, full, better)
}
func TestShorterFork(t *testing.T) {
db, err := ethdb.NewMemDatabase()
// Tests that given a starting canonical chain of a given size, creating shorter
// forks does not take canonical ownership.
func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
func TestShorterForkBlocks(t *testing.T) { testShorterFork(t, true) }
func testShorterFork(t *testing.T, full bool) {
length := 10
// Make first chain starting from genesis
_, processor, err := newCanonical(length, full)
if err != nil {
t.Fatal("Failed to create db:", err)
t.Fatalf("failed to make new canonical chain: %v", err)
}
// make first chain starting from genesis
bman, err := newCanonical(10, db)
if err != nil {
t.Fatal("Could not make new canonical chain:", err)
}
f := func(td1, td2 *big.Int) {
// Define the difficulty comparator
worse := func(td1, td2 *big.Int) {
if td2.Cmp(td1) >= 0 {
t.Error("expected chainB to have lower difficulty. Got", td2, "expected less than", td1)
t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
}
}
// Sum of numbers must be less than 10
// for this to be a shorter fork
testFork(t, bman, 0, 3, f)
testFork(t, bman, 0, 7, f)
testFork(t, bman, 1, 1, f)
testFork(t, bman, 1, 7, f)
testFork(t, bman, 5, 3, f)
testFork(t, bman, 5, 4, f)
// Sum of numbers must be less than `length` for this to be a shorter fork
testFork(t, processor, 0, 3, full, worse)
testFork(t, processor, 0, 7, full, worse)
testFork(t, processor, 1, 1, full, worse)
testFork(t, processor, 1, 7, full, worse)
testFork(t, processor, 5, 3, full, worse)
testFork(t, processor, 5, 4, full, worse)
}
func TestLongerFork(t *testing.T) {
db, err := ethdb.NewMemDatabase()
// Tests that given a starting canonical chain of a given size, creating longer
// forks does take canonical ownership.
func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
func TestLongerForkBlocks(t *testing.T) { testLongerFork(t, true) }
func testLongerFork(t *testing.T, full bool) {
length := 10
// Make first chain starting from genesis
_, processor, err := newCanonical(length, full)
if err != nil {
t.Fatal("Failed to create db:", err)
t.Fatalf("failed to make new canonical chain: %v", err)
}
// make first chain starting from genesis
bman, err := newCanonical(10, db)
if err != nil {
t.Fatal("Could not make new canonical chain:", err)
}
f := func(td1, td2 *big.Int) {
// Define the difficulty comparator
better := func(td1, td2 *big.Int) {
if td2.Cmp(td1) <= 0 {
t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1)
t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
}
}
// Sum of numbers must be greater than 10
// for this to be a longer fork
testFork(t, bman, 0, 11, f)
testFork(t, bman, 0, 15, f)
testFork(t, bman, 1, 10, f)
testFork(t, bman, 1, 12, f)
testFork(t, bman, 5, 6, f)
testFork(t, bman, 5, 8, f)
// Sum of numbers must be greater than `length` for this to be a longer fork
testFork(t, processor, 0, 11, full, better)
testFork(t, processor, 0, 15, full, better)
testFork(t, processor, 1, 10, full, better)
testFork(t, processor, 1, 12, full, better)
testFork(t, processor, 5, 6, full, better)
testFork(t, processor, 5, 8, full, better)
}
func TestEqualFork(t *testing.T) {
db, err := ethdb.NewMemDatabase()
// Tests that given a starting canonical chain of a given size, creating equal
// forks does take canonical ownership.
func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) }
func TestEqualForkBlocks(t *testing.T) { testEqualFork(t, true) }
func testEqualFork(t *testing.T, full bool) {
length := 10
// Make first chain starting from genesis
_, processor, err := newCanonical(length, full)
if err != nil {
t.Fatal("Failed to create db:", err)
t.Fatalf("failed to make new canonical chain: %v", err)
}
bman, err := newCanonical(10, db)
if err != nil {
t.Fatal("Could not make new canonical chain:", err)
}
f := func(td1, td2 *big.Int) {
// Define the difficulty comparator
equal := func(td1, td2 *big.Int) {
if td2.Cmp(td1) != 0 {
t.Error("expected chainB to have equal difficulty. Got", td2, "expected ", td1)
t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
}
}
// Sum of numbers must be equal to 10
// for this to be an equal fork
testFork(t, bman, 0, 10, f)
testFork(t, bman, 1, 9, f)
testFork(t, bman, 2, 8, f)
testFork(t, bman, 5, 5, f)
testFork(t, bman, 6, 4, f)
testFork(t, bman, 9, 1, f)
// Sum of numbers must be equal to `length` for this to be an equal fork
testFork(t, processor, 0, 10, full, equal)
testFork(t, processor, 1, 9, full, equal)
testFork(t, processor, 2, 8, full, equal)
testFork(t, processor, 5, 5, full, equal)
testFork(t, processor, 6, 4, full, equal)
testFork(t, processor, 9, 1, full, equal)
}
func TestBrokenChain(t *testing.T) {
db, err := ethdb.NewMemDatabase()
// Tests that chains missing links do not get accepted by the processor.
func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true) }
func testBrokenChain(t *testing.T, full bool) {
// Make chain starting from genesis
db, processor, err := newCanonical(10, full)
if err != nil {
t.Fatal("Failed to create db:", err)
t.Fatalf("failed to make new canonical chain: %v", err)
}
bman, err := newCanonical(10, db)
if err != nil {
t.Fatal("Could not make new canonical chain:", err)
}
db2, err := ethdb.NewMemDatabase()
if err != nil {
t.Fatal("Failed to create db:", err)
}
bman2, err := newCanonical(10, db2)
if err != nil {
t.Fatal("Could not make new canonical chain:", err)
}
bman2.bc.SetProcessor(bman2)
parent := bman2.bc.CurrentBlock()
chainB := makeChain(parent, 5, db2, forkSeed)
chainB = chainB[1:]
_, err = testChain(chainB, bman)
if err == nil {
t.Error("expected broken chain to return error")
// Create a forked chain, and try to insert with a missing link
if full {
chain := makeBlockChain(processor.bc.CurrentBlock(), 5, db, forkSeed)[1:]
if err := testBlockChainImport(chain, processor); err == nil {
t.Errorf("broken block chain not reported")
}
} else {
chain := makeHeaderChain(processor.bc.CurrentHeader(), 5, db, forkSeed)[1:]
if err := testHeaderChainImport(chain, processor); err == nil {
t.Errorf("broken header chain not reported")
}
}
}
@ -376,7 +417,16 @@ type bproc struct{}
func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil }
func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
blocks := makeBlockChainWithDiff(genesis, d, seed)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
return headers
}
func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
var chain []*types.Block
for i, difficulty := range d {
header := &types.Header{
@ -410,142 +460,209 @@ func chm(genesis *types.Block, db ethdb.Database) *BlockChain {
return bc
}
func TestReorgLongest(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
// Tests that reorganizing a long difficult chain after a short easy one
// overwrites the canonical numbers and links in the database.
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
}
func testReorgLong(t *testing.T, full bool) {
testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10, full)
}
// Tests that reorganizing a short difficult chain after a long easy one
// overwrites the canonical numbers and links in the database.
func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true) }
func testReorgShort(t *testing.T, full bool) {
testReorg(t, []int{1, 2, 3, 4}, []int{1, 10}, 11, full)
}
func testReorg(t *testing.T, first, second []int, td int64, full bool) {
// Create a pristine block chain
db, _ := ethdb.NewMemDatabase()
genesis, _ := WriteTestNetGenesisBlock(db, 0)
bc := chm(genesis, db)
chain1 := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
chain2 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
bc.InsertChain(chain1)
bc.InsertChain(chain2)
prev := bc.CurrentBlock()
for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
if prev.ParentHash() != block.Hash() {
t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash())
// Insert an easy and a difficult chain afterwards
if full {
bc.InsertChain(makeBlockChainWithDiff(genesis, first, 11))
bc.InsertChain(makeBlockChainWithDiff(genesis, second, 22))
} else {
bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, first, 11), false)
bc.InsertHeaderChain(makeHeaderChainWithDiff(genesis, second, 22), false)
}
// Check that the chain is valid number and link wise
if full {
prev := bc.CurrentBlock()
for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
if prev.ParentHash() != block.Hash() {
t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash(), block.Hash())
}
}
} else {
prev := bc.CurrentHeader()
for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
if prev.ParentHash != header.Hash() {
t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
}
}
}
// Make sure the chain total difficulty is the correct one
want := new(big.Int).Add(genesis.Difficulty(), big.NewInt(td))
if full {
if have := bc.GetTd(bc.CurrentBlock().Hash()); have.Cmp(want) != 0 {
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
}
} else {
if have := bc.GetTd(bc.CurrentHeader().Hash()); have.Cmp(want) != 0 {
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
}
}
}
func TestBadHashes(t *testing.T) {
// Tests that the insertion functions detect banned hashes.
func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
func testBadHashes(t *testing.T, full bool) {
// Create a pristine block chain
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
}
genesis, _ := WriteTestNetGenesisBlock(db, 0)
bc := chm(genesis, db)
chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
BadHashes[chain[2].Header().Hash()] = true
_, err = bc.InsertChain(chain)
// Create a chain, ban a hash and try to import
var err error
if full {
blocks := makeBlockChainWithDiff(genesis, []int{1, 2, 4}, 10)
BadHashes[blocks[2].Header().Hash()] = true
_, err = bc.InsertChain(blocks)
} else {
headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 4}, 10)
BadHashes[headers[2].Hash()] = true
_, err = bc.InsertHeaderChain(headers, true)
}
if !IsBadHashError(err) {
t.Errorf("error mismatch: want: BadHashError, have: %v", err)
}
}
func TestReorgBadHashes(t *testing.T) {
// Tests that bad hashes are detected on boot, and the chain rolled back to a
// good state prior to the bad hash.
func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
func testReorgBadHashes(t *testing.T, full bool) {
// Create a pristine block chain
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
}
genesis, _ := WriteTestNetGenesisBlock(db, 0)
bc := chm(genesis, db)
chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
bc.InsertChain(chain)
// Create a chain, import and ban afterwards
headers := makeHeaderChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
blocks := makeBlockChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
if chain[3].Header().Hash() != bc.LastBlockHash() {
t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash())
if full {
if _, err := bc.InsertChain(blocks); err != nil {
t.Fatalf("failed to import blocks: %v", err)
}
if bc.CurrentBlock().Hash() != blocks[3].Hash() {
t.Errorf("last block hash mismatch: have: %x, want %x", bc.CurrentBlock().Hash(), blocks[3].Header().Hash())
}
BadHashes[blocks[3].Header().Hash()] = true
defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
} else {
if _, err := bc.InsertHeaderChain(headers, true); err != nil {
t.Fatalf("failed to import headers: %v", err)
}
if bc.CurrentHeader().Hash() != headers[3].Hash() {
t.Errorf("last header hash mismatch: have: %x, want %x", bc.CurrentHeader().Hash(), headers[3].Hash())
}
BadHashes[headers[3].Hash()] = true
defer func() { delete(BadHashes, headers[3].Hash()) }()
}
// NewChainManager should check BadHashes when loading it db
BadHashes[chain[3].Header().Hash()] = true
var eventMux event.TypeMux
ncm, err := NewBlockChain(db, FakePow{}, &eventMux)
// Create a new chain manager and check it rolled back the state
ncm, err := NewBlockChain(db, FakePow{}, new(event.TypeMux))
if err != nil {
t.Errorf("NewChainManager err: %s", err)
t.Fatalf("failed to create new chain manager: %v", err)
}
// check it set head to (valid) parent of bad hash block
if chain[2].Header().Hash() != ncm.LastBlockHash() {
t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash())
}
if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit())
}
}
func TestReorgShortest(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, 0)
if err != nil {
t.Error(err)
t.FailNow()
}
bc := chm(genesis, db)
chain1 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 10)
chain2 := makeChainWithDiff(genesis, []int{1, 10}, 11)
bc.InsertChain(chain1)
bc.InsertChain(chain2)
prev := bc.CurrentBlock()
for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
if prev.ParentHash() != block.Hash() {
t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash())
if full {
if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
}
if blocks[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 {
t.Errorf("last block gasLimit mismatch: have: %x, want %x", ncm.GasLimit(), blocks[2].Header().GasLimit)
}
} else {
if ncm.CurrentHeader().Hash() != genesis.Hash() {
t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), genesis.Hash())
}
}
}
func TestInsertNonceError(t *testing.T) {
// Tests chain insertions in the face of one entity containing an invalid nonce.
func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) }
func TestBlocksInsertNonceError(t *testing.T) { testInsertNonceError(t, true) }
func testInsertNonceError(t *testing.T, full bool) {
for i := 1; i < 25 && !t.Failed(); i++ {
db, _ := ethdb.NewMemDatabase()
genesis, err := WriteTestNetGenesisBlock(db, 0)
// Create a pristine chain and database
db, processor, err := newCanonical(0, full)
if err != nil {
t.Error(err)
t.FailNow()
t.Fatalf("failed to create pristine chain: %v", err)
}
bc := chm(genesis, db)
bc.processor = NewBlockProcessor(db, bc.pow, bc, bc.eventMux)
blocks := makeChain(bc.currentBlock, i, db, 0)
bc := processor.bc
fail := rand.Int() % len(blocks)
failblock := blocks[fail]
bc.pow = failPow{failblock.NumberU64()}
n, err := bc.InsertChain(blocks)
// Create and insert a chain with a failing nonce
var (
failAt int
failRes int
failNum uint64
failHash common.Hash
)
if full {
blocks := makeBlockChain(processor.bc.CurrentBlock(), i, db, 0)
failAt = rand.Int() % len(blocks)
failNum = blocks[failAt].NumberU64()
failHash = blocks[failAt].Hash()
processor.bc.pow = failPow{failNum}
failRes, err = processor.bc.InsertChain(blocks)
} else {
headers := makeHeaderChain(processor.bc.CurrentHeader(), i, db, 0)
failAt = rand.Int() % len(headers)
failNum = headers[failAt].Number.Uint64()
failHash = headers[failAt].Hash()
processor.bc.pow = failPow{failNum}
failRes, err = processor.bc.InsertHeaderChain(headers, true)
}
// Check that the returned error indicates the nonce failure.
if n != fail {
t.Errorf("(i=%d) wrong failed block index: got %d, want %d", i, n, fail)
if failRes != failAt {
t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
}
if !IsBlockNonceErr(err) {
t.Fatalf("(i=%d) got %q, want a nonce error", i, err)
t.Fatalf("test %d: error mismatch: have %v, want nonce error", i, err)
}
nerr := err.(*BlockNonceErr)
if nerr.Number.Cmp(failblock.Number()) != 0 {
t.Errorf("(i=%d) wrong block number in error, got %v, want %v", i, nerr.Number, failblock.Number())
if nerr.Number.Uint64() != failNum {
t.Errorf("test %d: number mismatch: have %v, want %v", i, nerr.Number, failNum)
}
if nerr.Hash != failblock.Hash() {
t.Errorf("(i=%d) wrong block hash in error, got %v, want %v", i, nerr.Hash, failblock.Hash())
if nerr.Hash != failHash {
t.Errorf("test %d: hash mismatch: have %x, want %x", i, nerr.Hash[:4], failHash[:4])
}
// Check that no blocks after the failing block have been inserted.
for _, block := range blocks[fail:] {
if bc.HasBlock(block.Hash()) {
t.Errorf("(i=%d) invalid block %d present in chain", i, block.NumberU64())
for j := 0; j < i-failAt; j++ {
if full {
if block := bc.GetBlockByNumber(failNum + uint64(j)); block != nil {
t.Errorf("test %d: invalid block in chain: %v", i, block)
}
} else {
if header := bc.GetHeaderByNumber(failNum + uint64(j)); header != nil {
t.Errorf("test %d: invalid header in chain: %v", i, header)
}
}
}
}
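All the rewritten tests follow one pattern: a thin exported test per mode delegating to a shared helper that branches on full. A hedged sketch of adding a new test in the same style, reusing this file's own helpers (the test name and chain lengths are illustrative):

func TestSomethingHeaders(t *testing.T) { testSomething(t, false) }
func TestSomethingBlocks(t *testing.T)  { testSomething(t, true) }

func testSomething(t *testing.T, full bool) {
	db, processor, err := newCanonical(10, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	if full {
		blocks := makeBlockChain(processor.bc.CurrentBlock(), 3, db, forkSeed)
		if _, err := processor.bc.InsertChain(blocks); err != nil {
			t.Fatalf("failed to insert block chain: %v", err)
		}
	} else {
		headers := makeHeaderChain(processor.bc.CurrentHeader(), 3, db, forkSeed)
		if _, err := processor.bc.InsertHeaderChain(headers, true); err != nil {
			t.Fatalf("failed to insert header chain: %v", err)
		}
	}
}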

View File

@ -211,25 +211,49 @@ func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {
}
}
// newCanonical creates a new deterministic canonical chain by running
// InsertChain on the result of makeChain.
func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) {
// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, it creates either a full block chain or a
// header-only chain.
func newCanonical(n int, full bool) (ethdb.Database, *BlockProcessor, error) {
// Create the new chain database
db, _ := ethdb.NewMemDatabase()
evmux := &event.TypeMux{}
WriteTestNetGenesisBlock(db, 0)
chainman, _ := NewBlockChain(db, FakePow{}, evmux)
bman := NewBlockProcessor(db, FakePow{}, chainman, evmux)
bman.bc.SetProcessor(bman)
parent := bman.bc.CurrentBlock()
// Initialize a fresh chain with only a genesis block
genesis, _ := WriteTestNetGenesisBlock(db, 0)
blockchain, _ := NewBlockChain(db, FakePow{}, evmux)
processor := NewBlockProcessor(db, FakePow{}, blockchain, evmux)
processor.bc.SetProcessor(processor)
// Create and inject the requested chain
if n == 0 {
return bman, nil
return db, processor, nil
}
lchain := makeChain(parent, n, db, canonicalSeed)
_, err := bman.bc.InsertChain(lchain)
return bman, err
if full {
// Full block-chain requested
blocks := makeBlockChain(genesis, n, db, canonicalSeed)
_, err := blockchain.InsertChain(blocks)
return db, processor, err
}
// Header-only chain requested
headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
_, err := blockchain.InsertHeaderChain(headers, true)
return db, processor, err
}
func makeChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
// makeHeaderChain creates a deterministic chain of headers rooted at parent.
func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header {
blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, db, seed)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
return headers
}
// makeBlockChain creates a deterministic chain of blocks rooted at parent.
func makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
return GenerateChain(parent, db, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
})

View File

@ -184,7 +184,7 @@ var (
// are ignored and set to values derived from the given txs, uncles
// and receipts.
func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block {
b := &Block{header: copyHeader(header), td: new(big.Int)}
b := &Block{header: CopyHeader(header), td: new(big.Int)}
// TODO: panic if len(txs) != len(receipts)
if len(txs) == 0 {
@ -210,7 +210,7 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
b.header.UncleHash = CalcUncleHash(uncles)
b.uncles = make([]*Header, len(uncles))
for i := range uncles {
b.uncles[i] = copyHeader(uncles[i])
b.uncles[i] = CopyHeader(uncles[i])
}
}
@ -221,10 +221,12 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
// header data is copied, changes to header and to the field values
// will not affect the block.
func NewBlockWithHeader(header *Header) *Block {
return &Block{header: copyHeader(header)}
return &Block{header: CopyHeader(header)}
}
func copyHeader(h *Header) *Header {
// CopyHeader creates a deep copy of a block header to prevent side effects from
// modifying a header variable.
func CopyHeader(h *Header) *Header {
cpy := *h
if cpy.Time = new(big.Int); h.Time != nil {
cpy.Time.Set(h.Time)
@ -326,7 +328,7 @@ func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash }
func (b *Block) UncleHash() common.Hash { return b.header.UncleHash }
func (b *Block) Extra() []byte { return common.CopyBytes(b.header.Extra) }
func (b *Block) Header() *Header { return copyHeader(b.header) }
func (b *Block) Header() *Header { return CopyHeader(b.header) }
func (b *Block) HashNoNonce() common.Hash {
return b.header.HashNoNonce()
@ -370,13 +372,13 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
// WithBody returns a new block with the given transaction and uncle contents.
func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block {
block := &Block{
header: copyHeader(b.header),
header: CopyHeader(b.header),
transactions: make([]*Transaction, len(transactions)),
uncles: make([]*Header, len(uncles)),
}
copy(block.transactions, transactions)
for i := range uncles {
block.uncles[i] = copyHeader(uncles[i])
block.uncles[i] = CopyHeader(uncles[i])
}
return block
}
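copyHeader is promoted to the exported CopyHeader so that code outside the types package (notably writeHeader above, which caches the new currentHeader) can take defensive deep copies. A tiny sketch of the aliasing it guards against, using the Time field that the function visibly deep-copies (fmt and math/big assumed imported):

h := &types.Header{Number: big.NewInt(42), Time: big.NewInt(100)}
cached := types.CopyHeader(h)

// Mutating the caller's header leaves the cached copy untouched,
// because the big.Int fields were copied rather than shared.
h.Time.SetInt64(200)
fmt.Println(cached.Time) // 100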

View File

@ -464,7 +464,7 @@ func (s *Ethereum) NodeInfo() *NodeInfo {
DiscPort: int(node.UDP),
TCPPort: int(node.TCP),
ListenAddr: s.net.ListenAddr,
Td: s.BlockChain().Td().String(),
Td: s.BlockChain().GetTd(s.BlockChain().CurrentBlock().Hash()).String(),
}
}

View File

@ -589,15 +589,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
request.Block.ReceivedAt = msg.ReceivedAt
// Mark the block's arrival for whatever reason
_, chainHead, _ := pm.blockchain.Status()
jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{
BlockHash: request.Block.Hash().Hex(),
BlockNumber: request.Block.Number(),
ChainHeadHash: chainHead.Hex(),
BlockPrevHash: request.Block.ParentHash().Hex(),
RemoteId: p.ID().String(),
})
// Mark the peer as owning the block and schedule it for import
p.MarkBlock(request.Block.Hash())
p.SetHead(request.Block.Hash())
@ -607,7 +598,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Update the peers total difficulty if needed, schedule a download if gapped
if request.TD.Cmp(p.Td()) > 0 {
p.SetTd(request.TD)
if request.TD.Cmp(new(big.Int).Add(pm.blockchain.Td(), request.Block.Difficulty())) > 0 {
td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
if request.TD.Cmp(new(big.Int).Add(td, request.Block.Difficulty())) > 0 {
go pm.synchronise(p)
}
}
@ -624,12 +616,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "transaction %d is nil", i)
}
p.MarkTransaction(tx.Hash())
// Log it's arrival for later analysis
jsonlogger.LogJson(&logger.EthTxReceived{
TxHash: tx.Hash().Hex(),
RemoteId: p.ID().String(),
})
}
pm.txpool.AddTransactions(txs)

View File

@ -160,7 +160,8 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
return
}
// Make sure the peer's TD is higher than our own. If not drop.
if peer.Td().Cmp(pm.blockchain.Td()) <= 0 {
td := pm.blockchain.GetTd(pm.blockchain.CurrentBlock().Hash())
if peer.Td().Cmp(td) <= 0 {
return
}
// Otherwise try to sync with the downloader

View File

@ -146,13 +146,7 @@ func (self *debugApi) SetHead(req *shared.Request) (interface{}, error) {
if err := self.codec.Decode(req.Params, &args); err != nil {
return nil, shared.NewDecodeParamError(err.Error())
}
block := self.xeth.EthBlockByNumber(args.BlockNumber)
if block == nil {
return nil, fmt.Errorf("block #%d not found", args.BlockNumber)
}
self.ethereum.BlockChain().SetHead(block)
self.ethereum.BlockChain().SetHead(uint64(args.BlockNumber))
return nil, nil
}