core, eth: receipt chain reconstruction

Péter Szilágyi 2015-09-30 19:23:31 +03:00
parent 42c8afd440
commit 832b37c822
22 changed files with 613 additions and 230 deletions


@@ -163,7 +163,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 	// Generate a chain of b.N blocks using the supplied block
 	// generator function.
 	genesis := WriteGenesisBlockForTesting(db, GenesisAccount{benchRootAddr, benchRootFunds})
-	chain := GenerateChain(genesis, db, b.N, gen)
+	chain, _ := GenerateChain(genesis, db, b.N, gen)
 	// Time the insertion of the new chain.
 	// State and blocks are stored in the same DB.


@@ -71,14 +71,14 @@ func TestPutReceipt(t *testing.T) {
 	receipt := new(types.Receipt)
 	receipt.Logs = vm.Logs{&vm.Log{
 		Address:     addr,
 		Topics:      []common.Hash{hash},
 		Data:        []byte("hi"),
-		Number:      42,
+		BlockNumber: 42,
 		TxHash:      hash,
 		TxIndex:     0,
 		BlockHash:   hash,
 		Index:       0,
 	}}
 	PutReceipts(db, types.Receipts{receipt})


@@ -29,6 +29,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/logger"
@@ -67,9 +68,10 @@ type BlockChain struct {
 	chainmu sync.RWMutex
 	tsmu    sync.RWMutex

 	checkpoint       int           // checkpoint counts towards the new checkpoint
 	currentHeader    *types.Header // Current head of the header chain (may be above the block chain!)
 	currentBlock     *types.Block  // Current head of the block chain
+	currentFastBlock *types.Block  // Current head of the fast-sync chain (may be above the block chain!)

 	headerCache *lru.Cache // Cache for the most recent block headers
 	bodyCache   *lru.Cache // Cache for the most recent block bodies
@@ -160,12 +162,21 @@ func (self *BlockChain) loadLastState() error {
 			self.currentHeader = header
 		}
 	}
+	// Restore the last known head fast block
+	self.currentFastBlock = self.currentBlock
+	if head := GetHeadFastBlockHash(self.chainDb); head != (common.Hash{}) {
+		if block := self.GetBlock(head); block != nil {
+			self.currentFastBlock = block
+		}
+	}
 	// Issue a status log and return
 	headerTd := self.GetTd(self.currentHeader.Hash())
 	blockTd := self.GetTd(self.currentBlock.Hash())
+	fastTd := self.GetTd(self.currentFastBlock.Hash())

-	glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.currentHeader.Number, self.currentHeader.Hash(), headerTd)
-	glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash(), blockTd)
+	glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.currentHeader.Number, self.currentHeader.Hash().Bytes()[:4], headerTd)
+	glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)
+	glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)

 	return nil
 }
@@ -178,23 +189,48 @@ func (bc *BlockChain) SetHead(head uint64) {
 	bc.mu.Lock()
 	defer bc.mu.Unlock()

-	// Delete everything from the current header head (is above block head)
-	for i := bc.currentHeader.Number.Uint64(); i > head; i-- {
-		if hash := GetCanonicalHash(bc.chainDb, i); hash != (common.Hash{}) {
-			DeleteCanonicalHash(bc.chainDb, i)
-			DeleteHeader(bc.chainDb, hash)
-			DeleteBody(bc.chainDb, hash)
-			DeleteTd(bc.chainDb, hash)
-		}
-	}
-	bc.currentHeader = GetHeader(bc.chainDb, GetCanonicalHash(bc.chainDb, head))
-
-	// Rewind the block chain until a whole block is found
-	for bc.GetBlockByNumber(head) == nil {
-		head--
-	}
-	bc.currentBlock = bc.GetBlockByNumber(head)
+	// Figure out the highest known canonical assignment
+	height := uint64(0)
+	if bc.currentHeader != nil {
+		if hh := bc.currentHeader.Number.Uint64(); hh > height {
+			height = hh
+		}
+	}
+	if bc.currentBlock != nil {
+		if bh := bc.currentBlock.NumberU64(); bh > height {
+			height = bh
+		}
+	}
+	if bc.currentFastBlock != nil {
+		if fbh := bc.currentFastBlock.NumberU64(); fbh > height {
+			height = fbh
+		}
+	}
+	// Gather all the hashes that need deletion
+	drop := make(map[common.Hash]struct{})
+
+	for bc.currentHeader != nil && bc.currentHeader.Number.Uint64() > head {
+		drop[bc.currentHeader.Hash()] = struct{}{}
+		bc.currentHeader = bc.GetHeader(bc.currentHeader.ParentHash)
+	}
+	for bc.currentBlock != nil && bc.currentBlock.NumberU64() > head {
+		drop[bc.currentBlock.Hash()] = struct{}{}
+		bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash())
+	}
+	for bc.currentFastBlock != nil && bc.currentFastBlock.NumberU64() > head {
+		drop[bc.currentFastBlock.Hash()] = struct{}{}
+		bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash())
+	}
+	// Roll back the canonical chain numbering
+	for i := height; i > head; i-- {
+		DeleteCanonicalHash(bc.chainDb, i)
+	}
+	// Delete everything found by the above rewind
+	for hash, _ := range drop {
+		DeleteHeader(bc.chainDb, hash)
+		DeleteBody(bc.chainDb, hash)
+		DeleteTd(bc.chainDb, hash)
+	}
 	// Clear out any stale content from the caches
 	bc.headerCache.Purge()
 	bc.bodyCache.Purge()
@@ -203,6 +239,9 @@ func (bc *BlockChain) SetHead(head uint64) {
 	bc.futureBlocks.Purge()

 	// Update all computed fields to the new head
+	if bc.currentBlock == nil {
+		bc.currentBlock = bc.genesisBlock
+	}
 	bc.insert(bc.currentBlock)
 	bc.loadLastState()
 }
@@ -222,8 +261,7 @@ func (self *BlockChain) LastBlockHash() common.Hash {
 }

 // CurrentHeader retrieves the current head header of the canonical chain. The
-// header is retrieved from the chain manager's internal cache, involving no
-// database operations.
+// header is retrieved from the chain manager's internal cache.
 func (self *BlockChain) CurrentHeader() *types.Header {
 	self.mu.RLock()
 	defer self.mu.RUnlock()
@@ -232,8 +270,7 @@ func (self *BlockChain) CurrentHeader() *types.Header {
 }

 // CurrentBlock retrieves the current head block of the canonical chain. The
-// block is retrieved from the chain manager's internal cache, involving no
-// database operations.
+// block is retrieved from the chain manager's internal cache.
 func (self *BlockChain) CurrentBlock() *types.Block {
 	self.mu.RLock()
 	defer self.mu.RUnlock()
@@ -241,6 +278,15 @@ func (self *BlockChain) CurrentBlock() *types.Block {
 	return self.currentBlock
 }

+// CurrentFastBlock retrieves the current fast-sync head block of the canonical
+// chain. The block is retrieved from the chain manager's internal cache.
+func (self *BlockChain) CurrentFastBlock() *types.Block {
+	self.mu.RLock()
+	defer self.mu.RUnlock()
+
+	return self.currentFastBlock
+}
+
 func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) {
 	self.mu.RLock()
 	defer self.mu.RUnlock()
@@ -264,22 +310,12 @@ func (bc *BlockChain) Reset() {
 // ResetWithGenesisBlock purges the entire blockchain, restoring it to the
 // specified genesis state.
 func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
+	// Dump the entire block chain and purge the caches
+	bc.SetHead(0)
+
 	bc.mu.Lock()
 	defer bc.mu.Unlock()

-	// Dump the entire block chain and purge the caches
-	for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
-		DeleteBlock(bc.chainDb, block.Hash())
-	}
-	for header := bc.currentHeader; header != nil; header = bc.GetHeader(header.ParentHash) {
-		DeleteBlock(bc.chainDb, header.Hash())
-	}
-	bc.headerCache.Purge()
-	bc.bodyCache.Purge()
-	bc.bodyRLPCache.Purge()
-	bc.blockCache.Purge()
-	bc.futureBlocks.Purge()
-
 	// Prepare the genesis block and reinitialize the chain
 	if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil {
 		glog.Fatalf("failed to write genesis block TD: %v", err)
@@ -291,6 +327,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
 	bc.insert(bc.genesisBlock)
 	bc.currentBlock = bc.genesisBlock
 	bc.currentHeader = bc.genesisBlock.Header()
+	bc.currentFastBlock = bc.genesisBlock
 }

 // Export writes the active chain to the given writer.
@@ -328,8 +365,8 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
 // insert injects a new head block into the current block chain. This method
 // assumes that the block is indeed a true head. It will also reset the head
-// header to this very same block to prevent the headers from diverging on a
-// different header chain.
+// header and the head fast sync block to this very same block to prevent them
+// from diverging on a different header chain.
 //
 // Note, this function assumes that the `mu` mutex is held!
 func (bc *BlockChain) insert(block *types.Block) {
@@ -343,9 +380,13 @@ func (bc *BlockChain) insert(block *types.Block) {
 	if err := WriteHeadHeaderHash(bc.chainDb, block.Hash()); err != nil {
 		glog.Fatalf("failed to insert head header hash: %v", err)
 	}
+	if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
+		glog.Fatalf("failed to insert head fast block hash: %v", err)
+	}
 	// Update the internal state with the head block
 	bc.currentBlock = block
 	bc.currentHeader = block.Header()
+	bc.currentFastBlock = block
 }

 // Accessors
@@ -634,7 +675,7 @@ func (self *BlockChain) InsertHeaderChain(chain []*types.Header, verify bool) (i
 	for i, header := range chain {
 		// Short circuit insertion if shutting down
 		if atomic.LoadInt32(&self.procInterrupt) == 1 {
-			glog.V(logger.Debug).Infoln("Premature abort during header chain processing")
+			glog.V(logger.Debug).Infoln("premature abort during header chain processing")
 			break
 		}
 		hash := header.Hash()
@@ -653,7 +694,7 @@ func (self *BlockChain) InsertHeaderChain(chain []*types.Header, verify bool) (i
 			}
 		}
 		if BadHashes[hash] {
-			glog.V(logger.Error).Infof("Bad header %d [%x…], known bad hash", header.Number, hash)
+			glog.V(logger.Error).Infof("bad header %d [%x…], known bad hash", header.Number, hash)
 			return i, BadHashError(hash)
 		}
 		// Write the header to the chain and get the status
@@ -674,6 +715,95 @@ func (self *BlockChain) InsertHeaderChain(chain []*types.Header, verify bool) (i
 	return 0, nil
 }
+// InsertReceiptChain attempts to complete an already existing header chain with
+// transaction and receipt data.
+func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
+	self.wg.Add(1)
+	defer self.wg.Done()
+
+	// Make sure only one thread manipulates the chain at once
+	self.chainmu.Lock()
+	defer self.chainmu.Unlock()
+
+	// Collect some import statistics to report on
+	stats := struct{ processed, ignored int }{}
+	start := time.Now()
+
+	// Iterate over the blocks and receipts, inserting any new ones
+	for i := 0; i < len(blockChain) && i < len(receiptChain); i++ {
+		block, receipts := blockChain[i], receiptChain[i]
+
+		// Short circuit insertion if shutting down
+		if atomic.LoadInt32(&self.procInterrupt) == 1 {
+			glog.V(logger.Debug).Infoln("premature abort during receipt chain processing")
+			break
+		}
+		// Short circuit if the owner header is unknown
+		if !self.HasHeader(block.Hash()) {
+			glog.V(logger.Debug).Infof("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
+			return i, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
+		}
+		// Skip if the entire data is already known
+		if self.HasBlock(block.Hash()) {
+			stats.ignored++
+			continue
+		}
+		// Compute all the non-consensus fields of the receipts
+		transactions, logIndex := block.Transactions(), uint(0)
+		for j := 0; j < len(receipts); j++ {
+			// The transaction hash can be retrieved from the transaction itself
+			receipts[j].TxHash = transactions[j].Hash()
+
+			// The contract address can be derived from the transaction itself
+			if MessageCreatesContract(transactions[j]) {
+				from, _ := transactions[j].From()
+				receipts[j].ContractAddress = crypto.CreateAddress(from, transactions[j].Nonce())
+			}
+			// The used gas can be calculated based on previous receipts
+			if j == 0 {
+				receipts[j].GasUsed = new(big.Int).Set(receipts[j].CumulativeGasUsed)
+			} else {
+				receipts[j].GasUsed = new(big.Int).Sub(receipts[j].CumulativeGasUsed, receipts[j-1].CumulativeGasUsed)
+			}
+			// The derived log fields can simply be set from the block and transaction
+			for k := 0; k < len(receipts[j].Logs); k++ {
+				receipts[j].Logs[k].BlockNumber = block.NumberU64()
+				receipts[j].Logs[k].BlockHash = block.Hash()
+				receipts[j].Logs[k].TxHash = receipts[j].TxHash
+				receipts[j].Logs[k].TxIndex = uint(j)
+				receipts[j].Logs[k].Index = logIndex
+				logIndex++
+			}
+		}
+		// Write all the data out into the database
+		if err := WriteBody(self.chainDb, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil {
+			glog.Fatalf("failed to write block body: %v", err)
+			return i, err
+		}
+		if err := PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
+			glog.Fatalf("failed to write block receipts: %v", err)
+			return i, err
+		}
+		// Update the head fast sync block if better
+		self.mu.Lock()
+		if self.GetTd(self.currentFastBlock.Hash()).Cmp(self.GetTd(block.Hash())) < 0 {
+			if err := WriteHeadFastBlockHash(self.chainDb, block.Hash()); err != nil {
+				glog.Fatalf("failed to update head fast block hash: %v", err)
+			}
+			self.currentFastBlock = block
+		}
+		self.mu.Unlock()
+
+		stats.processed++
+	}
+	// Report some public statistics so the user has a clue what's going on
+	first, last := blockChain[0], blockChain[len(blockChain)-1]
+	glog.V(logger.Info).Infof("imported %d receipt(s) (%d ignored) in %v. #%d [%x… / %x…]", stats.processed, stats.ignored,
+		time.Since(start), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])
+
+	return 0, nil
+}
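Note: the gas accounting above works because receipts only carry the running CumulativeGasUsed counter; each transaction's own GasUsed is the difference between consecutive counters. A minimal standalone sketch of the same derivation, with invented cumulative values:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Invented cumulative gas counters for three transactions in one block.
	cumulative := []*big.Int{big.NewInt(21000), big.NewInt(63000), big.NewInt(84000)}
	for j := range cumulative {
		// First receipt: GasUsed equals the cumulative counter itself.
		used := new(big.Int).Set(cumulative[j])
		if j > 0 {
			// Later receipts: subtract the previous cumulative counter.
			used.Sub(cumulative[j], cumulative[j-1])
		}
		fmt.Printf("tx %d used %v gas\n", j, used) // 21000, 42000, 21000
	}
}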
 // WriteBlock writes the block to the chain.
 func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) {
 	self.wg.Add(1)

@@ -799,7 +929,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
 			return i, err
 		}
-		if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil {
+		if err := PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
 			glog.V(logger.Warn).Infoln("error writing block receipts:", err)
 		}


@@ -430,9 +430,12 @@ func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.B
 	var chain []*types.Block
 	for i, difficulty := range d {
 		header := &types.Header{
			Coinbase:    common.Address{seed},
			Number:      big.NewInt(int64(i + 1)),
			Difficulty:  big.NewInt(int64(difficulty)),
+			UncleHash:   types.EmptyUncleHash,
+			TxHash:      types.EmptyRootHash,
+			ReceiptHash: types.EmptyRootHash,
 		}
 		if i == 0 {
 			header.ParentHash = genesis.Hash()
@@ -668,6 +671,155 @@ func testInsertNonceError(t *testing.T, full bool) {
 	}
 }
+// Tests that fast importing a block chain produces the same chain data as the
+// classical full block processing.
+func TestFastVsFullChains(t *testing.T) {
+	// Configure and generate a sample block chain
+	var (
+		gendb, _ = ethdb.NewMemDatabase()
+		key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address  = crypto.PubkeyToAddress(key.PublicKey)
+		funds    = big.NewInt(1000000000)
+		genesis  = GenesisBlockForTesting(gendb, address, funds)
+	)
+	blocks, receipts := GenerateChain(genesis, gendb, 1024, func(i int, block *BlockGen) {
+		block.SetCoinbase(common.Address{0x00})
+
+		// If the block number is multiple of 3, send a few bonus transactions to the miner
+		if i%3 == 2 {
+			for j := 0; j < i%4+1; j++ {
+				tx, err := types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key)
+				if err != nil {
+					panic(err)
+				}
+				block.AddTx(tx)
+			}
+		}
+		// If the block number is a multiple of 5, add a few bonus uncles to the block
+		if i%5 == 5 {
+			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
+		}
+	})
+	// Import the chain as an archive node for the comparison baseline
+	archiveDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})
+
+	archive, _ := NewBlockChain(archiveDb, FakePow{}, new(event.TypeMux))
+	archive.SetProcessor(NewBlockProcessor(archiveDb, FakePow{}, archive, new(event.TypeMux)))
+
+	if n, err := archive.InsertChain(blocks); err != nil {
+		t.Fatalf("failed to process block %d: %v", n, err)
+	}
+	// Fast import the chain as a non-archive node to test
+	fastDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
+	fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
+
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	if n, err := fast.InsertHeaderChain(headers, true); err != nil {
+		t.Fatalf("failed to insert header %d: %v", n, err)
+	}
+	if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
+		t.Fatalf("failed to insert receipt %d: %v", n, err)
+	}
+	// Iterate over all chain data components, and cross reference
+	for i := 0; i < len(blocks); i++ {
+		num, hash := blocks[i].NumberU64(), blocks[i].Hash()
+
+		if ftd, atd := fast.GetTd(hash), archive.GetTd(hash); ftd.Cmp(atd) != 0 {
+			t.Errorf("block #%d [%x]: td mismatch: have %v, want %v", num, hash, ftd, atd)
+		}
+		if fheader, aheader := fast.GetHeader(hash), archive.GetHeader(hash); fheader.Hash() != aheader.Hash() {
+			t.Errorf("block #%d [%x]: header mismatch: have %v, want %v", num, hash, fheader, aheader)
+		}
+		if fblock, ablock := fast.GetBlock(hash), archive.GetBlock(hash); fblock.Hash() != ablock.Hash() {
+			t.Errorf("block #%d [%x]: block mismatch: have %v, want %v", num, hash, fblock, ablock)
+		} else if types.DeriveSha(fblock.Transactions()) != types.DeriveSha(ablock.Transactions()) {
+			t.Errorf("block #%d [%x]: transactions mismatch: have %v, want %v", num, hash, fblock.Transactions(), ablock.Transactions())
+		} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(ablock.Uncles()) {
+			t.Errorf("block #%d [%x]: uncles mismatch: have %v, want %v", num, hash, fblock.Uncles(), ablock.Uncles())
+		}
+		if freceipts, areceipts := GetBlockReceipts(fastDb, hash), GetBlockReceipts(archiveDb, hash); types.DeriveSha(freceipts) != types.DeriveSha(areceipts) {
+			t.Errorf("block #%d [%x]: receipts mismatch: have %v, want %v", num, hash, freceipts, areceipts)
+		}
+	}
+	// Check that the canonical chains are the same between the databases
+	for i := 0; i < len(blocks)+1; i++ {
+		if fhash, ahash := GetCanonicalHash(fastDb, uint64(i)), GetCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
+			t.Errorf("block #%d: canonical hash mismatch: have %v, want %v", i, fhash, ahash)
+		}
+	}
+}
+
+// Tests that various import methods move the chain head pointers to the correct
+// positions.
+func TestLightVsFastVsFullChainHeads(t *testing.T) {
+	// Configure and generate a sample block chain
+	var (
+		gendb, _ = ethdb.NewMemDatabase()
+		key, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		address  = crypto.PubkeyToAddress(key.PublicKey)
+		funds    = big.NewInt(1000000000)
+		genesis  = GenesisBlockForTesting(gendb, address, funds)
+	)
+	height := uint64(1024)
+	blocks, receipts := GenerateChain(genesis, gendb, int(height), nil)
+
+	// Create a small assertion method to check the three heads
+	assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
+		if num := chain.CurrentBlock().NumberU64(); num != block {
+			t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
+		}
+		if num := chain.CurrentFastBlock().NumberU64(); num != fast {
+			t.Errorf("%s head fast-block mismatch: have #%v, want #%v", kind, num, fast)
+		}
+		if num := chain.CurrentHeader().Number.Uint64(); num != header {
+			t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
+		}
+	}
+	// Import the chain as an archive node and ensure all pointers are updated
+	archiveDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(archiveDb, GenesisAccount{address, funds})
+
+	archive, _ := NewBlockChain(archiveDb, FakePow{}, new(event.TypeMux))
+	archive.SetProcessor(NewBlockProcessor(archiveDb, FakePow{}, archive, new(event.TypeMux)))
+
+	if n, err := archive.InsertChain(blocks); err != nil {
+		t.Fatalf("failed to process block %d: %v", n, err)
+	}
+	assert(t, "archive", archive, height, height, height)
+
+	// Import the chain as a non-archive node and ensure all pointers are updated
+	fastDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(fastDb, GenesisAccount{address, funds})
+	fast, _ := NewBlockChain(fastDb, FakePow{}, new(event.TypeMux))
+
+	headers := make([]*types.Header, len(blocks))
+	for i, block := range blocks {
+		headers[i] = block.Header()
+	}
+	if n, err := fast.InsertHeaderChain(headers, true); err != nil {
+		t.Fatalf("failed to insert header %d: %v", n, err)
+	}
+	if n, err := fast.InsertReceiptChain(blocks, receipts); err != nil {
+		t.Fatalf("failed to insert receipt %d: %v", n, err)
+	}
+	assert(t, "fast", fast, height, height, 0)
+
+	// Import the chain as a light node and ensure all pointers are updated
+	lightDb, _ := ethdb.NewMemDatabase()
+	WriteGenesisBlockForTesting(lightDb, GenesisAccount{address, funds})
+	light, _ := NewBlockChain(lightDb, FakePow{}, new(event.TypeMux))
+
+	if n, err := light.InsertHeaderChain(headers, true); err != nil {
+		t.Fatalf("failed to insert header %d: %v", n, err)
+	}
+	assert(t, "light", light, height, 0, 0)
+}
 // Tests that chain reorganizations handle transaction removals and reinsertions.
 func TestChainTxReorgs(t *testing.T) {
 	params.MinGasLimit = big.NewInt(125000) // Minimum the gas limit may ever be.
@@ -704,7 +856,7 @@ func TestChainTxReorgs(t *testing.T) {
 	// - futureAdd: transaction added after the reorg has already finished
 	var pastAdd, freshAdd, futureAdd *types.Transaction

-	chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
+	chain, _ := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) {
 		switch i {
 		case 0:
 			pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2)
@@ -730,7 +882,7 @@ func TestChainTxReorgs(t *testing.T) {
 	}
 	// overwrite the old chain
-	chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
+	chain, _ = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
 		switch i {
 		case 0:
 			pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3)


@@ -164,13 +164,13 @@ func (b *BlockGen) OffsetTime(seconds int64) {
 // Blocks created by GenerateChain do not contain valid proof of work
 // values. Inserting them into BlockChain requires use of FakePow or
 // a similar non-validating proof of work implementation.
-func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) []*types.Block {
+func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
 	statedb, err := state.New(parent.Root(), db)
 	if err != nil {
 		panic(err)
 	}
-	blocks := make(types.Blocks, n)
-	genblock := func(i int, h *types.Header) *types.Block {
+	blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
+	genblock := func(i int, h *types.Header) (*types.Block, types.Receipts) {
 		b := &BlockGen{parent: parent, i: i, chain: blocks, header: h, statedb: statedb}
 		if gen != nil {
 			gen(i, b)
@@ -181,15 +181,16 @@ func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int,
 			panic(fmt.Sprintf("state write error: %v", err))
 		}
 		h.Root = root
-		return types.NewBlock(h, b.txs, b.uncles, b.receipts)
+		return types.NewBlock(h, b.txs, b.uncles, b.receipts), b.receipts
 	}
 	for i := 0; i < n; i++ {
 		header := makeHeader(parent, statedb)
-		block := genblock(i, header)
+		block, receipt := genblock(i, header)
 		blocks[i] = block
+		receipts[i] = receipt
 		parent = block
 	}
-	return blocks
+	return blocks, receipts
 }
 func makeHeader(parent *types.Block, state *state.StateDB) *types.Header {

@@ -254,7 +255,8 @@ func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) [
 // makeBlockChain creates a deterministic chain of blocks rooted at parent.
 func makeBlockChain(parent *types.Block, n int, db ethdb.Database, seed int) []*types.Block {
-	return GenerateChain(parent, db, n, func(i int, b *BlockGen) {
+	blocks, _ := GenerateChain(parent, db, n, func(i int, b *BlockGen) {
 		b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
 	})
+	return blocks
 }


@@ -47,7 +47,7 @@ func ExampleGenerateChain() {
 	// This call generates a chain of 5 blocks. The function runs for
 	// each block and adds different features to gen based on the
 	// block index.
-	chain := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
+	chain, _ := GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) {
 		switch i {
 		case 0:
 			// In block 1, addr1 sends addr2 some ether.


@@ -60,7 +60,7 @@ func TestPowVerification(t *testing.T) {
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks    = GenerateChain(genesis, testdb, 8, nil)
+		blocks, _ = GenerateChain(genesis, testdb, 8, nil)
 	)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
@@ -115,7 +115,7 @@ func testPowConcurrentVerification(t *testing.T, threads int) {
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks    = GenerateChain(genesis, testdb, 8, nil)
+		blocks, _ = GenerateChain(genesis, testdb, 8, nil)
 	)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {
@@ -186,7 +186,7 @@ func testPowConcurrentAbortion(t *testing.T, threads int) {
 	var (
 		testdb, _ = ethdb.NewMemDatabase()
 		genesis   = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
-		blocks    = GenerateChain(genesis, testdb, 1024, nil)
+		blocks, _ = GenerateChain(genesis, testdb, 1024, nil)
 	)
 	headers := make([]*types.Header, len(blocks))
 	for i, block := range blocks {


@@ -34,6 +34,7 @@ import (
 var (
 	headHeaderKey = []byte("LastHeader")
 	headBlockKey  = []byte("LastBlock")
+	headFastKey   = []byte("LastFast")

 	blockPrefix    = []byte("block-")
 	blockNumPrefix = []byte("block-num-")
@@ -129,7 +130,7 @@ func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash {
 // header. The difference between this and GetHeadBlockHash is that whereas the
 // last block hash is only updated upon a full block import, the last header
 // hash is updated already at header import, allowing head tracking for the
-// fast synchronization mechanism.
+// light synchronization mechanism.
 func GetHeadHeaderHash(db ethdb.Database) common.Hash {
 	data, _ := db.Get(headHeaderKey)
 	if len(data) == 0 {
@@ -147,6 +148,18 @@ func GetHeadBlockHash(db ethdb.Database) common.Hash {
 	return common.BytesToHash(data)
 }
+// GetHeadFastBlockHash retrieves the hash of the current canonical head block during
+// fast synchronization. The difference between this and GetHeadBlockHash is that
+// whereas the last block hash is only updated upon a full block import, the last
+// fast hash is updated when importing pre-processed blocks.
+func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
+	data, _ := db.Get(headFastKey)
+	if len(data) == 0 {
+		return common.Hash{}
+	}
+	return common.BytesToHash(data)
+}
 // GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
 // if the header's not found.
 func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue {

@@ -249,6 +262,15 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
 	return nil
 }
+// WriteHeadFastBlockHash stores the fast head block's hash.
+func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
+	if err := db.Put(headFastKey, hash.Bytes()); err != nil {
+		glog.Fatalf("failed to store last fast block's hash into database: %v", err)
+		return err
+	}
+	return nil
+}
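For reference, a minimal round trip of the new "LastFast" pointer using the two helpers added above. This is a sketch, not code from the commit: it assumes an in-memory database and uses a placeholder hash, and it lives inside package core so no import prefix is needed.

	db, _ := ethdb.NewMemDatabase()

	hash := common.HexToHash("0x01") // placeholder head hash for illustration
	if err := WriteHeadFastBlockHash(db, hash); err != nil {
		// glog.Fatalf inside already reports fatal database failures
	}
	if head := GetHeadFastBlockHash(db); head != hash {
		// a mismatch here would indicate a broken round trip
	}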
 // WriteHeader serializes a block header into the database.
 func WriteHeader(db ethdb.Database, header *types.Header) error {
 	data, err := rlp.EncodeToBytes(header)


@@ -163,7 +163,12 @@ func TestBlockStorage(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()

 	// Create a test block to move around the database and make sure it's really new
-	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
+	block := types.NewBlockWithHeader(&types.Header{
+		Extra:       []byte("test block"),
+		UncleHash:   types.EmptyUncleHash,
+		TxHash:      types.EmptyRootHash,
+		ReceiptHash: types.EmptyRootHash,
+	})
 	if entry := GetBlock(db, block.Hash()); entry != nil {
 		t.Fatalf("Non existent block returned: %v", entry)
 	}
@@ -208,8 +213,12 @@ func TestBlockStorage(t *testing.T) {
 // Tests that partial block contents don't get reassembled into full blocks.
 func TestPartialBlockStorage(t *testing.T) {
 	db, _ := ethdb.NewMemDatabase()
-	block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")})
+	block := types.NewBlockWithHeader(&types.Header{
+		Extra:       []byte("test block"),
+		UncleHash:   types.EmptyUncleHash,
+		TxHash:      types.EmptyRootHash,
+		ReceiptHash: types.EmptyRootHash,
+	})

 	// Store a header and check that it's not recognized as a block
 	if err := WriteHeader(db, block.Header()); err != nil {
 		t.Fatalf("Failed to write header into database: %v", err)
@@ -298,6 +307,7 @@ func TestHeadStorage(t *testing.T) {
 	blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
 	blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
+	blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})

 	// Check that no head entries are in a pristine database
 	if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
@@ -306,6 +316,9 @@ func TestHeadStorage(t *testing.T) {
 	if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
 		t.Fatalf("Non head block entry returned: %v", entry)
 	}
+	if entry := GetHeadFastBlockHash(db); entry != (common.Hash{}) {
+		t.Fatalf("Non fast head block entry returned: %v", entry)
+	}
 	// Assign separate entries for the head header and block
 	if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
 		t.Fatalf("Failed to write head header hash: %v", err)
@@ -313,6 +326,9 @@ func TestHeadStorage(t *testing.T) {
 	if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
 		t.Fatalf("Failed to write head block hash: %v", err)
 	}
+	if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil {
+		t.Fatalf("Failed to write fast head block hash: %v", err)
+	}
 	// Check that both heads are present, and different (i.e. two heads maintained)
 	if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
 		t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
@@ -320,6 +336,9 @@ func TestHeadStorage(t *testing.T) {
 	if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
 		t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
 	}
+	if entry := GetHeadFastBlockHash(db); entry != blockFast.Hash() {
+		t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
+	}
 }

 func TestMipmapBloom(t *testing.T) {


@@ -103,7 +103,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
 	if err := WriteBlock(chainDb, block); err != nil {
 		return nil, err
 	}
-	if err := PutBlockReceipts(chainDb, block, nil); err != nil {
+	if err := PutBlockReceipts(chainDb, block.Hash(), nil); err != nil {
 		return nil, err
 	}
 	if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil {


@@ -155,7 +155,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts {
 // PutBlockReceipts stores the block's transactions associated receipts
 // and stores them by block hash in a single slice. This is required for
 // forks and chain reorgs
-func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Receipts) error {
+func PutBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error {
 	rs := make([]*types.ReceiptForStorage, len(receipts))
 	for i, receipt := range receipts {
 		rs[i] = (*types.ReceiptForStorage)(receipt)
@@ -164,12 +164,9 @@ func PutBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Rece
 	if err != nil {
 		return err
 	}
-	hash := block.Hash()
 	err = db.Put(append(blockReceiptsPre, hash[:]...), bytes)
 	if err != nil {
 		return err
 	}
 	return nil
 }


@@ -128,7 +128,6 @@ type Block struct {
 	header       *Header
 	uncles       []*Header
 	transactions Transactions
-	receipts     Receipts

 	// caches
 	hash atomic.Value
@@ -200,8 +199,6 @@ func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*
 	} else {
 		b.header.ReceiptHash = DeriveSha(Receipts(receipts))
 		b.header.Bloom = CreateBloom(receipts)
-		b.receipts = make([]*Receipt, len(receipts))
-		copy(b.receipts, receipts)
 	}

 	if len(uncles) == 0 {
@@ -299,7 +296,6 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
 // TODO: copies
 func (b *Block) Uncles() []*Header          { return b.uncles }
 func (b *Block) Transactions() Transactions { return b.transactions }
-func (b *Block) Receipts() Receipts         { return b.receipts }

 func (b *Block) Transaction(hash common.Hash) *Transaction {
 	for _, transaction := range b.transactions {
@@ -364,7 +360,6 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block {
 	return &Block{
 		header:       &cpy,
 		transactions: b.transactions,
-		receipts:     b.receipts,
 		uncles:       b.uncles,
 	}
 }


@@ -41,8 +41,8 @@ type Receipt struct {
 }

 // NewReceipt creates a barebone transaction receipt, copying the init fields.
-func NewReceipt(root []byte, cumalativeGasUsed *big.Int) *Receipt {
-	return &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumalativeGasUsed)}
+func NewReceipt(root []byte, cumulativeGasUsed *big.Int) *Receipt {
+	return &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: new(big.Int).Set(cumulativeGasUsed)}
 }

 // EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt


@@ -25,19 +25,21 @@ import (
 )

 type Log struct {
+	// Consensus fields
 	Address common.Address
 	Topics  []common.Hash
 	Data    []byte
-	Number  uint64

-	TxHash    common.Hash
-	TxIndex   uint
-	BlockHash common.Hash
-	Index     uint
+	// Derived fields (don't reorder!)
+	BlockNumber uint64
+	TxHash      common.Hash
+	TxIndex     uint
+	BlockHash   common.Hash
+	Index       uint
 }

 func NewLog(address common.Address, topics []common.Hash, data []byte, number uint64) *Log {
-	return &Log{Address: address, Topics: topics, Data: data, Number: number}
+	return &Log{Address: address, Topics: topics, Data: data, BlockNumber: number}
 }

 func (l *Log) EncodeRLP(w io.Writer) error {
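A sketch of how the regrouped fields get populated: the EVM sets only the consensus fields when a log is emitted, while the derived fields are filled in afterwards from block and transaction context, as InsertReceiptChain above does during fast sync. The values are placeholders mirroring the TestPutReceipt fixture, not code from the commit.

	addr := common.BytesToAddress([]byte("placeholder address"))
	hash := common.BytesToHash([]byte("placeholder hash"))

	log := &vm.Log{
		// Consensus fields, set when the EVM emits the log
		Address: addr,
		Topics:  []common.Hash{hash},
		Data:    []byte("hi"),
	}
	// Derived fields, set later from block/transaction context
	log.BlockNumber = 42
	log.TxHash = hash
	log.TxIndex = 0
	log.BlockHash = hash
	log.Index = 0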


@@ -102,6 +102,9 @@ type headHeaderRetrievalFn func() *types.Header

 // headBlockRetrievalFn is a callback type for retrieving the head block from the local chain.
 type headBlockRetrievalFn func() *types.Block

+// headFastBlockRetrievalFn is a callback type for retrieving the head fast block from the local chain.
+type headFastBlockRetrievalFn func() *types.Block
+
 // tdRetrievalFn is a callback type for retrieving the total difficulty of a local block.
 type tdRetrievalFn func(common.Hash) *big.Int
@@ -188,17 +191,18 @@ type Downloader struct {
 	syncStatsLock sync.RWMutex // Lock protecting the sync stats fields

 	// Callbacks
 	hasHeader  headerCheckFn         // Checks if a header is present in the chain
 	hasBlock   blockCheckFn          // Checks if a block is present in the chain
 	getHeader  headerRetrievalFn     // Retrieves a header from the chain
 	getBlock   blockRetrievalFn      // Retrieves a block from the chain
 	headHeader headHeaderRetrievalFn // Retrieves the head header from the chain
 	headBlock  headBlockRetrievalFn  // Retrieves the head block from the chain
-	getTd          tdRetrievalFn        // Retrieves the TD of a block from the chain
-	insertHeaders  headerChainInsertFn  // Injects a batch of headers into the chain
-	insertBlocks   blockChainInsertFn   // Injects a batch of blocks into the chain
-	insertReceipts receiptChainInsertFn // Injects a batch of blocks and their receipts into the chain
-	dropPeer       peerDropFn           // Drops a peer for misbehaving
+	headFastBlock  headFastBlockRetrievalFn // Retrieves the head fast-sync block from the chain
+	getTd          tdRetrievalFn            // Retrieves the TD of a block from the chain
+	insertHeaders  headerChainInsertFn      // Injects a batch of headers into the chain
+	insertBlocks   blockChainInsertFn       // Injects a batch of blocks into the chain
+	insertReceipts receiptChainInsertFn     // Injects a batch of blocks and their receipts into the chain
+	dropPeer       peerDropFn               // Drops a peer for misbehaving

 	// Status
 	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
@@ -229,8 +233,8 @@ type Downloader struct {
 // New creates a new downloader to fetch hashes and blocks from remote peers.
 func New(mode SyncMode, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock blockCheckFn, getHeader headerRetrievalFn, getBlock blockRetrievalFn,
-	headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, getTd tdRetrievalFn, insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn,
-	insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader {
+	headHeader headHeaderRetrievalFn, headBlock headBlockRetrievalFn, headFastBlock headFastBlockRetrievalFn, getTd tdRetrievalFn,
+	insertHeaders headerChainInsertFn, insertBlocks blockChainInsertFn, insertReceipts receiptChainInsertFn, dropPeer peerDropFn) *Downloader {

 	return &Downloader{
 		mode: mode,

@@ -243,6 +247,7 @@ func New(mode SyncMode, mux *event.TypeMux, hasHeader headerCheckFn, hasBlock bl
 		getBlock:      getBlock,
 		headHeader:    headHeader,
 		headBlock:     headBlock,
+		headFastBlock: headFastBlock,
 		getTd:         getTd,
 		insertHeaders: insertHeaders,
 		insertBlocks:  insertBlocks,
@@ -393,7 +398,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
 	}()

 	glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version)
-	defer glog.V(logger.Debug).Infof("Synchronisation terminated")
+	defer func(start time.Time) {
+		glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start))
+	}(time.Now())

 	switch {
 	case p.version == 61:
@@ -989,6 +996,8 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
 	head := d.headHeader().Number.Uint64()
 	if d.mode == FullSync {
 		head = d.headBlock().NumberU64()
+	} else if d.mode == FastSync {
+		head = d.headFastBlock().NumberU64()
 	}
 	from := int64(head) - int64(MaxHeaderFetch) + 1
 	if from < 0 {
@@ -1020,7 +1029,7 @@ func (d *Downloader) findAncestor(p *peer) (uint64, error) {
 		// Check if a common ancestor was found
 		finished = true
 		for i := len(headers) - 1; i >= 0; i-- {
-			if (d.mode == FullSync && d.hasBlock(headers[i].Hash())) || (d.mode != FullSync && d.hasHeader(headers[i].Hash())) {
+			if (d.mode != LightSync && d.hasBlock(headers[i].Hash())) || (d.mode == LightSync && d.hasHeader(headers[i].Hash())) {
 				number, hash = headers[i].Number.Uint64(), headers[i].Hash()
 				break
 			}
@@ -1182,17 +1191,18 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error {
 			// Otherwise insert all the new headers, aborting in case of junk
 			glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headerPack.headers), from)

+			if d.mode == FastSync || d.mode == LightSync {
+				if n, err := d.insertHeaders(headerPack.headers, false); err != nil {
+					glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headerPack.headers[n].Number, headerPack.headers[n].Hash().Bytes()[:4], err)
+					return errInvalidChain
+				}
+			}
 			if d.mode == FullSync || d.mode == FastSync {
 				inserts := d.queue.Schedule(headerPack.headers, from, d.mode == FastSync)
 				if len(inserts) != len(headerPack.headers) {
 					glog.V(logger.Debug).Infof("%v: stale headers", p)
 					return errBadPeer
 				}
-			} else {
-				if n, err := d.insertHeaders(headerPack.headers, true); err != nil {
-					glog.V(logger.Debug).Infof("%v: invalid header #%d [%x…]: %v", p, headerPack.headers[n].Number, headerPack.headers[n].Hash().Bytes()[:4], err)
-					return errInvalidChain
-				}
 			}
 			// Notify the content fetchers of new headers, but stop if queue is full
 			cont := d.queue.PendingBlocks() < maxQueuedHeaders || d.queue.PendingReceipts() < maxQueuedHeaders
@@ -1394,6 +1404,7 @@ func (d *Downloader) fetchParts(from uint64, errCancel error, deliveryCh chan da
 		for _, pid := range expire() {
 			if peer := d.peers.Peer(pid); peer != nil {
 				peer.Demote()
+				setIdle(peer)
 				glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind))
 			}
 		}
@@ -1497,7 +1508,7 @@ func (d *Downloader) process() {
 		// Actually import the blocks
 		if glog.V(logger.Debug) {
 			first, last := results[0].Header, results[len(results)-1].Header
-			glog.V(logger.Debug).Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
+			glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
 		}
 		for len(results) != 0 {
 			// Check for any termination requests
@@ -1536,7 +1547,7 @@ func (d *Downloader) process() {
 				index, err = d.insertHeaders(headers, true)
 			}
 			if err != nil {
-				glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash(), err)
+				glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err)
 				d.cancel()
 				return
 			}


@@ -45,9 +45,9 @@ var (
 // the returned hash chain is ordered head->parent. In addition, every 3rd block
 // contains a transaction and every 5th an uncle to allow testing correct block
 // reassembly.
-func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block) {
+func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
 	// Generate the block chain
-	blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
+	blocks, receipts := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
 		block.SetCoinbase(common.Address{seed})

 		// If the block number is multiple of 3, send a bonus transaction to the miner
@@ -73,25 +73,29 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
 	blockm := make(map[common.Hash]*types.Block, n+1)
 	blockm[parent.Hash()] = parent

+	receiptm := make(map[common.Hash]types.Receipts, n+1)
+	receiptm[parent.Hash()] = parentReceipts
+
 	for i, b := range blocks {
 		hashes[len(hashes)-i-2] = b.Hash()
 		headerm[b.Hash()] = b.Header()
 		blockm[b.Hash()] = b
+		receiptm[b.Hash()] = receipts[i]
 	}
-	return hashes, headerm, blockm
+	return hashes, headerm, blockm, receiptm
 }
 // makeChainFork creates two chains of length n, such that h1[:f] and
 // h2[:f] are different but have a common suffix of length n-f.
-func makeChainFork(n, f int, parent *types.Block) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block) {
+func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
 	// Create the common suffix
-	hashes, headers, blocks := makeChain(n-f, 0, parent)
+	hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts)

 	// Create the forks
-	hashes1, headers1, blocks1 := makeChain(f, 1, blocks[hashes[0]])
+	hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]])
 	hashes1 = append(hashes1, hashes[1:]...)

-	hashes2, headers2, blocks2 := makeChain(f, 2, blocks[hashes[0]])
+	hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]])
 	hashes2 = append(hashes2, hashes[1:]...)

 	for hash, header := range headers {
@ -102,22 +106,28 @@ func makeChainFork(n, f int, parent *types.Block) ([]common.Hash, []common.Hash,
blocks1[hash] = block blocks1[hash] = block
blocks2[hash] = block blocks2[hash] = block
} }
return hashes1, hashes2, headers1, headers2, blocks1, blocks2 for hash, receipt := range receipts {
receipts1[hash] = receipt
receipts2[hash] = receipt
}
return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
} }
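A hedged usage sketch of the updated helpers, assuming it sits in this test package next to them (the test name is made up): both builders now thread receipts through alongside hashes, headers and blocks, and the receipts map feeds straight into newPeer, defined below.

func TestReceiptThreadingSketch(t *testing.T) {
	// Build a short chain; nil parent receipts matches the genesis case.
	hashes, headers, blocks, receipts := makeChain(16, 0, genesis, nil)

	tester := newTester(FastSync)
	if err := tester.newPeer("peer", 63, hashes, headers, blocks, receipts); err != nil {
		t.Fatalf("failed to register peer: %v", err)
	}
	if err := tester.sync("peer", nil); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}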
// downloadTester is a test simulator for mocking out the local block chain. // downloadTester is a test simulator for mocking out the local block chain.
type downloadTester struct { type downloadTester struct {
downloader *Downloader downloader *Downloader
ownHashes []common.Hash // Hash chain belonging to the tester ownHashes []common.Hash // Hash chain belonging to the tester
ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester
ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester
ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain
peerHashes map[string][]common.Hash // Hash chain belonging to different test peers
peerHeaders map[string]map[common.Hash]*types.Header // Headers belonging to different test peers peerHashes map[string][]common.Hash // Hash chain belonging to different test peers
peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers peerHeaders map[string]map[common.Hash]*types.Header // Headers belonging to different test peers
peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers
peerReceipts map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains
lock sync.RWMutex lock sync.RWMutex
} }
@ -128,15 +138,16 @@ func newTester(mode SyncMode) *downloadTester {
ownHashes: []common.Hash{genesis.Hash()}, ownHashes: []common.Hash{genesis.Hash()},
ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, ownHeaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): genesis.Receipts()}, ownReceipts: map[common.Hash]types.Receipts{genesis.Hash(): nil},
ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()}, ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
peerHashes: make(map[string][]common.Hash), peerHashes: make(map[string][]common.Hash),
peerHeaders: make(map[string]map[common.Hash]*types.Header), peerHeaders: make(map[string]map[common.Hash]*types.Header),
peerBlocks: make(map[string]map[common.Hash]*types.Block), peerBlocks: make(map[string]map[common.Hash]*types.Block),
peerReceipts: make(map[string]map[common.Hash]types.Receipts),
peerChainTds: make(map[string]map[common.Hash]*big.Int), peerChainTds: make(map[string]map[common.Hash]*big.Int),
} }
tester.downloader = New(mode, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, tester.getBlock, tester.downloader = New(mode, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader, tester.getBlock,
tester.headHeader, tester.headBlock, tester.getTd, tester.insertHeaders, tester.insertBlocks, tester.insertConfirmedBlocks, tester.dropPeer) tester.headHeader, tester.headBlock, tester.headFastBlock, tester.getTd, tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.dropPeer)
return tester return tester
} }
@ -197,7 +208,12 @@ func (dl *downloadTester) headHeader() *types.Header {
dl.lock.RLock() dl.lock.RLock()
defer dl.lock.RUnlock() defer dl.lock.RUnlock()
return dl.getHeader(dl.ownHashes[len(dl.ownHashes)-1]) for i := len(dl.ownHashes) - 1; i >= 0; i-- {
if header := dl.getHeader(dl.ownHashes[i]); header != nil {
return header
}
}
return nil
} }
// headBlock retrieves the current head block from the canonical chain. // headBlock retrieves the current head block from the canonical chain.
@ -213,6 +229,21 @@ func (dl *downloadTester) headBlock() *types.Block {
return nil return nil
} }
// headFastBlock retrieves the current head fast-sync block from the canonical chain.
func (dl *downloadTester) headFastBlock() *types.Block {
dl.lock.RLock()
defer dl.lock.RUnlock()
for i := len(dl.ownHashes) - 1; i >= 0; i-- {
if block := dl.getBlock(dl.ownHashes[i]); block != nil {
if _, ok := dl.ownReceipts[block.Hash()]; ok {
return block
}
}
}
return nil
}
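The rule encoded above is that the fast-sync head is the newest hash for which receipts are already recorded. A tiny, self-contained illustration of that backwards walk (names and values are made up):

package main

import "fmt"

func main() {
	hashes := []string{"genesis", "b1", "b2", "b3"} // oldest -> newest
	receipts := map[string]bool{"genesis": true, "b1": true, "b2": true}

	// Walk backwards from the newest entry, exactly as headFastBlock does,
	// and stop at the first block whose receipts are present.
	for i := len(hashes) - 1; i >= 0; i-- {
		if receipts[hashes[i]] {
			fmt.Println("fast head:", hashes[i]) // prints "fast head: b2"
			break
		}
	}
}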
// getTd retrieves the block's total difficulty from the canonical chain. // getTd retrieves the block's total difficulty from the canonical chain.
func (dl *downloadTester) getTd(hash common.Hash) *big.Int { func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
dl.lock.RLock() dl.lock.RLock()
@ -227,6 +258,9 @@ func (dl *downloadTester) insertHeaders(headers []*types.Header, verify bool) (i
defer dl.lock.Unlock() defer dl.lock.Unlock()
for i, header := range headers { for i, header := range headers {
if _, ok := dl.ownHeaders[header.Hash()]; ok {
continue
}
if _, ok := dl.ownHeaders[header.ParentHash]; !ok { if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
return i, errors.New("unknown parent") return i, errors.New("unknown parent")
} }
@ -254,33 +288,33 @@ func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) {
return len(blocks), nil return len(blocks), nil
} }
// insertBlocks injects a new batch of blocks into the simulated chain. // insertReceipts injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) insertConfirmedBlocks(blocks types.Blocks, receipts []types.Receipts) (int, error) { func (dl *downloadTester) insertReceipts(blocks types.Blocks, receipts []types.Receipts) (int, error) {
dl.lock.Lock() dl.lock.Lock()
defer dl.lock.Unlock() defer dl.lock.Unlock()
for i := 0; i < len(blocks) && i < len(receipts); i++ { for i := 0; i < len(blocks) && i < len(receipts); i++ {
if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {
return i, errors.New("unknown owner")
}
if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok { if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
return i, errors.New("unknown parent") return i, errors.New("unknown parent")
} }
dl.ownHashes = append(dl.ownHashes, blocks[i].Hash())
dl.ownHeaders[blocks[i].Hash()] = blocks[i].Header()
dl.ownBlocks[blocks[i].Hash()] = blocks[i] dl.ownBlocks[blocks[i].Hash()] = blocks[i]
dl.ownReceipts[blocks[i].Hash()] = blocks[i].Receipts() dl.ownReceipts[blocks[i].Hash()] = receipts[i]
dl.ownChainTd[blocks[i].Hash()] = dl.ownChainTd[blocks[i].ParentHash()]
} }
return len(blocks), nil return len(blocks), nil
} }
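As a hedged sketch of the ordering the two checks above impose (assuming the surrounding test package): headers must be imported before their receipts, since receipts for an unknown header fail with "unknown owner", and each block's parent must already be present.

hashes, headers, blocks, receipts := makeChain(2, 0, genesis, nil)
dl := newTester(FastSync)

// hashes is ordered head->parent, so walk it backwards, oldest block first.
for i := len(hashes) - 2; i >= 0; i-- {
	h := hashes[i]
	if _, err := dl.insertHeaders([]*types.Header{headers[h]}, true); err != nil {
		panic(err) // parent header unknown
	}
	if _, err := dl.insertReceipts(types.Blocks{blocks[h]}, []types.Receipts{receipts[h]}); err != nil {
		panic(err) // header or parent block unknown
	}
}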
// newPeer registers a new block download source into the downloader. // newPeer registers a new block download source into the downloader.
func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block) error { func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts) error {
return dl.newSlowPeer(id, version, hashes, headers, blocks, 0) return dl.newSlowPeer(id, version, hashes, headers, blocks, receipts, 0)
} }
// newSlowPeer registers a new block download source into the downloader, with a // newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating // specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO. // potentially slow network IO.
func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, delay time.Duration) error { func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, headers map[common.Hash]*types.Header, blocks map[common.Hash]*types.Block, receipts map[common.Hash]types.Receipts, delay time.Duration) error {
dl.lock.Lock() dl.lock.Lock()
defer dl.lock.Unlock() defer dl.lock.Unlock()
@ -302,6 +336,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
dl.peerHeaders[id] = make(map[common.Hash]*types.Header) dl.peerHeaders[id] = make(map[common.Hash]*types.Header)
dl.peerBlocks[id] = make(map[common.Hash]*types.Block) dl.peerBlocks[id] = make(map[common.Hash]*types.Block)
dl.peerReceipts[id] = make(map[common.Hash]types.Receipts)
dl.peerChainTds[id] = make(map[common.Hash]*big.Int) dl.peerChainTds[id] = make(map[common.Hash]*big.Int)
for _, hash := range hashes { for _, hash := range hashes {
@ -317,6 +352,9 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()]) dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][block.ParentHash()])
} }
} }
if receipt, ok := receipts[hash]; ok {
dl.peerReceipts[id][hash] = receipt
}
} }
} }
return err return err
@ -501,15 +539,15 @@ func (dl *downloadTester) peerGetReceiptsFn(id string, delay time.Duration) func
dl.lock.RLock() dl.lock.RLock()
defer dl.lock.RUnlock() defer dl.lock.RUnlock()
blocks := dl.peerBlocks[id] receipts := dl.peerReceipts[id]
receipts := make([][]*types.Receipt, 0, len(hashes)) results := make([][]*types.Receipt, 0, len(hashes))
for _, hash := range hashes { for _, hash := range hashes {
if block, ok := blocks[hash]; ok { if receipt, ok := receipts[hash]; ok {
receipts = append(receipts, block.Receipts()) results = append(results, receipt)
} }
} }
go dl.downloader.DeliverReceipts(id, receipts) go dl.downloader.DeliverReceipts(id, results)
return nil return nil
} }
@ -551,10 +589,10 @@ func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronis
func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) { func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download // Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15 targetBlocks := blockCacheLimit - 15
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
tester := newTester(mode) tester := newTester(mode)
tester.newPeer("peer", protocol, hashes, headers, blocks) tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
// Synchronise with the peer and make sure all relevant data was retrieved // Synchronise with the peer and make sure all relevant data was retrieved
if err := tester.sync("peer", nil); err != nil { if err := tester.sync("peer", nil); err != nil {
@ -575,10 +613,10 @@ func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) { func testThrottling(t *testing.T, protocol int, mode SyncMode) {
// Create a long block chain to download and the tester // Create a long block chain to download and the tester
targetBlocks := 8 * blockCacheLimit targetBlocks := 8 * blockCacheLimit
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
tester := newTester(mode) tester := newTester(mode)
tester.newPeer("peer", protocol, hashes, headers, blocks) tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
// Wrap the importer to allow stepping // Wrap the importer to allow stepping
blocked, proceed := uint32(0), make(chan struct{}) blocked, proceed := uint32(0), make(chan struct{})
@ -650,11 +688,11 @@ func TestForkedSynchronisation64Light(t *testing.T) { testForkedSynchronisation(
func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) { func testForkedSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Create a long enough forked chain // Create a long enough forked chain
common, fork := MaxHashFetch, 2*MaxHashFetch common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis) hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
tester := newTester(mode) tester := newTester(mode)
tester.newPeer("fork A", protocol, hashesA, headersA, blocksA) tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
tester.newPeer("fork B", protocol, hashesB, headersB, blocksB) tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
// Synchronise with the peer and make sure all blocks were retrieved // Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("fork A", nil); err != nil { if err := tester.sync("fork A", nil); err != nil {
@ -731,10 +769,10 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
if targetBlocks >= MaxHeaderFetch { if targetBlocks >= MaxHeaderFetch {
targetBlocks = MaxHeaderFetch - 15 targetBlocks = MaxHeaderFetch - 15
} }
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
tester := newTester(mode) tester := newTester(mode)
tester.newPeer("peer", protocol, hashes, headers, blocks) tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
// Make sure canceling works with a pristine downloader // Make sure canceling works with a pristine downloader
tester.downloader.cancel() tester.downloader.cancel()
@ -764,12 +802,12 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Create various peers with various parts of the chain // Create various peers with various parts of the chain
targetPeers := 8 targetPeers := 8
targetBlocks := targetPeers*blockCacheLimit - 15 targetBlocks := targetPeers*blockCacheLimit - 15
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
tester := newTester(mode) tester := newTester(mode)
for i := 0; i < targetPeers; i++ { for i := 0; i < targetPeers; i++ {
id := fmt.Sprintf("peer #%d", i) id := fmt.Sprintf("peer #%d", i)
tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks) tester.newPeer(id, protocol, hashes[i*blockCacheLimit:], headers, blocks, receipts)
} }
// Synchronise with the middle peer and make sure half of the blocks were retrieved // Synchronise with the middle peer and make sure half of the blocks were retrieved
id := fmt.Sprintf("peer #%d", targetPeers/2) id := fmt.Sprintf("peer #%d", targetPeers/2)
@ -798,22 +836,21 @@ func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t,
func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download // Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15 targetBlocks := blockCacheLimit - 15
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
// Create peers of every type // Create peers of every type
tester := newTester(mode) tester := newTester(mode)
tester.newPeer("peer 61", 61, hashes, headers, blocks) tester.newPeer("peer 61", 61, hashes, headers, blocks, receipts)
tester.newPeer("peer 62", 62, hashes, headers, blocks) tester.newPeer("peer 62", 62, hashes, headers, blocks, receipts)
tester.newPeer("peer 63", 63, hashes, headers, blocks) tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
tester.newPeer("peer 64", 64, hashes, headers, blocks) tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
// Synchronise with the requested peer and make sure all blocks were retrieved // Synchronise with the requested peer and make sure all blocks were retrieved
if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil); err != nil { if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err) t.Fatalf("failed to synchronise blocks: %v", err)
} }
if imported := len(tester.ownBlocks); imported != targetBlocks+1 { assertOwnChain(t, tester, targetBlocks+1)
t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
}
// Check that no peers have been dropped off // Check that no peers have been dropped off
for _, version := range []int{61, 62, 63, 64} { for _, version := range []int{61, 62, 63, 64} {
peer := fmt.Sprintf("peer %d", version) peer := fmt.Sprintf("peer %d", version)
@ -835,18 +872,18 @@ func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, L
func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download // Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15 targetBlocks := blockCacheLimit - 15
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
tester := newTester(mode) tester := newTester(mode)
tester.newPeer("peer", protocol, hashes, headers, blocks) tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
// Instrument the downloader to signal body requests // Instrument the downloader to signal body requests
bodies, receipts := int32(0), int32(0) bodiesHave, receiptsHave := int32(0), int32(0)
tester.downloader.bodyFetchHook = func(headers []*types.Header) { tester.downloader.bodyFetchHook = func(headers []*types.Header) {
atomic.AddInt32(&bodies, int32(len(headers))) atomic.AddInt32(&bodiesHave, int32(len(headers)))
} }
tester.downloader.receiptFetchHook = func(headers []*types.Header) { tester.downloader.receiptFetchHook = func(headers []*types.Header) {
atomic.AddInt32(&receipts, int32(len(headers))) atomic.AddInt32(&receiptsHave, int32(len(headers)))
} }
// Synchronise with the peer and make sure all blocks were retrieved // Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("peer", nil); err != nil { if err := tester.sync("peer", nil); err != nil {
@ -860,15 +897,17 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
if mode != LightSync && block != genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { if mode != LightSync && block != genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
bodiesNeeded++ bodiesNeeded++
} }
if mode == FastSync && block != genesis && len(block.Receipts()) > 0 { }
for _, receipt := range receipts {
if mode == FastSync && len(receipt) > 0 {
receiptsNeeded++ receiptsNeeded++
} }
} }
if int(bodies) != bodiesNeeded { if int(bodiesHave) != bodiesNeeded {
t.Errorf("body retrieval count mismatch: have %v, want %v", bodies, bodiesNeeded) t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave, bodiesNeeded)
} }
if int(receipts) != receiptsNeeded { if int(receiptsHave) != receiptsNeeded {
t.Errorf("receipt retrieval count mismatch: have %v, want %v", receipts, receiptsNeeded) t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave, receiptsNeeded)
} }
} }
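The accounting above only counts blocks whose bodies or receipt lists are non-empty. With the makeChain convention (a transaction every 3rd block, an uncle every 5th), a self-contained toy computation looks like this (the chain length and counts are illustrative, not taken from the test):

package main

import "fmt"

func main() {
	n := 10 // toy chain length
	bodiesNeeded, receiptsNeeded := 0, 0
	for i := 1; i <= n; i++ {
		if i%3 == 0 || i%5 == 0 { // a tx or an uncle makes the body non-empty
			bodiesNeeded++
		}
		if i%3 == 0 { // only tx-bearing blocks yield non-empty receipt lists
			receiptsNeeded++
		}
	}
	fmt.Println(bodiesNeeded, receiptsNeeded) // 5 3
}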
@ -884,21 +923,20 @@ func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 6
func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) { func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download // Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15 targetBlocks := blockCacheLimit - 15
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
tester := newTester(mode) tester := newTester(mode)
// Attempt a full sync with an attacker feeding gapped headers // Attempt a full sync with an attacker feeding gapped headers
tester.newPeer("attack", protocol, hashes, headers, blocks) tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
missing := targetBlocks / 2 missing := targetBlocks / 2
delete(tester.peerHeaders["attack"], hashes[missing]) delete(tester.peerHeaders["attack"], hashes[missing])
delete(tester.peerBlocks["attack"], hashes[missing])
if err := tester.sync("attack", nil); err == nil { if err := tester.sync("attack", nil); err == nil {
t.Fatalf("succeeded attacker synchronisation") t.Fatalf("succeeded attacker synchronisation")
} }
// Synchronise with the valid peer and make sure sync succeeds // Synchronise with the valid peer and make sure sync succeeds
tester.newPeer("valid", protocol, hashes, headers, blocks) tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
if err := tester.sync("valid", nil); err != nil { if err := tester.sync("valid", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err) t.Fatalf("failed to synchronise blocks: %v", err)
} }
@ -917,20 +955,21 @@ func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 6
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download // Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15 targetBlocks := blockCacheLimit - 15
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
tester := newTester(mode) tester := newTester(mode)
// Attempt a full sync with an attacker feeding shifted headers // Attempt a full sync with an attacker feeding shifted headers
tester.newPeer("attack", protocol, hashes, headers, blocks) tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
delete(tester.peerHeaders["attack"], hashes[len(hashes)-2]) delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
delete(tester.peerBlocks["attack"], hashes[len(hashes)-2]) delete(tester.peerBlocks["attack"], hashes[len(hashes)-2])
delete(tester.peerReceipts["attack"], hashes[len(hashes)-2])
if err := tester.sync("attack", nil); err == nil { if err := tester.sync("attack", nil); err == nil {
t.Fatalf("succeeded attacker synchronisation") t.Fatalf("succeeded attacker synchronisation")
} }
// Synchronise with the valid peer and make sure sync succeeds // Synchronise with the valid peer and make sure sync succeeds
tester.newPeer("valid", protocol, hashes, headers, blocks) tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
if err := tester.sync("valid", nil); err != nil { if err := tester.sync("valid", nil); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err) t.Fatalf("failed to synchronise blocks: %v", err)
} }
@ -949,24 +988,24 @@ func TestInvalidContentAttack64Light(t *testing.T) { testInvalidContentAttack(t,
func testInvalidContentAttack(t *testing.T, protocol int, mode SyncMode) { func testInvalidContentAttack(t *testing.T, protocol int, mode SyncMode) {
// Create two peers, one feeding invalid block bodies // Create two peers, one feeding invalid block bodies
targetBlocks := 4*blockCacheLimit - 15 targetBlocks := 4*blockCacheLimit - 15
hashes, headers, validBlocks := makeChain(targetBlocks, 0, genesis) hashes, headers, validBlocks, validReceipts := makeChain(targetBlocks, 0, genesis, nil)
invalidBlocks := make(map[common.Hash]*types.Block) invalidBlocks := make(map[common.Hash]*types.Block)
for hash, block := range validBlocks { for hash, block := range validBlocks {
invalidBlocks[hash] = types.NewBlockWithHeader(block.Header()) invalidBlocks[hash] = types.NewBlockWithHeader(block.Header())
} }
invalidReceipts := make(map[common.Hash]*types.Block) invalidReceipts := make(map[common.Hash]types.Receipts)
for hash, block := range validBlocks { for hash := range validReceipts {
invalidReceipts[hash] = types.NewBlockWithHeader(block.Header()).WithBody(block.Transactions(), block.Uncles()) invalidReceipts[hash] = types.Receipts{&types.Receipt{}}
} }
tester := newTester(mode) tester := newTester(mode)
tester.newPeer("valid", protocol, hashes, headers, validBlocks) tester.newPeer("valid", protocol, hashes, headers, validBlocks, validReceipts)
if mode != LightSync { if mode != LightSync {
tester.newPeer("body attack", protocol, hashes, headers, invalidBlocks) tester.newPeer("body attack", protocol, hashes, headers, invalidBlocks, validReceipts)
} }
if mode == FastSync { if mode == FastSync {
tester.newPeer("receipt attack", protocol, hashes, headers, invalidReceipts) tester.newPeer("receipt attack", protocol, hashes, headers, validBlocks, invalidReceipts)
} }
// Synchronise with the valid peer (will pull contents from the attacker too) // Synchronise with the valid peer (will pull contents from the attacker too)
if err := tester.sync("valid", nil); err != nil { if err := tester.sync("valid", nil); err != nil {
@ -995,9 +1034,9 @@ func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttac
func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) { func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
tester := newTester(mode) tester := newTester(mode)
hashes, headers, blocks := makeChain(0, 0, genesis) hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil)
tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks) tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
if err := tester.sync("attack", big.NewInt(1000000)); err != errStallingPeer { if err := tester.sync("attack", big.NewInt(1000000)); err != errStallingPeer {
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
} }
@ -1040,7 +1079,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
for i, tt := range tests { for i, tt := range tests {
// Register a new peer and ensure its presence // Register a new peer and ensure its presence
id := fmt.Sprintf("test %d", i) id := fmt.Sprintf("test %d", i)
if err := tester.newPeer(id, protocol, []common.Hash{genesis.Hash()}, nil, nil); err != nil { if err := tester.newPeer(id, protocol, []common.Hash{genesis.Hash()}, nil, nil, nil); err != nil {
t.Fatalf("test %d: failed to register new peer: %v", i, err) t.Fatalf("test %d: failed to register new peer: %v", i, err)
} }
if _, ok := tester.peerHashes[id]; !ok { if _, ok := tester.peerHashes[id]; !ok {
@ -1069,7 +1108,7 @@ func TestSyncBoundaries64Light(t *testing.T) { testSyncBoundaries(t, 64, LightSy
func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download // Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15 targetBlocks := blockCacheLimit - 15
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
// Set a sync init hook to catch boundary changes // Set a sync init hook to catch boundary changes
starting := make(chan struct{}) starting := make(chan struct{})
@ -1085,7 +1124,7 @@ func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
} }
// Synchronise half the blocks and check initial boundaries // Synchronise half the blocks and check initial boundaries
tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks) tester.newPeer("peer-half", protocol, hashes[targetBlocks/2:], headers, blocks, receipts)
pending := new(sync.WaitGroup) pending := new(sync.WaitGroup)
pending.Add(1) pending.Add(1)
@ -1103,7 +1142,7 @@ func testSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
pending.Wait() pending.Wait()
// Synchronise all the blocks and check continuation boundaries // Synchronise all the blocks and check continuation boundaries
tester.newPeer("peer-full", protocol, hashes, headers, blocks) tester.newPeer("peer-full", protocol, hashes, headers, blocks, receipts)
pending.Add(1) pending.Add(1)
go func() { go func() {
@ -1134,7 +1173,7 @@ func TestForkedSyncBoundaries64Light(t *testing.T) { testForkedSyncBoundaries(t,
func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
// Create a forked chain to simulate origin reversal // Create a forked chain to simulate origin reversal
common, fork := MaxHashFetch, 2*MaxHashFetch common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, headersA, headersB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis) hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil)
// Set a sync init hook to catch boundary changes // Set a sync init hook to catch boundary changes
starting := make(chan struct{}) starting := make(chan struct{})
@ -1150,7 +1189,7 @@ func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
} }
// Synchronise with one of the forks and check boundaries // Synchronise with one of the forks and check boundaries
tester.newPeer("fork A", protocol, hashesA, headersA, blocksA) tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
pending := new(sync.WaitGroup) pending := new(sync.WaitGroup)
pending.Add(1) pending.Add(1)
@ -1171,7 +1210,7 @@ func testForkedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
tester.downloader.syncStatsOrigin = tester.downloader.syncStatsHeight tester.downloader.syncStatsOrigin = tester.downloader.syncStatsHeight
// Synchronise with the second fork and check boundary resets // Synchronise with the second fork and check boundary resets
tester.newPeer("fork B", protocol, hashesB, headersB, blocksB) tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
pending.Add(1) pending.Add(1)
go func() { go func() {
@ -1202,7 +1241,7 @@ func TestFailedSyncBoundaries64Light(t *testing.T) { testFailedSyncBoundaries(t,
func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
// Create a small enough block chain to download // Create a small enough block chain to download
targetBlocks := blockCacheLimit - 15 targetBlocks := blockCacheLimit - 15
hashes, headers, blocks := makeChain(targetBlocks, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil)
// Set a sync init hook to catch boundary changes // Set a sync init hook to catch boundary changes
starting := make(chan struct{}) starting := make(chan struct{})
@ -1218,10 +1257,11 @@ func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
} }
// Attempt a full sync with a faulty peer // Attempt a full sync with a faulty peer
tester.newPeer("faulty", protocol, hashes, headers, blocks) tester.newPeer("faulty", protocol, hashes, headers, blocks, receipts)
missing := targetBlocks / 2 missing := targetBlocks / 2
delete(tester.peerHeaders["faulty"], hashes[missing]) delete(tester.peerHeaders["faulty"], hashes[missing])
delete(tester.peerBlocks["faulty"], hashes[missing]) delete(tester.peerBlocks["faulty"], hashes[missing])
delete(tester.peerReceipts["faulty"], hashes[missing])
pending := new(sync.WaitGroup) pending := new(sync.WaitGroup)
pending.Add(1) pending.Add(1)
@ -1240,7 +1280,7 @@ func testFailedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
pending.Wait() pending.Wait()
// Synchronise with a good peer and check that the boundary origin remains the same after a failure // Synchronise with a good peer and check that the boundary origin remains the same after a failure
tester.newPeer("valid", protocol, hashes, headers, blocks) tester.newPeer("valid", protocol, hashes, headers, blocks, receipts)
pending.Add(1) pending.Add(1)
go func() { go func() {
@ -1270,7 +1310,7 @@ func TestFakedSyncBoundaries64Light(t *testing.T) { testFakedSyncBoundaries(t, 6
func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) { func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
// Create a small block chain // Create a small block chain
targetBlocks := blockCacheLimit - 15 targetBlocks := blockCacheLimit - 15
hashes, headers, blocks := makeChain(targetBlocks+3, 0, genesis) hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil)
// Set a sync init hook to catch boundary changes // Set a sync init hook to catch boundary changes
starting := make(chan struct{}) starting := make(chan struct{})
@ -1286,10 +1326,11 @@ func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0) t.Fatalf("Pristine boundary mismatch: have %v/%v, want %v/%v", origin, latest, 0, 0)
} }
// Create and sync with an attacker that promises a higher chain than available // Create and sync with an attacker that promises a higher chain than available
tester.newPeer("attack", protocol, hashes, headers, blocks) tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
for i := 1; i < 3; i++ { for i := 1; i < 3; i++ {
delete(tester.peerHeaders["attack"], hashes[i]) delete(tester.peerHeaders["attack"], hashes[i])
delete(tester.peerBlocks["attack"], hashes[i]) delete(tester.peerBlocks["attack"], hashes[i])
delete(tester.peerReceipts["attack"], hashes[i])
} }
pending := new(sync.WaitGroup) pending := new(sync.WaitGroup)
@ -1309,7 +1350,7 @@ func testFakedSyncBoundaries(t *testing.T, protocol int, mode SyncMode) {
pending.Wait() pending.Wait()
// Synchronise with a good peer and check that the boundary height has been reduced to the true value // Synchronise with a good peer and check that the boundary height has been reduced to the true value
tester.newPeer("valid", protocol, hashes[3:], headers, blocks) tester.newPeer("valid", protocol, hashes[3:], headers, blocks, receipts)
pending.Add(1) pending.Add(1)
go func() { go func() {

View File

@ -45,7 +45,7 @@ var (
// contains a transaction and every 5th an uncle to allow testing correct block // contains a transaction and every 5th an uncle to allow testing correct block
// reassembly. // reassembly.
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) { func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
blocks := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) { blocks, _ := core.GenerateChain(parent, testdb, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed}) block.SetCoinbase(common.Address{seed})
// If the block number is a multiple of 3, send a bonus transaction to the miner // If the block number is a multiple of 3, send a bonus transaction to the miner

View File

@ -129,8 +129,9 @@ func NewProtocolManager(mode Mode, networkId int, mux *event.TypeMux, txpool txP
case LightMode: case LightMode:
syncMode = downloader.LightSync syncMode = downloader.LightSync
} }
manager.downloader = downloader.New(syncMode, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader, blockchain.GetBlock, manager.downloader = downloader.New(syncMode, manager.eventMux, blockchain.HasHeader, blockchain.HasBlock, blockchain.GetHeader,
blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.GetTd, blockchain.InsertHeaderChain, blockchain.InsertChain, nil, manager.removePeer) blockchain.GetBlock, blockchain.CurrentHeader, blockchain.CurrentBlock, blockchain.CurrentFastBlock, blockchain.GetTd,
blockchain.InsertHeaderChain, blockchain.InsertChain, blockchain.InsertReceiptChain, manager.removePeer)
validator := func(block *types.Block, parent *types.Block) error { validator := func(block *types.Block, parent *types.Block) error {
return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false) return core.ValidateHeader(pow, block.Header(), parent.Header(), true, false)
@ -438,28 +439,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
} }
case p.version >= eth62 && msg.Code == BlockBodiesMsg:
// A batch of block bodies arrived in response to one of our previous requests
var request blockBodiesData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver them all to the downloader for queuing
transactions := make([][]*types.Transaction, len(request))
uncles := make([][]*types.Header, len(request))
for i, body := range request {
transactions[i] = body.Transactions
uncles[i] = body.Uncles
}
// Filter out any explicitly requested bodies, deliver the rest to the downloader
if transactions, uncles := pm.fetcher.FilterBodies(transactions, uncles, time.Now()); len(transactions) > 0 || len(uncles) > 0 {
err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
}
case p.version >= eth62 && msg.Code == GetBlockBodiesMsg: case p.version >= eth62 && msg.Code == GetBlockBodiesMsg:
// Decode the retrieval message // Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
@ -487,6 +466,28 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
return p.SendBlockBodiesRLP(bodies) return p.SendBlockBodiesRLP(bodies)
case p.version >= eth62 && msg.Code == BlockBodiesMsg:
// A batch of block bodies arrived in response to one of our previous requests
var request blockBodiesData
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver them all to the downloader for queuing
transactions := make([][]*types.Transaction, len(request))
uncles := make([][]*types.Header, len(request))
for i, body := range request {
transactions[i] = body.Transactions
uncles[i] = body.Uncles
}
// Filter out any explicitly requested bodies, deliver the rest to the downloader
if transactions, uncles := pm.fetcher.FilterBodies(transactions, uncles, time.Now()); len(transactions) > 0 || len(uncles) > 0 {
err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
if err != nil {
glog.V(logger.Debug).Infoln(err)
}
}
case p.version >= eth63 && msg.Code == GetNodeDataMsg: case p.version >= eth63 && msg.Code == GetNodeDataMsg:
// Decode the retrieval message // Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
@ -550,6 +551,17 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
return p.SendReceiptsRLP(receipts) return p.SendReceiptsRLP(receipts)
case p.version >= eth63 && msg.Code == ReceiptsMsg:
// A batch of receipts arrived in response to one of our previous requests
var receipts [][]*types.Receipt
if err := msg.Decode(&receipts); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver all to the downloader
if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
glog.V(logger.Debug).Infof("failed to deliver receipts: %v", err)
}
case msg.Code == NewBlockHashesMsg: case msg.Code == NewBlockHashesMsg:
// Retrieve and deserialize the remote new block hashes notification // Retrieve and deserialize the remote new block hashes notification
type announce struct { type announce struct {

View File

@ -38,7 +38,7 @@ func newTestProtocolManager(mode Mode, blocks int, generator func(int, *core.Blo
blockproc = core.NewBlockProcessor(db, pow, blockchain, evmux) blockproc = core.NewBlockProcessor(db, pow, blockchain, evmux)
) )
blockchain.SetProcessor(blockproc) blockchain.SetProcessor(blockproc)
chain := core.GenerateChain(genesis, db, blocks, generator) chain, _ := core.GenerateChain(genesis, db, blocks, generator)
if _, err := blockchain.InsertChain(chain); err != nil { if _, err := blockchain.InsertChain(chain); err != nil {
panic(err) panic(err)
} }

View File

@ -55,7 +55,7 @@ var minimumProtocolVersion = map[Mode]uint{
var ProtocolVersions = []uint{eth64, eth63, eth62, eth61} var ProtocolVersions = []uint{eth64, eth63, eth62, eth61}
// Number of implemented message corresponding to different protocol versions. // Number of implemented message corresponding to different protocol versions.
var ProtocolLengths = []uint64{15, 12, 8, 9} var ProtocolLengths = []uint64{19, 17, 8, 9}
const ( const (
NetworkId = 1 NetworkId = 1

View File

@ -313,7 +313,7 @@ func (self *worker) wait() {
self.mux.Post(core.ChainHeadEvent{block}) self.mux.Post(core.ChainHeadEvent{block})
self.mux.Post(logs) self.mux.Post(logs)
} }
if err := core.PutBlockReceipts(self.chainDb, block, receipts); err != nil { if err := core.PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil {
glog.V(logger.Warn).Infoln("error writing block receipts:", err) glog.V(logger.Warn).Infoln("error writing block receipts:", err)
} }
}(block, work.state.Logs(), work.receipts) }(block, work.state.Logs(), work.receipts)

View File

@ -838,7 +838,7 @@ func NewLogRes(log *vm.Log) LogRes {
} }
l.Address = newHexData(log.Address) l.Address = newHexData(log.Address)
l.Data = newHexData(log.Data) l.Data = newHexData(log.Data)
l.BlockNumber = newHexNum(log.Number) l.BlockNumber = newHexNum(log.BlockNumber)
l.LogIndex = newHexNum(log.Index) l.LogIndex = newHexNum(log.Index)
l.TransactionHash = newHexData(log.TxHash) l.TransactionHash = newHexData(log.TxHash)
l.TransactionIndex = newHexNum(log.TxIndex) l.TransactionIndex = newHexNum(log.TxIndex)
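To close the loop on the rename, a hedged sketch of the field flowing through to the RPC layer, assuming only the vm.Log and LogRes shapes visible in this diff (the value is made up):

// Inside the rpc package, next to NewLogRes:
log := &vm.Log{BlockNumber: 1234567} // formerly vm.Log.Number
res := NewLogRes(log)
// res.BlockNumber now carries the hex-encoded block number via newHexNum.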