// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io/ioutil"
	"math/big"
	"math/rand"
	"os"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"golang.org/x/crypto/sha3"
)
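
// The tests below exercise the rawdb accessors against a fresh in-memory database,
// and a freezer-backed database for the ancient store cases.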

// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test header to move around the database and make sure it's really new
	header := &types.Header{Number: big.NewInt(42), Extra: []byte("test header")}
	if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
		t.Fatalf("Non existent header returned: %v", entry)
	}
	// Write and verify the header in the database
	WriteHeader(db, header)
	if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry == nil {
		t.Fatalf("Stored header not found")
	} else if entry.Hash() != header.Hash() {
		t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header)
	}
	if entry := ReadHeaderRLP(db, header.Hash(), header.Number.Uint64()); entry == nil {
		t.Fatalf("Stored header RLP not found")
	} else {
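		// A header's hash is the keccak256 of its RLP encoding, so hashing the raw
		// bytes from ReadHeaderRLP must reproduce header.Hash().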
		hasher := sha3.NewLegacyKeccak256()
		hasher.Write(entry)

		if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() {
			t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header)
		}
	}
	// Delete the header and verify the execution
	DeleteHeader(db, header.Hash(), header.Number.Uint64())
	if entry := ReadHeader(db, header.Hash(), header.Number.Uint64()); entry != nil {
		t.Fatalf("Deleted header returned: %v", entry)
	}
}

// Tests block body storage and retrieval operations.
func TestBodyStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test body to move around the database and make sure it's really new
	body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}}
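
	// Bodies are keyed by block hash, which cannot be computed from the body alone,
	// so the keccak256 of the body's RLP encoding serves as an arbitrary stand-in key.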
	hasher := sha3.NewLegacyKeccak256()
	rlp.Encode(hasher, body)
	hash := common.BytesToHash(hasher.Sum(nil))

	if entry := ReadBody(db, hash, 0); entry != nil {
		t.Fatalf("Non existent body returned: %v", entry)
	}
	// Write and verify the body in the database
	WriteBody(db, hash, 0, body)
	if entry := ReadBody(db, hash, 0); entry == nil {
		t.Fatalf("Stored body not found")
	} else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
		t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
	}
	if entry := ReadBodyRLP(db, hash, 0); entry == nil {
		t.Fatalf("Stored body RLP not found")
	} else {
		hasher := sha3.NewLegacyKeccak256()
		hasher.Write(entry)

		if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash {
			t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body)
		}
	}
	// Delete the body and verify the execution
	DeleteBody(db, hash, 0)
	if entry := ReadBody(db, hash, 0); entry != nil {
		t.Fatalf("Deleted body returned: %v", entry)
	}
}

// Tests block storage and retrieval operations.
func TestBlockStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test block to move around the database and make sure it's really new
	block := types.NewBlockWithHeader(&types.Header{
		Extra:       []byte("test block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent header returned: %v", entry)
	}
	if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent body returned: %v", entry)
	}
	// Write and verify the block in the database
	WriteBlock(db, block)
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
		t.Fatalf("Stored block not found")
	} else if entry.Hash() != block.Hash() {
		t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
	}
	if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry == nil {
		t.Fatalf("Stored header not found")
	} else if entry.Hash() != block.Header().Hash() {
		t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header())
	}
	if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
		t.Fatalf("Stored body not found")
	} else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
		t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
	}
	// Delete the block and verify the execution
	DeleteBlock(db, block.Hash(), block.NumberU64())
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Deleted block returned: %v", entry)
	}
	if entry := ReadHeader(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Deleted header returned: %v", entry)
	}
	if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Deleted body returned: %v", entry)
	}
}

// Tests that partial block contents don't get reassembled into full blocks.
func TestPartialBlockStorage(t *testing.T) {
	db := NewMemoryDatabase()
	block := types.NewBlockWithHeader(&types.Header{
		Extra:       []byte("test block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	// Store a header and check that it's not recognized as a block
	WriteHeader(db, block.Header())
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	DeleteHeader(db, block.Hash(), block.NumberU64())

	// Store a body and check that it's not recognized as a block
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	DeleteBody(db, block.Hash(), block.NumberU64())

	// Store a header and a body separately and check reassembly
	WriteHeader(db, block.Header())
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())

	if entry := ReadBlock(db, block.Hash(), block.NumberU64()); entry == nil {
		t.Fatalf("Stored block not found")
	} else if entry.Hash() != block.Hash() {
		t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
	}
}

// Tests bad block storage and retrieval operations.
func TestBadBlockStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test block to move around the database and make sure it's really new
	block := types.NewBlockWithHeader(&types.Header{
		Number:      big.NewInt(1),
		Extra:       []byte("bad block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	if entry := ReadBadBlock(db, block.Hash()); entry != nil {
		t.Fatalf("Non existent block returned: %v", entry)
	}
	// Write and verify the block in the database
	WriteBadBlock(db, block)
	if entry := ReadBadBlock(db, block.Hash()); entry == nil {
		t.Fatalf("Stored block not found")
	} else if entry.Hash() != block.Hash() {
		t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block)
	}
	// Write one more bad block
	blockTwo := types.NewBlockWithHeader(&types.Header{
		Number:      big.NewInt(2),
		Extra:       []byte("bad block two"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	WriteBadBlock(db, blockTwo)

	// Write block one again, it should be filtered out as a duplicate.
	WriteBadBlock(db, block)
	badBlocks := ReadAllBadBlocks(db)
	if len(badBlocks) != 2 {
		t.Fatalf("Failed to load all bad blocks")
	}

	// Write a bunch of bad blocks; they should all come back sorted by block number
	// in descending order, and any blocks beyond the storage cap should be truncated.
	for _, n := range rand.Perm(100) {
		block := types.NewBlockWithHeader(&types.Header{
			Number:      big.NewInt(int64(n)),
			Extra:       []byte("bad block"),
			UncleHash:   types.EmptyUncleHash,
			TxHash:      types.EmptyRootHash,
			ReceiptHash: types.EmptyRootHash,
		})
		WriteBadBlock(db, block)
	}
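
	// The checks below expect exactly badBlockToKeep entries to survive, sorted by
	// block number in descending order, which implies truncation happens at write time.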
	badBlocks = ReadAllBadBlocks(db)
	if len(badBlocks) != badBlockToKeep {
		t.Fatalf("The number of persisted bad blocks is incorrect: %d", len(badBlocks))
	}
	for i := 0; i < len(badBlocks)-1; i++ {
		if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() {
			t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, badBlocks[i].NumberU64(), i+1, badBlocks[i+1].NumberU64())
		}
	}

	// Delete all bad blocks
	DeleteBadBlocks(db)
	badBlocks = ReadAllBadBlocks(db)
	if len(badBlocks) != 0 {
		t.Fatalf("Failed to delete bad blocks")
	}
}

// Tests block total difficulty storage and retrieval operations.
func TestTdStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test TD to move around the database and make sure it's really new
	hash, td := common.Hash{}, big.NewInt(314)
	if entry := ReadTd(db, hash, 0); entry != nil {
		t.Fatalf("Non existent TD returned: %v", entry)
	}
	// Write and verify the TD in the database
	WriteTd(db, hash, 0, td)
	if entry := ReadTd(db, hash, 0); entry == nil {
		t.Fatalf("Stored TD not found")
	} else if entry.Cmp(td) != 0 {
		t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
	}
	// Delete the TD and verify the execution
	DeleteTd(db, hash, 0)
	if entry := ReadTd(db, hash, 0); entry != nil {
		t.Fatalf("Deleted TD returned: %v", entry)
	}
}

// Tests that canonical numbers can be mapped to hashes and retrieved.
func TestCanonicalMappingStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a test canonical number and assigned hash to move around
	hash, number := common.Hash{0: 0xff}, uint64(314)
	if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
		t.Fatalf("Non existent canonical mapping returned: %v", entry)
	}
	// Write and verify the canonical mapping in the database
	WriteCanonicalHash(db, hash, number)
	if entry := ReadCanonicalHash(db, number); entry == (common.Hash{}) {
		t.Fatalf("Stored canonical mapping not found")
	} else if entry != hash {
		t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
	}
	// Delete the canonical mapping and verify the execution
	DeleteCanonicalHash(db, number)
	if entry := ReadCanonicalHash(db, number); entry != (common.Hash{}) {
		t.Fatalf("Deleted canonical mapping returned: %v", entry)
	}
}

// Tests that head headers and head blocks can be assigned, individually.
func TestHeadStorage(t *testing.T) {
	db := NewMemoryDatabase()

	blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
	blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
	blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})

	// Check that no head entries are in a pristine database
	if entry := ReadHeadHeaderHash(db); entry != (common.Hash{}) {
		t.Fatalf("Non head header entry returned: %v", entry)
	}
	if entry := ReadHeadBlockHash(db); entry != (common.Hash{}) {
		t.Fatalf("Non head block entry returned: %v", entry)
	}
	if entry := ReadHeadFastBlockHash(db); entry != (common.Hash{}) {
		t.Fatalf("Non fast head block entry returned: %v", entry)
	}
	// Assign separate entries for the head header and block
	WriteHeadHeaderHash(db, blockHead.Hash())
	WriteHeadBlockHash(db, blockFull.Hash())
	WriteHeadFastBlockHash(db, blockFast.Hash())

	// Check that both heads are present, and different (i.e. two heads maintained)
	if entry := ReadHeadHeaderHash(db); entry != blockHead.Hash() {
		t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
	}
	if entry := ReadHeadBlockHash(db); entry != blockFull.Hash() {
		t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
	}
	if entry := ReadHeadFastBlockHash(db); entry != blockFast.Hash() {
		t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
	}
}

// Tests that receipts associated with a single block can be stored and retrieved.
func TestBlockReceiptStorage(t *testing.T) {
	db := NewMemoryDatabase()

	// Create a live block since we need metadata to reconstruct the receipt
	tx1 := types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, big.NewInt(1), nil)
	tx2 := types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil)

	body := &types.Body{Transactions: types.Transactions{tx1, tx2}}

	// Create the two receipts to manage afterwards
	receipt1 := &types.Receipt{
		Status:            types.ReceiptStatusFailed,
		CumulativeGasUsed: 1,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x11})},
			{Address: common.BytesToAddress([]byte{0x01, 0x11})},
		},
		TxHash:          tx1.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),
		GasUsed:         111111,
	}
	receipt1.Bloom = types.CreateBloom(types.Receipts{receipt1})

	receipt2 := &types.Receipt{
		PostState:         common.Hash{2}.Bytes(),
		CumulativeGasUsed: 2,
		Logs: []*types.Log{
			{Address: common.BytesToAddress([]byte{0x22})},
			{Address: common.BytesToAddress([]byte{0x02, 0x22})},
		},
		TxHash:          tx2.Hash(),
		ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}),
		GasUsed:         222222,
	}
	receipt2.Bloom = types.CreateBloom(types.Receipts{receipt2})
	receipts := []*types.Receipt{receipt1, receipt2}

	// Check that no receipt entries are in a pristine database
	hash := common.BytesToHash([]byte{0x03, 0x14})
	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
		t.Fatalf("non existent receipts returned: %v", rs)
	}
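
	// Receipts are stored in a trimmed form: derived fields such as the transaction
	// hash, contract address and per-transaction gas are not persisted, and ReadReceipts
	// recomputes them from the block body, so the body must be present as well.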
	// Insert the body that corresponds to the receipts
	WriteBody(db, hash, 0, body)

	// Insert the receipt slice into the database and check presence
	WriteReceipts(db, hash, 0, receipts)
	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) == 0 {
		t.Fatalf("no receipts returned")
	} else {
		if err := checkReceiptsRLP(rs, receipts); err != nil {
			t.Fatalf(err.Error())
		}
	}
	// Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed)
	DeleteBody(db, hash, 0)
	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); rs != nil {
		t.Fatalf("receipts returned when body was deleted: %v", rs)
	}
	// Ensure that receipts without metadata can be returned without the block body too
	if err := checkReceiptsRLP(ReadRawReceipts(db, hash, 0), receipts); err != nil {
		t.Fatalf(err.Error())
	}
	// Sanity check that body alone without the receipt is a full purge
	WriteBody(db, hash, 0, body)

	DeleteReceipts(db, hash, 0)
	if rs := ReadReceipts(db, hash, 0, params.TestChainConfig); len(rs) != 0 {
		t.Fatalf("deleted receipts returned: %v", rs)
	}
}

func checkReceiptsRLP(have, want types.Receipts) error {
	if len(have) != len(want) {
		return fmt.Errorf("receipts sizes mismatch: have %d, want %d", len(have), len(want))
	}
	for i := 0; i < len(want); i++ {
		rlpHave, err := rlp.EncodeToBytes(have[i])
		if err != nil {
			return err
		}
		rlpWant, err := rlp.EncodeToBytes(want[i])
		if err != nil {
			return err
		}
		if !bytes.Equal(rlpHave, rlpWant) {
			return fmt.Errorf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant))
		}
	}
	return nil
}

func TestAncientStorage(t *testing.T) {
	// Freezer style fast import the chain.
	frdir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("failed to create temp freezer dir: %v", err)
	}
	defer os.RemoveAll(frdir)

	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
	if err != nil {
		t.Fatalf("failed to create database with ancient backend")
	}
	defer db.Close()
	// Create a test block
	block := types.NewBlockWithHeader(&types.Header{
		Number:      big.NewInt(0),
		Extra:       []byte("test block"),
		UncleHash:   types.EmptyUncleHash,
		TxHash:      types.EmptyRootHash,
		ReceiptHash: types.EmptyRootHash,
	})
	// Ensure nothing non-existent will be read
	hash, number := block.Hash(), block.NumberU64()
	if blob := ReadHeaderRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent header returned")
	}
	if blob := ReadBodyRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent body returned")
	}
	if blob := ReadReceiptsRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent receipts returned")
	}
	if blob := ReadTdRLP(db, hash, number); len(blob) > 0 {
		t.Fatalf("non existent td returned")
	}

	// Write and verify the block in the ancient store
	WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil}, big.NewInt(100))
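	// The write size and error returned by WriteAncientBlocks are ignored here; the
	// reads below verify that the block data actually reached the ancient store.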

	if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no header returned")
	}
	if blob := ReadBodyRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no body returned")
	}
	if blob := ReadReceiptsRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no receipts returned")
	}
	if blob := ReadTdRLP(db, hash, number); len(blob) == 0 {
		t.Fatalf("no td returned")
	}

	// Use a fake hash for data retrieval, nothing should be returned.
	fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03})
	if blob := ReadHeaderRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid header returned")
	}
	if blob := ReadBodyRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid body returned")
	}
	if blob := ReadReceiptsRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid receipts returned")
	}
	if blob := ReadTdRLP(db, fakeHash, number); len(blob) != 0 {
		t.Fatalf("invalid td returned")
	}
}

func TestCanonicalHashIteration(t *testing.T) {
	var cases = []struct {
		from, to uint64
		limit    int
		expect   []uint64
	}{
		{1, 8, 0, nil},
		{1, 8, 1, []uint64{1}},
		{1, 8, 10, []uint64{1, 2, 3, 4, 5, 6, 7}},
		{1, 9, 10, []uint64{1, 2, 3, 4, 5, 6, 7, 8}},
		{2, 9, 10, []uint64{2, 3, 4, 5, 6, 7, 8}},
		{9, 10, 10, nil},
	}
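	// The expectations above encode the range semantics of ReadAllCanonicalHashes:
	// numbers come from the half-open interval [from, to) and at most limit entries
	// are returned.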
	// Test empty db iteration
	db := NewMemoryDatabase()
	numbers, _ := ReadAllCanonicalHashes(db, 0, 10, 10)
	if len(numbers) != 0 {
		t.Fatalf("No entry should be returned to iterate an empty db")
	}
	// Fill database with testing data.
	for i := uint64(1); i <= 8; i++ {
		WriteCanonicalHash(db, common.Hash{}, i)
		WriteTd(db, common.Hash{}, i, big.NewInt(10)) // Write some interfering data
	}
	for i, c := range cases {
		numbers, _ := ReadAllCanonicalHashes(db, c.from, c.to, c.limit)
		if !reflect.DeepEqual(numbers, c.expect) {
			t.Fatalf("Case %d failed, want %v, got %v", i, c.expect, numbers)
		}
	}
}

func TestHashesInRange(t *testing.T) {
	mkHeader := func(number, seq int) *types.Header {
		h := types.Header{
			Difficulty: new(big.Int),
			Number:     big.NewInt(int64(number)),
			GasLimit:   uint64(seq),
		}
		return &h
	}
	db := NewMemoryDatabase()
	// For each number, write N versions of that particular number
	total := 0
	for i := 0; i < 15; i++ {
		for ii := 0; ii < i; ii++ {
			WriteHeader(db, mkHeader(i, ii))
			total++
		}
	}
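	// With 0+1+...+14 headers written, total comes out to 105; the reads below count
	// hashes both per block number and across number ranges.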
	if have, want := len(ReadAllHashesInRange(db, 10, 10)), 10; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashesInRange(db, 10, 9)), 0; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashesInRange(db, 0, 100)), total; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashesInRange(db, 9, 10)), 9+10; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashes(db, 10)), 10; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashes(db, 16)), 0; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
	if have, want := len(ReadAllHashes(db, 1)), 1; have != want {
		t.Fatalf("Wrong number of hashes read, want %d, got %d", want, have)
	}
}

// This measures the write speed of the WriteAncientBlocks operation.
func BenchmarkWriteAncientBlocks(b *testing.B) {
	// Open freezer database.
	frdir, err := ioutil.TempDir("", "")
	if err != nil {
		b.Fatalf("failed to create temp freezer dir: %v", err)
	}
	defer os.RemoveAll(frdir)
	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
	if err != nil {
		b.Fatalf("failed to create database with ancient backend")
	}

	// Create the data to insert. The blocks must have consecutive numbers, so we create
	// all of them ahead of time. However, there is no need to create receipts
	// individually for each block, just make one batch here and reuse it for all writes.
	const batchSize = 128
	const blockTxs = 20
	allBlocks := makeTestBlocks(b.N, blockTxs)
	batchReceipts := makeTestReceipts(batchSize, blockTxs)
	b.ResetTimer()

	// The benchmark loop writes batches of blocks, but note that the total block count is
	// b.N. This means the resulting ns/op measurement is the time it takes to write a
	// single block and its associated data.
	var td = big.NewInt(55)
	var totalSize int64
	for i := 0; i < b.N; i += batchSize {
		length := batchSize
		if i+batchSize > b.N {
			length = b.N - i
		}

		blocks := allBlocks[i : i+length]
		receipts := batchReceipts[:length]
		writeSize, err := WriteAncientBlocks(db, blocks, receipts, td)
		if err != nil {
			b.Fatal(err)
		}
		totalSize += writeSize
	}

	// Enable MB/s reporting.
	b.SetBytes(totalSize / int64(b.N))
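	// SetBytes records the bytes processed per iteration, so dividing the total write
	// size by b.N makes the reported MB/s reflect per-block write throughput.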
}

// makeTestBlocks creates fake blocks for the ancient write benchmark.
func makeTestBlocks(nblock int, txsPerBlock int) []*types.Block {
	key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	signer := types.LatestSignerForChainID(big.NewInt(8))

	// Create transactions.
	txs := make([]*types.Transaction, txsPerBlock)
	for i := 0; i < len(txs); i++ {
		var err error
		to := common.Address{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
		txs[i], err = types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    2,
			GasPrice: big.NewInt(30000),
			Gas:      0x45454545,
			To:       &to,
		})
		if err != nil {
			panic(err)
		}
	}

	// Create the blocks.
	blocks := make([]*types.Block, nblock)
	for i := 0; i < nblock; i++ {
		header := &types.Header{
			Number: big.NewInt(int64(i)),
			Extra:  []byte("test block"),
		}
		blocks[i] = types.NewBlockWithHeader(header).WithBody(txs, nil)
		blocks[i].Hash() // pre-cache the block hash
	}
	return blocks
}

// makeTestReceipts creates fake receipts for the ancient write benchmark.
func makeTestReceipts(n int, nPerBlock int) []types.Receipts {
	receipts := make([]*types.Receipt, nPerBlock)
	for i := 0; i < len(receipts); i++ {
		receipts[i] = &types.Receipt{
			Status:            types.ReceiptStatusSuccessful,
			CumulativeGasUsed: 0x888888888,
			Logs:              make([]*types.Log, 5),
		}
	}
	allReceipts := make([]types.Receipts, n)
	for i := 0; i < n; i++ {
		allReceipts[i] = receipts
	}
	return allReceipts
}