// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
	data, _ := db.Ancient(freezerHashTable, number)
	if len(data) == 0 {
		data, _ = db.Get(headerHashKey(number))
		// In the background the freezer is moving data from leveldb into flat files.
		// During the first check of the ancient db the data may not be there yet,
		// but by the time we reach into leveldb it may already have been moved.
		// That would result in a spurious not-found, so check the ancient db again.
		if len(data) == 0 {
			data, _ = db.Ancient(freezerHashTable, number)
		}
	}
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}
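
// readCanonicalHeader is an illustrative sketch, not part of the upstream file.
// It shows how the accessors in this file compose: resolve the canonical hash
// for a height, then decode the header stored under that hash. The helper name
// is hypothetical.
func readCanonicalHeader(db ethdb.Reader, number uint64) *types.Header {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil // no canonical block known at this height
	}
	return ReadHeader(db, hash, number)
}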

// WriteCanonicalHash stores the hash assigned to a canonical block number.
func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
		log.Crit("Failed to store number to hash mapping", "err", err)
	}
}

// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Delete(headerHashKey(number)); err != nil {
		log.Crit("Failed to delete number to hash mapping", "err", err)
	}
}

// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
// both canonical and reorged forks included.
func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
	prefix := headerKeyPrefix(number)

	hashes := make([]common.Hash, 0, 1)
	it := db.NewIterator(prefix, nil)
	defer it.Release()

	for it.Next() {
		if key := it.Key(); len(key) == len(prefix)+32 {
			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
		}
	}
	return hashes
}

// NumberHash pairs a block number with the hash of a block at that height.
type NumberHash struct {
	Number uint64
	Hash   common.Hash
}

// ReadAllHashesInRange retrieves all the hashes assigned to blocks in the given
// height range, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
	var (
		start     = encodeBlockNumber(first)
		keyLength = len(headerPrefix) + 8 + 32
		hashes    = make([]*NumberHash, 0, 1+last-first)
		it        = db.NewIterator(headerPrefix, start)
	)
	defer it.Release()
	for it.Next() {
		key := it.Key()
		if len(key) != keyLength {
			continue
		}
		num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
		if num > last {
			break
		}
		hash := common.BytesToHash(key[len(key)-32:])
		hashes = append(hashes, &NumberHash{num, hash})
	}
	return hashes
}
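
// countHashesInRange is an illustrative sketch, not part of the upstream file.
// It shows how the inclusive-range iterator above is typically consumed: walk
// the returned (number, hash) pairs and tally how many blocks are known per
// height. The helper name is hypothetical.
func countHashesInRange(db ethdb.Iteratee, first, last uint64) map[uint64]int {
	counts := make(map[uint64]int)
	for _, entry := range ReadAllHashesInRange(db, first, last) {
		counts[entry.Number]++
	}
	return counts
}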

// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in
// the given chain range. If the number of accumulated entries reaches the given
// threshold, the iteration is aborted and the partial result returned.
func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
	// Short circuit if the limit is 0.
	if limit == 0 {
		return nil, nil
	}
	var (
		numbers []uint64
		hashes  []common.Hash
	)
	// Construct the key prefix of start point.
	start, end := headerHashKey(from), headerHashKey(to)
	it := db.NewIterator(nil, start)
	defer it.Release()

	for it.Next() {
		if bytes.Compare(it.Key(), end) >= 0 {
			break
		}
		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
			hashes = append(hashes, common.BytesToHash(it.Value()))
			// If the accumulated entries reach the limit threshold, stop.
			if len(numbers) >= limit {
				break
			}
		}
	}
	return numbers, hashes
}
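
// forEachCanonicalHash is an illustrative sketch, not part of the upstream file.
// It shows how the limit parameter of ReadAllCanonicalHashes can be used to page
// through a large canonical range in fixed-size batches instead of loading the
// whole range at once. The helper name is hypothetical.
func forEachCanonicalHash(db ethdb.Iteratee, from, to uint64, batch int, fn func(uint64, common.Hash)) {
	for from < to {
		numbers, hashes := ReadAllCanonicalHashes(db, from, to, batch)
		if len(numbers) == 0 {
			return // nothing (left) in the requested range
		}
		for i, number := range numbers {
			fn(number, hashes[i])
		}
		from = numbers[len(numbers)-1] + 1 // resume right after the last entry seen
	}
}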

// ReadHeaderNumber returns the header number assigned to a hash.
func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
	data, _ := db.Get(headerNumberKey(hash))
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	key := headerNumberKey(hash)
	enc := encodeBlockNumber(number)
	if err := db.Put(key, enc); err != nil {
		log.Crit("Failed to store hash to number mapping", "err", err)
	}
}

// DeleteHeaderNumber removes hash->number mapping.
func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headHeaderKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last header's hash", "err", err)
	}
}

// ReadHeadBlockHash retrieves the hash of the current canonical head block.
func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last block's hash", "err", err)
	}
}

// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headFastBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last fast block's hash", "err", err)
	}
}

// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
// did a full sync, the last pivot will always be nil.
func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(lastPivotKey)
	if len(data) == 0 {
		return nil
	}
	var pivot uint64
	if err := rlp.DecodeBytes(data, &pivot); err != nil {
		log.Error("Invalid pivot block number in database", "err", err)
		return nil
	}
	return &pivot
}

// WriteLastPivotNumber stores the number of the last pivot block.
func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
	enc, err := rlp.EncodeToBytes(pivot)
	if err != nil {
		log.Crit("Failed to encode pivot block number", "err", err)
	}
	if err := db.Put(lastPivotKey, enc); err != nil {
		log.Crit("Failed to store pivot block number", "err", err)
	}
}

// ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
// reporting correct numbers across restarts.
func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
	data, _ := db.Get(fastTrieProgressKey)
	if len(data) == 0 {
		return 0
	}
	return new(big.Int).SetBytes(data).Uint64()
}

// WriteFastTrieProgress stores the fast sync trie progress counter to support
// retrieving it across restarts.
func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
		log.Crit("Failed to store fast sync trie progress", "err", err)
	}
}

// ReadTxIndexTail retrieves the number of the oldest block whose transaction
// indices have been indexed. If the corresponding entry is non-existent in the
// database, it means the indexing has been finished.
func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(txIndexTailKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteTxIndexTail stores the number of the oldest indexed block
// into the database.
func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store the transaction index tail", "err", err)
	}
}

// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(fastTxLookupLimitKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteFastTxLookupLimit stores the txlookup limit used in fast sync into the database.
func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
	}
}

// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains the
	// canonical data.
	data, _ := db.Ancient(freezerHeaderTable, number)
	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
		return data
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(headerKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first check of the ancient db the data may not be there yet,
	// but by the time we reach into leveldb it may already have been moved.
	// That would result in a spurious not-found, so check the ancient db again.
	data, _ = db.Ancient(freezerHeaderTable, number)
	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
		return data
	}
	return nil // Can't find the data anywhere.
}

// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadHeader retrieves the block header corresponding to the hash.
func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
	data := ReadHeaderRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	header := new(types.Header)
	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
		log.Error("Invalid block header RLP", "hash", hash, "err", err)
		return nil
	}
	return header
}

// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
	var (
		hash   = header.Hash()
		number = header.Number.Uint64()
	)
	// Write the hash -> number mapping
	WriteHeaderNumber(db, hash, number)

	// Write the encoded header
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		log.Crit("Failed to RLP encode header", "err", err)
	}
	key := headerKey(number, hash)
	if err := db.Put(key, data); err != nil {
		log.Crit("Failed to store header", "err", err)
	}
}
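
// writeCanonicalHeader is an illustrative sketch, not part of the upstream file.
// It shows how a header write is normally paired with the canonical number->hash
// mapping so that ReadCanonicalHash and ReadHeader stay consistent with each
// other. The helper name is hypothetical.
func writeCanonicalHeader(db ethdb.KeyValueWriter, header *types.Header) {
	WriteHeader(db, header) // header body plus the hash->number mapping
	WriteCanonicalHash(db, header.Hash(), header.Number.Uint64())
}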

// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	deleteHeaderWithoutNumber(db, hash, number)
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// deleteHeaderWithoutNumber removes only the block header but does not remove
// the hash to number mapping.
func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerKey(number, hash)); err != nil {
		log.Crit("Failed to delete header", "err", err)
	}
}

// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains the
	// canonical data.
	data, _ := db.Ancient(freezerBodiesTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(blockBodyKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first check of the ancient db the data may not be there yet,
	// but by the time we reach into leveldb it may already have been moved.
	// That would result in a spurious not-found, so check the ancient db again.
	data, _ = db.Ancient(freezerBodiesTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the
// canonical block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
	// If it's an ancient one, we don't need the canonical hash
	data, _ := db.Ancient(freezerBodiesTable, number)
	if len(data) == 0 {
		// Need to get the hash
		data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
		// In the background the freezer is moving data from leveldb into flat files.
		// During the first check of the ancient db the data may not be there yet,
		// but by the time we reach into leveldb it may already have been moved.
		// That would result in a spurious not-found, so check the ancient db again.
		if len(data) == 0 {
			data, _ = db.Ancient(freezerBodiesTable, number)
		}
	}
	return data
}

// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
		log.Crit("Failed to store block body", "err", err)
	}
}

// HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadBody retrieves the block body corresponding to the hash.
func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
	data := ReadBodyRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	body := new(types.Body)
	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
		log.Error("Invalid block body RLP", "hash", hash, "err", err)
		return nil
	}
	return body
}
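
// canonicalTransactionCount is an illustrative sketch, not part of the upstream
// file. It shows how the body accessors compose: resolve the canonical hash,
// read the body stored behind it and report how many transactions it carries.
// The helper name is hypothetical.
func canonicalTransactionCount(db ethdb.Reader, number uint64) (int, bool) {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return 0, false // no canonical block at this height
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return 0, false // body pruned or not yet downloaded
	}
	return len(body.Transactions), true
}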

// WriteBody stores a block body into the database.
func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		log.Crit("Failed to RLP encode body", "err", err)
	}
	WriteBodyRLP(db, hash, number, data)
}

// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
		log.Crit("Failed to delete block body", "err", err)
	}
}

// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains the
	// canonical data.
	data, _ := db.Ancient(freezerDifficultyTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(headerTDKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first check of the ancient db the data may not be there yet,
	// but by the time we reach into leveldb it may already have been moved.
	// That would result in a spurious not-found, so check the ancient db again.
	data, _ = db.Ancient(freezerDifficultyTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
	data := ReadTdRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
		return nil
	}
	return td
}
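
// isHeavier is an illustrative sketch, not part of the upstream file. It shows
// the classic use of the stored total difficulty: deciding which of two known
// blocks sits on the heavier, and therefore preferred, chain. The helper name
// is hypothetical.
func isHeavier(db ethdb.Reader, aHash common.Hash, aNumber uint64, bHash common.Hash, bNumber uint64) bool {
	aTd, bTd := ReadTd(db, aHash, aNumber), ReadTd(db, bHash, bNumber)
	if aTd == nil || bTd == nil {
		return false // missing difficulty data, no verdict possible
	}
	return aTd.Cmp(bTd) > 0
}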

// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		log.Crit("Failed to RLP encode block total difficulty", "err", err)
	}
	if err := db.Put(headerTDKey(number, hash), data); err != nil {
		log.Crit("Failed to store block total difficulty", "err", err)
	}
}

// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerTDKey(number, hash)); err != nil {
		log.Crit("Failed to delete block total difficulty", "err", err)
	}
}

// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains the
	// canonical data.
	data, _ := db.Ancient(freezerReceiptTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(blockReceiptsKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first check of the ancient db the data may not be there yet,
	// but by the time we reach into leveldb it may already have been moved.
	// That would result in a spurious not-found, so check the ancient db again.
	data, _ = db.Ancient(freezerReceiptTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they
// should not be used. Use ReadReceipts instead if the metadata is needed.
func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	// Convert the receipts from their storage form to their internal representation
	storageReceipts := []*types.ReceiptForStorage{}
	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}
	receipts := make(types.Receipts, len(storageReceipts))
	for i, storageReceipt := range storageReceipts {
		receipts[i] = (*types.Receipt)(storageReceipt)
	}
	return receipts
}

// ReadReceipts retrieves all the transaction receipts belonging to a block, including
// its corresponding metadata fields. If it is unable to populate these metadata
// fields then nil is returned.
//
// The current implementation populates these metadata fields by reading the receipts'
// corresponding block body, so if the block body is not found it will return nil even
// if the receipt itself is stored.
func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
	// We're deriving many fields from the block body, retrieve it alongside the receipts
	receipts := ReadRawReceipts(db, hash, number)
	if receipts == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	return receipts
}
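
// Illustrative sketch (hypothetical helper, not part of the original accessors):
// a typical ReadReceipts caller resolves a canonical block number to its hash
// first and then lets DeriveFields fill in the metadata via the chain config.
func readCanonicalReceipts(db ethdb.Reader, number uint64, config *params.ChainConfig) types.Receipts {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil // Unknown canonical block.
	}
	return ReadReceipts(db, hash, number, config)
}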

// WriteReceipts stores all the transaction receipts belonging to a block.
func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
	// Convert the receipts into their storage form and serialize them
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		log.Crit("Failed to encode block receipts", "err", err)
	}
	// Store the flattened receipt slice
	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
		log.Crit("Failed to store block receipts", "err", err)
	}
}
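
// Illustrative sketch (hypothetical helper, not part of the original accessors):
// a write-then-verify round trip. WriteReceipts persists only the storage
// encoding, so the copy read back lacks derived fields such as TxHash.
func writeReceiptsChecked(db ethdb.Database, hash common.Hash, number uint64, receipts types.Receipts) error {
	WriteReceipts(db, hash, number, receipts)
	if stored := ReadRawReceipts(db, hash, number); len(stored) != len(receipts) {
		return fmt.Errorf("receipt count mismatch: stored %d, want %d", len(stored), len(receipts))
	}
	return nil
}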

// DeleteReceipts removes all receipt data associated with a block hash.
func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
		log.Crit("Failed to delete block receipts", "err", err)
	}
}

// storedReceiptRLP is the storage encoding of a receipt.
// Re-definition in core/types/receipt.go.
type storedReceiptRLP struct {
	PostStateOrStatus []byte
	CumulativeGasUsed uint64
	Logs              []*types.LogForStorage
}

// receiptLogs is a barebones version of ReceiptForStorage which only keeps
// the list of logs. When decoding a stored receipt into this object we
// avoid creating the bloom filter.
type receiptLogs struct {
	Logs []*types.Log
}

// DecodeRLP implements rlp.Decoder.
func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
	var stored storedReceiptRLP
	if err := s.Decode(&stored); err != nil {
		return err
	}
	r.Logs = make([]*types.Log, len(stored.Logs))
	for i, log := range stored.Logs {
		r.Logs[i] = (*types.Log)(log)
	}
	return nil
}

// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
	logIndex := uint(0)
	if len(txs) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}
	for i := 0; i < len(receipts); i++ {
		txHash := txs[i].Hash()
		// The derived log fields can simply be set from the block and transaction
		for j := 0; j < len(receipts[i].Logs); j++ {
			receipts[i].Logs[j].BlockNumber = number
			receipts[i].Logs[j].BlockHash = hash
			receipts[i].Logs[j].TxHash = txHash
			receipts[i].Logs[j].TxIndex = uint(i)
			receipts[i].Logs[j].Index = logIndex
			logIndex++
		}
	}
	return nil
}

// ReadLogs retrieves the logs for all transactions in a block. The log fields
// are populated with metadata. If the receipts or the block body are not
// found, nil is returned.
func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	receipts := []*receiptLogs{}
	if err := rlp.DecodeBytes(data, &receipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}

	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	logs := make([][]*types.Log, len(receipts))
	for i, receipt := range receipts {
		logs[i] = receipt.Logs
	}
	return logs
}
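
// Illustrative sketch (hypothetical helper, not part of the original accessors):
// counting the logs of a block via ReadLogs, which decodes into receiptLogs and
// therefore, as noted above, avoids recreating the receipt bloom filters.
func countBlockLogs(db ethdb.Reader, hash common.Hash, number uint64) int {
	total := 0
	for _, txLogs := range ReadLogs(db, hash, number) {
		total += len(txLogs)
	}
	return total
}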

// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved nil is returned.
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
	header := ReadHeader(db, hash, number)
	if header == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return nil
	}
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
	WriteHeader(db, block.Header())
}
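
// Illustrative sketch (hypothetical helper, not part of the original accessors):
// a block round trip. WriteBlock only persists the header and body; the
// canonical hash mapping, total difficulty and receipts are written by
// separate accessors.
func writeAndReloadBlock(db ethdb.Database, block *types.Block) *types.Block {
	WriteBlock(db, block)
	// ReadBlock re-assembles the block from the stored header and body.
	return ReadBlock(db, block.Hash(), block.NumberU64())
}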

// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
	var (
		tdSum      = new(big.Int).Set(td)
		stReceipts []*types.ReceiptForStorage
	)
	return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i, block := range blocks {
			// Convert receipts to storage format and sum up total difficulty.
			stReceipts = stReceipts[:0]
			for _, receipt := range receipts[i] {
				stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
			}
			header := block.Header()
			if i > 0 {
				tdSum.Add(tdSum, header.Difficulty)
			}
			if err := writeAncientBlock(op, block, header, stReceipts, tdSum); err != nil {
				return err
			}
		}
		return nil
	})
}
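
// Illustrative sketch (hypothetical helper, not part of the original accessors):
// freezing a contiguous, already-validated chain segment. As the accumulation
// above implies, the td argument is the total difficulty at the first block of
// the batch; the difficulties of the following blocks are summed internally.
func freezeChainSegment(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, firstTd *big.Int) error {
	written, err := WriteAncientBlocks(db, blocks, receipts, firstTd)
	if err != nil {
		return err
	}
	log.Info("Froze chain segment", "blocks", len(blocks), "size", written)
	return nil
}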

func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
	num := block.NumberU64()
	if err := op.AppendRaw(freezerHashTable, num, block.Hash().Bytes()); err != nil {
		return fmt.Errorf("can't add block %d hash: %v", num, err)
	}
	if err := op.Append(freezerHeaderTable, num, header); err != nil {
		return fmt.Errorf("can't append block header %d: %v", num, err)
	}
	if err := op.Append(freezerBodiesTable, num, block.Body()); err != nil {
		return fmt.Errorf("can't append block body %d: %v", num, err)
	}
	if err := op.Append(freezerReceiptTable, num, receipts); err != nil {
		return fmt.Errorf("can't append block %d receipts: %v", num, err)
	}
	if err := op.Append(freezerDifficultyTable, num, td); err != nil {
		return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
	}
	return nil
}

// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	DeleteHeader(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

// DeleteBlockWithoutNumber removes all block data associated with a hash, except
// the hash to number mapping.
func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	deleteHeaderWithoutNumber(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}
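
// Illustrative sketch (hypothetical helper, not part of the original accessors):
// the two deletion flavours above differ only in whether the hash-to-number
// mapping survives, which matters if the block hash may still need to be
// resolved to a number afterwards.
func removeBlockData(db ethdb.KeyValueWriter, hash common.Hash, number uint64, keepNumberMapping bool) {
	if keepNumberMapping {
		DeleteBlockWithoutNumber(db, hash, number)
		return
	}
	DeleteBlock(db, hash, number)
}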

const badBlockToKeep = 10

type badBlock struct {
	Header *types.Header
	Body   *types.Body
}

// badBlockList implements the sort interface to allow sorting a list of
// bad blocks by their number in the reverse order.
type badBlockList []*badBlock

func (s badBlockList) Len() int { return len(s) }
func (s badBlockList) Less(i, j int) bool {
	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
}
func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// ReadBadBlock retrieves the bad block with the corresponding block hash.
func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	for _, bad := range badBlocks {
		if bad.Header.Hash() == hash {
			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
		}
	}
	return nil
}

// ReadAllBadBlocks retrieves all the bad blocks in the database.
// All returned blocks are sorted in reverse order by number.
func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	var blocks []*types.Block
	for _, bad := range badBlocks {
		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
	}
	return blocks
}

// WriteBadBlock serializes the bad block into the database. If the number of
// accumulated bad blocks exceeds the limit, the oldest ones are dropped.
func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		log.Warn("Failed to load old bad blocks", "error", err)
	}
	var badBlocks badBlockList
	if len(blob) > 0 {
		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
			log.Crit("Failed to decode old bad blocks", "error", err)
		}
	}
	for _, b := range badBlocks {
		if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
			log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
			return
		}
	}
	badBlocks = append(badBlocks, &badBlock{
		Header: block.Header(),
		Body:   block.Body(),
	})
	sort.Sort(sort.Reverse(badBlocks))
	if len(badBlocks) > badBlockToKeep {
		badBlocks = badBlocks[:badBlockToKeep]
	}
	data, err := rlp.EncodeToBytes(badBlocks)
	if err != nil {
		log.Crit("Failed to encode bad blocks", "err", err)
	}
	if err := db.Put(badBlockKey, data); err != nil {
		log.Crit("Failed to write bad blocks", "err", err)
	}
}
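
// Illustrative sketch (hypothetical helper, not part of the original accessors):
// recording a rejected block and reporting how many bad blocks are currently
// retained (at most badBlockToKeep, newest first).
func recordBadBlock(db ethdb.Database, block *types.Block) {
	WriteBadBlock(db, block)
	log.Info("Recorded bad block", "number", block.NumberU64(), "hash", block.Hash(), "retained", len(ReadAllBadBlocks(db)))
}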

// DeleteBadBlocks deletes all the bad blocks from the database.
func DeleteBadBlocks(db ethdb.KeyValueWriter) {
	if err := db.Delete(badBlockKey); err != nil {
		log.Crit("Failed to delete bad blocks", "err", err)
	}
}

// FindCommonAncestor returns the last common ancestor of two block headers
func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
	}
	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	for a.Hash() != b.Hash() {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	return a
}
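
// Illustrative sketch (hypothetical helper, not part of the original accessors):
// locating the fork point between the current head header and an arbitrary
// side-chain header, e.g. to gauge how far a reorg would have to unwind.
func forkAncestor(db ethdb.Reader, side *types.Header) *types.Header {
	head := ReadHeadHeader(db)
	if head == nil || side == nil {
		return nil
	}
	return FindCommonAncestor(db, head, side)
}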

// ReadHeadHeader returns the current canonical head header.
func ReadHeadHeader(db ethdb.Reader) *types.Header {
	headHeaderHash := ReadHeadHeaderHash(db)
	if headHeaderHash == (common.Hash{}) {
		return nil
	}
	headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
	if headHeaderNumber == nil {
		return nil
	}
	return ReadHeader(db, headHeaderHash, *headHeaderNumber)
}

// ReadHeadBlock returns the current canonical head block.
func ReadHeadBlock(db ethdb.Reader) *types.Block {
	headBlockHash := ReadHeadBlockHash(db)
	if headBlockHash == (common.Hash{}) {
		return nil
	}
	headBlockNumber := ReadHeaderNumber(db, headBlockHash)
	if headBlockNumber == nil {
		return nil
	}
	return ReadBlock(db, headBlockHash, *headBlockNumber)
}
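
// Illustrative sketch (hypothetical helper, not part of the original accessors):
// combining the head accessors above to report the current chain head, if any.
func logChainHead(db ethdb.Reader) {
	head := ReadHeadBlock(db)
	if head == nil {
		log.Warn("Database has no head block")
		return
	}
	log.Info("Current chain head", "number", head.NumberU64(), "hash", head.Hash())
}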