core/rawdb, ethdb: introduce batched/atomic reads from ancients (#23566)

This PR adds a new accessor method to the freezer database. The new view offers a consistent interface, guaranteeing that all individual tables (headers, bodies etc.) are read at the same item count, and that this count does not change (through appends or truncation) while the operation is in progress.
Martin Holst Swende 2021-10-25 16:24:27 +02:00 committed by GitHub
parent 2954f40eac
commit 0e7efd696b
5 changed files with 129 additions and 138 deletions
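
For orientation, the read pattern introduced below looks roughly like this. It is a condensed sketch of the new ReadBodyRLP-style accessors, assuming it sits inside core/rawdb so the package-level helpers (isCanon, freezerBodiesTable, blockBodyKey) are in scope; the function name is hypothetical:

// readBodyRLPSketch (hypothetical name) condenses the new accessor shape:
// the closure runs under the freezer's read lock, so the ancient tables
// cannot be appended to or truncated between the canon check and the read.
func readBodyRLPSketch(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
    var data []byte
    db.ReadAncients(func(reader ethdb.AncientReader) error {
        // Check if the data is in ancients (the canonical, frozen set).
        if isCanon(reader, number, hash) {
            data, _ = reader.Ancient(freezerBodiesTable, number)
            return nil
        }
        // If not, fall back to the key-value store.
        data, _ = db.Get(blockBodyKey(number, hash))
        return nil
    })
    return data
}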


@@ -35,20 +35,15 @@ import (
 // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
 func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
-    data, _ := db.Ancient(freezerHashTable, number)
-    if len(data) == 0 {
-        data, _ = db.Get(headerHashKey(number))
-        // In the background freezer is moving data from leveldb to flatten files.
-        // So during the first check for ancient db, the data is not yet in there,
-        // but when we reach into leveldb, the data was already moved. That would
-        // result in a not found error.
-        if len(data) == 0 {
-            data, _ = db.Ancient(freezerHashTable, number)
-        }
-    }
-    if len(data) == 0 {
-        return common.Hash{}
-    }
+    var data []byte
+    db.ReadAncients(func(reader ethdb.AncientReader) error {
+        data, _ = reader.Ancient(freezerHashTable, number)
+        if len(data) == 0 {
+            // Get it by hash from leveldb
+            data, _ = db.Get(headerHashKey(number))
+        }
+        return nil
+    })
     return common.BytesToHash(data)
 }
@@ -304,32 +299,25 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
 // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
 func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
-    // First try to look up the data in ancient database. Extra hash
-    // comparison is necessary since ancient database only maintains
-    // the canonical data.
-    data, _ := db.Ancient(freezerHeaderTable, number)
-    if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
-        return data
-    }
-    // Then try to look up the data in leveldb.
-    data, _ = db.Get(headerKey(number, hash))
-    if len(data) > 0 {
-        return data
-    }
-    // In the background freezer is moving data from leveldb to flatten files.
-    // So during the first check for ancient db, the data is not yet in there,
-    // but when we reach into leveldb, the data was already moved. That would
-    // result in a not found error.
-    data, _ = db.Ancient(freezerHeaderTable, number)
-    if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
-        return data
-    }
-    return nil // Can't find the data anywhere.
+    var data []byte
+    db.ReadAncients(func(reader ethdb.AncientReader) error {
+        // First try to look up the data in ancient database. Extra hash
+        // comparison is necessary since ancient database only maintains
+        // the canonical data.
+        data, _ = reader.Ancient(freezerHeaderTable, number)
+        if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
+            return nil
+        }
+        // If not, try reading from leveldb
+        data, _ = db.Get(headerKey(number, hash))
+        return nil
+    })
+    return data
 }
 
 // HasHeader verifies the existence of a block header corresponding to the hash.
 func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
-    if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
+    if isCanon(db, number, hash) {
         return true
     }
     if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
@@ -389,53 +377,48 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
     }
 }
 
+// isCanon is an internal utility method, to check whether the given number/hash
+// is part of the ancient (canon) set.
+func isCanon(reader ethdb.AncientReader, number uint64, hash common.Hash) bool {
+    h, err := reader.Ancient(freezerHashTable, number)
+    if err != nil {
+        return false
+    }
+    return bytes.Equal(h, hash[:])
+}
+
 // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
 func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
     // First try to look up the data in ancient database. Extra hash
     // comparison is necessary since ancient database only maintains
     // the canonical data.
-    data, _ := db.Ancient(freezerBodiesTable, number)
-    if len(data) > 0 {
-        h, _ := db.Ancient(freezerHashTable, number)
-        if common.BytesToHash(h) == hash {
-            return data
-        }
-    }
-    // Then try to look up the data in leveldb.
-    data, _ = db.Get(blockBodyKey(number, hash))
-    if len(data) > 0 {
-        return data
-    }
-    // In the background freezer is moving data from leveldb to flatten files.
-    // So during the first check for ancient db, the data is not yet in there,
-    // but when we reach into leveldb, the data was already moved. That would
-    // result in a not found error.
-    data, _ = db.Ancient(freezerBodiesTable, number)
-    if len(data) > 0 {
-        h, _ := db.Ancient(freezerHashTable, number)
-        if common.BytesToHash(h) == hash {
-            return data
-        }
-    }
-    return nil // Can't find the data anywhere.
+    var data []byte
+    db.ReadAncients(func(reader ethdb.AncientReader) error {
+        // Check if the data is in ancients
+        if isCanon(reader, number, hash) {
+            data, _ = reader.Ancient(freezerBodiesTable, number)
+            return nil
+        }
+        // If not, try reading from leveldb
+        data, _ = db.Get(blockBodyKey(number, hash))
+        return nil
+    })
+    return data
 }
 
 // ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
 // block at number, in RLP encoding.
 func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
-    // If it's an ancient one, we don't need the canonical hash
-    data, _ := db.Ancient(freezerBodiesTable, number)
-    if len(data) == 0 {
-        // Need to get the hash
-        data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
-        // In the background freezer is moving data from leveldb to flatten files.
-        // So during the first check for ancient db, the data is not yet in there,
-        // but when we reach into leveldb, the data was already moved. That would
-        // result in a not found error.
-        if len(data) == 0 {
-            data, _ = db.Ancient(freezerBodiesTable, number)
-        }
-    }
+    var data []byte
+    db.ReadAncients(func(reader ethdb.AncientReader) error {
+        data, _ = reader.Ancient(freezerBodiesTable, number)
+        if len(data) > 0 {
+            return nil
+        }
+        // Get it by hash from leveldb
+        data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
+        return nil
+    })
     return data
 }
@@ -448,7 +431,7 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp
 // HasBody verifies the existence of a block body corresponding to the hash.
 func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
-    if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
+    if isCanon(db, number, hash) {
         return true
     }
     if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
@@ -489,34 +472,19 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
 // ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
 func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
-    // First try to look up the data in ancient database. Extra hash
-    // comparison is necessary since ancient database only maintains
-    // the canonical data.
-    data, _ := db.Ancient(freezerDifficultyTable, number)
-    if len(data) > 0 {
-        h, _ := db.Ancient(freezerHashTable, number)
-        if common.BytesToHash(h) == hash {
-            return data
-        }
-    }
-    // Then try to look up the data in leveldb.
-    data, _ = db.Get(headerTDKey(number, hash))
-    if len(data) > 0 {
-        return data
-    }
-    // In the background freezer is moving data from leveldb to flatten files.
-    // So during the first check for ancient db, the data is not yet in there,
-    // but when we reach into leveldb, the data was already moved. That would
-    // result in a not found error.
-    data, _ = db.Ancient(freezerDifficultyTable, number)
-    if len(data) > 0 {
-        h, _ := db.Ancient(freezerHashTable, number)
-        if common.BytesToHash(h) == hash {
-            return data
-        }
-    }
-    return nil // Can't find the data anywhere.
+    var data []byte
+    db.ReadAncients(func(reader ethdb.AncientReader) error {
+        // Check if the data is in ancients
+        if isCanon(reader, number, hash) {
+            data, _ = reader.Ancient(freezerDifficultyTable, number)
+            return nil
+        }
+        // If not, try reading from leveldb
+        data, _ = db.Get(headerTDKey(number, hash))
+        return nil
+    })
+    return data
 }
 
 // ReadTd retrieves a block's total difficulty corresponding to the hash.
 func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
@@ -553,7 +521,7 @@ func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
 // HasReceipts verifies the existence of all the transaction receipts belonging
 // to a block.
 func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
-    if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
+    if isCanon(db, number, hash) {
         return true
     }
     if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
@@ -564,34 +532,19 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
 // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
 func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
-    // First try to look up the data in ancient database. Extra hash
-    // comparison is necessary since ancient database only maintains
-    // the canonical data.
-    data, _ := db.Ancient(freezerReceiptTable, number)
-    if len(data) > 0 {
-        h, _ := db.Ancient(freezerHashTable, number)
-        if common.BytesToHash(h) == hash {
-            return data
-        }
-    }
-    // Then try to look up the data in leveldb.
-    data, _ = db.Get(blockReceiptsKey(number, hash))
-    if len(data) > 0 {
-        return data
-    }
-    // In the background freezer is moving data from leveldb to flatten files.
-    // So during the first check for ancient db, the data is not yet in there,
-    // but when we reach into leveldb, the data was already moved. That would
-    // result in a not found error.
-    data, _ = db.Ancient(freezerReceiptTable, number)
-    if len(data) > 0 {
-        h, _ := db.Ancient(freezerHashTable, number)
-        if common.BytesToHash(h) == hash {
-            return data
-        }
-    }
-    return nil // Can't find the data anywhere.
+    var data []byte
+    db.ReadAncients(func(reader ethdb.AncientReader) error {
+        // Check if the data is in ancients
+        if isCanon(reader, number, hash) {
+            data, _ = reader.Ancient(freezerReceiptTable, number)
+            return nil
+        }
+        // If not, try reading from leveldb
+        data, _ = db.Get(blockReceiptsKey(number, hash))
+        return nil
+    })
+    return data
 }
 
 // ReadRawReceipts retrieves all the transaction receipts belonging to a block.
 // The receipt metadata fields are not guaranteed to be populated, so they


@@ -89,8 +89,8 @@ func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
     return nil, errNotSupported
 }
 
-// ReadAncients returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) ReadAncients(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
+// AncientRange returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
     return nil, errNotSupported
 }
@@ -119,6 +119,22 @@ func (db *nofreezedb) Sync() error {
     return errNotSupported
 }
 
+func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
+    // Unlike other ancient-related methods, this method does not return
+    // errNotSupported when invoked.
+    // The reason for this is that the caller might want to do several things:
+    // 1. Check if something is in freezer,
+    // 2. If not, check leveldb.
+    //
+    // This will work, since the ancient-checks inside 'fn' will return errors,
+    // and the leveldb work will continue.
+    //
+    // If we instead were to return errNotSupported here, then the caller would
+    // have to explicitly check for that, having an extra clause to do the
+    // non-ancient operations.
+    return fn(db)
+}
+
 // NewDatabase creates a high level database on top of a given key-value data
 // store without a freezer moving immutable chain segments into cold storage.
 func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
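
The comment above captures the key subtlety: code written against the batched view keeps working when there is no freezer at all, because the closure still runs and only its ancient lookups fail. A rough in-package sketch of what that looks like from the caller's side (hypothetical function name; errors deliberately ignored, as in the accessors above):

// readCanonicalHashNoFreezer behaves identically on freezer-backed and
// nofreezedb-backed databases: in the latter case reader.Ancient returns
// errNotSupported, data stays empty, and the key-value branch supplies it.
func readCanonicalHashNoFreezer(db ethdb.Reader, number uint64) common.Hash {
    var data []byte
    db.ReadAncients(func(reader ethdb.AncientReader) error {
        data, _ = reader.Ancient(freezerHashTable, number) // errNotSupported on nofreezedb
        if len(data) == 0 {
            data, _ = db.Get(headerHashKey(number)) // the key-value store still answers
        }
        return nil
    })
    return common.BytesToHash(data)
}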


@@ -80,8 +80,9 @@ type freezer struct {
     frozen    uint64 // Number of blocks already frozen
     threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
 
-    // This lock synchronizes writers and the truncate operation.
-    writeLock  sync.Mutex
+    // This lock synchronizes writers and the truncate operation, as well as
+    // the "atomic" (batched) read operations.
+    writeLock  sync.RWMutex
     writeBatch *freezerBatch
 
     readonly bool
@@ -201,12 +202,12 @@ func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
     return nil, errUnknownTable
 }
 
-// ReadAncients retrieves multiple items in sequence, starting from the index 'start'.
+// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
 // It will return
 //  - at most 'max' items,
 //  - at least 1 item (even if exceeding the maxByteSize), but will otherwise
 //    return as many items as fit into maxByteSize.
-func (f *freezer) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
     if table := f.tables[kind]; table != nil {
         return table.RetrieveItems(start, count, maxBytes)
     }
@@ -222,8 +223,8 @@ func (f *freezer) Ancients() (uint64, error) {
 func (f *freezer) AncientSize(kind string) (uint64, error) {
     // This needs the write lock to avoid data races on table fields.
     // Speed doesn't matter here, AncientSize is for debugging.
-    f.writeLock.Lock()
-    defer f.writeLock.Unlock()
+    f.writeLock.RLock()
+    defer f.writeLock.RUnlock()
 
     if table := f.tables[kind]; table != nil {
         return table.size()
@@ -231,6 +232,14 @@ func (f *freezer) AncientSize(kind string) (uint64, error) {
     return 0, errUnknownTable
 }
 
+// ReadAncients runs the given read operation while ensuring that no writes take place
+// on the underlying freezer.
+func (f *freezer) ReadAncients(fn func(ethdb.AncientReader) error) (err error) {
+    f.writeLock.RLock()
+    defer f.writeLock.RUnlock()
+    return fn(f)
+}
+
 // ModifyAncients runs the given write operation.
 func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
     if f.readonly {
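
The atomicity guarantee comes from the lock choreography above: ReadAncients holds the read lock for the whole callback, while writers and truncation take the write lock (per the struct comment on writeLock). A stripped-down, standard-library-only model of that interaction; the toy types here are illustrative, not the real freezer:

package main

import (
    "fmt"
    "sync"
)

// toyFreezer models only the locking scheme: batched reads take the read
// lock, writes take the write lock, so the item count cannot change while
// a read closure is running.
type toyFreezer struct {
    writeLock sync.RWMutex
    items     int
}

func (f *toyFreezer) ReadAncients(fn func(frozen int) error) error {
    f.writeLock.RLock()
    defer f.writeLock.RUnlock()
    return fn(f.items) // f.items is stable for the whole callback
}

func (f *toyFreezer) ModifyAncients(n int) {
    f.writeLock.Lock()
    defer f.writeLock.Unlock()
    f.items += n // appends are serialized against readers
}

func main() {
    f := &toyFreezer{items: 100}
    var wg sync.WaitGroup
    wg.Add(2)
    go func() { defer wg.Done(); f.ModifyAncients(5) }()
    go func() {
        defer wg.Done()
        f.ReadAncients(func(frozen int) error {
            fmt.Println("consistent view of", frozen, "items") // 100 or 105, never torn
            return nil
        })
    }()
    wg.Wait()
}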


@@ -62,10 +62,10 @@ func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
     return t.db.Ancient(kind, number)
 }
 
-// ReadAncients is a noop passthrough that just forwards the request to the underlying
+// AncientRange is a noop passthrough that just forwards the request to the underlying
 // database.
-func (t *table) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) {
-    return t.db.ReadAncients(kind, start, count, maxBytes)
+func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+    return t.db.AncientRange(kind, start, count, maxBytes)
 }
 
 // Ancients is a noop passthrough that just forwards the request to the underlying
@@ -85,6 +85,10 @@ func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, erro
     return t.db.ModifyAncients(fn)
 }
 
+func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
+    return t.db.ReadAncients(fn)
+}
+
 // TruncateAncients is a noop passthrough that just forwards the request to the underlying
 // database.
 func (t *table) TruncateAncients(items uint64) error {


@@ -76,12 +76,12 @@ type AncientReader interface {
     // Ancient retrieves an ancient binary blob from the append-only immutable files.
     Ancient(kind string, number uint64) ([]byte, error)
 
-    // ReadAncients retrieves multiple items in sequence, starting from the index 'start'.
+    // AncientRange retrieves multiple items in sequence, starting from the index 'start'.
     // It will return
     //  - at most 'count' items,
     //  - at least 1 item (even if exceeding the maxBytes), but will otherwise
     //    return as many items as fit into maxBytes.
-    ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error)
+    AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error)
 
     // Ancients returns the ancient item numbers in the ancient store.
     Ancients() (uint64, error)
@@ -90,6 +90,15 @@ type AncientReader interface {
     AncientSize(kind string) (uint64, error)
 }
 
+// AncientBatchReader is the interface for 'batched' or 'atomic' reading.
+type AncientBatchReader interface {
+    AncientReader
+
+    // ReadAncients runs the given read operation while ensuring that no writes take place
+    // on the underlying freezer.
+    ReadAncients(fn func(AncientReader) error) (err error)
+}
+
 // AncientWriter contains the methods required to write to immutable ancient data.
 type AncientWriter interface {
     // ModifyAncients runs a write operation on the ancient store.
@@ -117,7 +126,7 @@ type AncientWriteOp interface {
 // immutable ancient data.
 type Reader interface {
     KeyValueReader
-    AncientReader
+    AncientBatchReader
 }
 
 // Writer contains the methods required to write data to both key-value as well as
@@ -130,7 +139,7 @@ type Writer interface {
 // AncientStore contains all the methods required to allow handling different
 // ancient data stores backing immutable chain data store.
 type AncientStore interface {
-    AncientReader
+    AncientBatchReader
     AncientWriter
     io.Closer
 }