eth/filters: avoid block body retrieval when no matching logs (#25199)

Logs stored on disk carry only minimal information. Contextual fields such as the block
number, the index of the log within the block, and the index of the transaction within the
block are filled in upon request. All of these fields can be filled in from just the block
header and the list of receipts, but determining the transaction hash of a log requires the block body.
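
As a rough sketch (illustrative only, not code from this PR; the helper name fillLogContext is hypothetical, and the usual core/types import is assumed), the derivation that needs nothing beyond the header and receipts looks like this, with the transaction hash deliberately left unresolved:

func fillLogContext(header *types.Header, receipts types.Receipts) []*types.Log {
	var (
		flattened []*types.Log
		logIndex  uint
	)
	for txIndex, receipt := range receipts {
		for _, l := range receipt.Logs {
			// Everything below is derivable without the block body.
			l.BlockHash = header.Hash()
			l.BlockNumber = header.Number.Uint64()
			l.TxIndex = uint(txIndex)
			l.Index = logIndex
			logIndex++
			// l.TxHash is left zero: resolving it would need
			// body.Transactions[txIndex].Hash(), i.e. the block body.
			flattened = append(flattened, l)
		}
	}
	return flattened
}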

The goal of this PR is to postpone this retrieval until we are sure we actually need the
transaction hash. It often happens that the header bloom filter signals potential matches
in a block, but actually checking the logs reveals that none of them match. We want to
avoid fetching the body in that case.
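
Summarized as an outline of the new per-block flow (the method name blockLogsOutline is illustrative; the helpers it calls are the ones introduced in the eth/filters diff further down, so this is a simplification rather than the exact code):

func (f *Filter) blockLogsOutline(ctx context.Context, header *types.Header) ([]*types.Log, error) {
	// Cheap pre-check: the header bloom filter can produce false positives.
	if !bloomFilter(header.Bloom, f.addresses, f.topics) {
		return nil, nil
	}
	// Match against cached, partially derived logs; no body access yet.
	cached, err := f.sys.cachedLogElem(ctx, header.Hash(), header.Number.Uint64())
	if err != nil {
		return nil, err
	}
	logs := filterLogs(cached.logs, nil, nil, f.addresses, f.topics)
	if len(logs) == 0 {
		return nil, nil // bloom false positive: the body fetch is avoided
	}
	// Only a confirmed match pays for the body, solely to resolve tx hashes.
	body, err := f.sys.cachedGetBody(ctx, cached, header.Hash(), header.Number.Uint64())
	if err != nil {
		return nil, err
	}
	for i, l := range logs {
		cpy := *l // copy so the cached elements are not mutated
		cpy.TxHash = body.Transactions[cpy.TxIndex].Hash()
		logs[i] = &cpy
	}
	return logs, nil
}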

Note that this changes the semantics of Backend.GetLogs. Downstream callers of
GetLogs must now assume that the log context fields have not been derived, and call
DeriveFields on the logs themselves if necessary.
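
For illustration, a hypothetical downstream caller that still needs fully derived logs could reconstruct them along these lines (a sketch only; the helper name and wiring are assumptions, while the four-argument DeriveFields call matches the form visible in the test hunk below):

func fullyDerivedLogs(ctx context.Context, b filters.Backend, config *params.ChainConfig, hash common.Hash, number uint64) ([][]*types.Log, error) {
	// GetLogs now returns un-derived logs, so rebuild them from the receipts
	// and fill in the context fields, including the transaction hashes.
	receipts, err := b.GetReceipts(ctx, hash)
	if err != nil {
		return nil, err
	}
	body, err := b.GetBody(ctx, hash, rpc.BlockNumber(number))
	if err != nil {
		return nil, err
	}
	if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
		return nil, err
	}
	logs := make([][]*types.Log, len(receipts))
	for i, receipt := range receipts {
		logs[i] = receipt.Logs
	}
	return logs, nil
}
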
Author: Sina Mahmoodi
Date: 2023-02-13 10:59:27 +01:00 (committed by GitHub)
Parent: 241cf62b5c
Commit: 2def62b99b
11 changed files with 283 additions and 97 deletions


@ -872,6 +872,13 @@ func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*t
return fb.bc.GetHeaderByHash(hash), nil
}
func (fb *filterBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
if body := fb.bc.GetBody(hash); body != nil {
return body, nil
}
return nil, errors.New("block body not found")
}
func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
return fb.backend.pendingBlock, fb.backend.pendingReceipts
}


@ -714,9 +714,9 @@ func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, t
return nil
}
// ReadLogs retrieves the logs for all transactions in a block. The log fields
// are populated with metadata. In case the receipts or the block body
// are not found, a nil is returned.
// ReadLogs retrieves the logs for all transactions in a block. In case
// receipts is not found, a nil is returned.
// Note: ReadLogs does not derive unstored log fields.
func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
// Retrieve the flattened receipt slice
data := ReadReceiptsRLP(db, hash, number)
@ -729,15 +729,6 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.C
return nil
}
body := ReadBody(db, hash, number)
if body == nil {
log.Error("Missing body but have receipt", "hash", hash, "number", number)
return nil
}
if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil {
log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
return nil
}
logs := make([][]*types.Log, len(receipts))
for i, receipt := range receipts {
logs[i] = receipt.Logs


@ -750,10 +750,6 @@ func TestReadLogs(t *testing.T) {
t.Fatalf("unexpected number of logs[1] returned, have %d want %d", have, want)
}
// Fill in log fields so we can compare their rlp encoding
if err := types.Receipts(receipts).DeriveFields(params.TestChainConfig, hash, 0, body.Transactions); err != nil {
t.Fatal(err)
}
for i, pr := range receipts {
for j, pl := range pr.Logs {
rlpHave, err := rlp.EncodeToBytes(newFullLogRLP(logs[i][j]))


@ -135,6 +135,17 @@ func (b *EthAPIBackend) BlockByHash(ctx context.Context, hash common.Hash) (*typ
return b.eth.blockchain.GetBlockByHash(hash), nil
}
// GetBody returns body of a block. It does not resolve special block numbers.
func (b *EthAPIBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
if number < 0 || hash == (common.Hash{}) {
return nil, errors.New("invalid arguments; expect hash and no special block numbers")
}
if body := b.eth.blockchain.GetBody(hash); body != nil {
return body, nil
}
return nil, errors.New("block body not found")
}
func (b *EthAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) {
if blockNr, ok := blockNrOrHash.Number(); ok {
return b.BlockByNumber(ctx, blockNr)


@ -104,7 +104,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
if header == nil {
return nil, errors.New("unknown block")
}
return f.blockLogs(ctx, header, false)
return f.blockLogs(ctx, header)
}
// Short-cut if all we care about is pending logs
if f.begin == rpc.PendingBlockNumber.Int64() {
@ -216,7 +216,7 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err
if header == nil || err != nil {
return logs, err
}
found, err := f.blockLogs(ctx, header, true)
found, err := f.checkMatches(ctx, header)
if err != nil {
return logs, err
}
@ -241,7 +241,7 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e
if header == nil || err != nil {
return logs, err
}
found, err := f.blockLogs(ctx, header, false)
found, err := f.blockLogs(ctx, header)
if err != nil {
return logs, err
}
@ -251,15 +251,8 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e
}
// blockLogs returns the logs matching the filter criteria within a single block.
func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom bool) ([]*types.Log, error) {
// Fast track: no filtering criteria
if len(f.addresses) == 0 && len(f.topics) == 0 {
list, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64())
if err != nil {
return nil, err
}
return flatten(list), nil
} else if skipBloom || bloomFilter(header.Bloom, f.addresses, f.topics) {
func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.Log, error) {
if bloomFilter(header.Bloom, f.addresses, f.topics) {
return f.checkMatches(ctx, header)
}
return nil, nil
@ -267,30 +260,37 @@ func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom
// checkMatches checks if the receipts belonging to the given header contain any log events that
// match the filter criteria. This function is called when the bloom filter signals a potential match.
// skipFilter signals all logs of the given block are requested.
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) {
logsList, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64())
hash := header.Hash()
// Logs in cache are partially filled with context data
// such as tx index, block hash, etc.
// Notably tx hash is NOT filled in because it needs
// access to block body data.
cached, err := f.sys.cachedLogElem(ctx, hash, header.Number.Uint64())
if err != nil {
return nil, err
}
unfiltered := flatten(logsList)
logs := filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
if len(logs) > 0 {
// We have matching logs, check if we need to resolve full logs via the light client
if logs[0].TxHash == (common.Hash{}) {
receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash())
if err != nil {
return nil, err
}
unfiltered = unfiltered[:0]
for _, receipt := range receipts {
unfiltered = append(unfiltered, receipt.Logs...)
}
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
}
logs := filterLogs(cached.logs, nil, nil, f.addresses, f.topics)
if len(logs) == 0 {
return nil, nil
}
// Most backends will deliver un-derived logs, but check nevertheless.
if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) {
return logs, nil
}
return nil, nil
body, err := f.sys.cachedGetBody(ctx, cached, hash, header.Number.Uint64())
if err != nil {
return nil, err
}
for i, log := range logs {
// Copy log not to modify cache elements
logcopy := *log
logcopy.TxHash = body.Transactions[logcopy.TxIndex].Hash()
logs[i] = &logcopy
}
return logs, nil
}
// pendingLogs returns the logs matching the filter criteria within the pending block.
@ -380,11 +380,3 @@ func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]commo
}
return true
}
func flatten(list [][]*types.Log) []*types.Log {
var flat []*types.Log
for _, logs := range list {
flat = append(flat, logs...)
}
return flat
}


@ -22,6 +22,7 @@ import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum"
@ -58,6 +59,7 @@ type Backend interface {
ChainDb() ethdb.Database
HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error)
PendingBlockAndReceipts() (*types.Block, types.Receipts)
@ -77,7 +79,7 @@ type Backend interface {
// FilterSystem holds resources shared by all filters.
type FilterSystem struct {
backend Backend
logsCache *lru.Cache[common.Hash, [][]*types.Log]
logsCache *lru.Cache[common.Hash, *logCacheElem]
cfg *Config
}
@ -86,13 +88,18 @@ func NewFilterSystem(backend Backend, config Config) *FilterSystem {
config = config.withDefaults()
return &FilterSystem{
backend: backend,
logsCache: lru.NewCache[common.Hash, [][]*types.Log](config.LogCacheSize),
logsCache: lru.NewCache[common.Hash, *logCacheElem](config.LogCacheSize),
cfg: &config,
}
}
// cachedGetLogs loads block logs from the backend and caches the result.
func (sys *FilterSystem) cachedGetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) {
type logCacheElem struct {
logs []*types.Log
body atomic.Pointer[types.Body]
}
// cachedLogElem loads block logs from the backend and caches the result.
func (sys *FilterSystem) cachedLogElem(ctx context.Context, blockHash common.Hash, number uint64) (*logCacheElem, error) {
cached, ok := sys.logsCache.Get(blockHash)
if ok {
return cached, nil
@ -105,8 +112,35 @@ func (sys *FilterSystem) cachedGetLogs(ctx context.Context, blockHash common.Has
if logs == nil {
return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", number, blockHash.TerminalString())
}
sys.logsCache.Add(blockHash, logs)
return logs, nil
// Database logs are un-derived.
// Fill in whatever we can (txHash is inaccessible at this point).
flattened := make([]*types.Log, 0)
var logIdx uint
for i, txLogs := range logs {
for _, log := range txLogs {
log.BlockHash = blockHash
log.BlockNumber = number
log.TxIndex = uint(i)
log.Index = logIdx
logIdx++
flattened = append(flattened, log)
}
}
elem := &logCacheElem{logs: flattened}
sys.logsCache.Add(blockHash, elem)
return elem, nil
}
func (sys *FilterSystem) cachedGetBody(ctx context.Context, elem *logCacheElem, hash common.Hash, number uint64) (*types.Body, error) {
if body := elem.body.Load(); body != nil {
return body, nil
}
body, err := sys.backend.GetBody(ctx, hash, rpc.BlockNumber(number))
if err != nil {
return nil, err
}
elem.body.Store(body)
return body, nil
}
// Type determines the kind of filter and is used to put the filter in to
@ -431,6 +465,12 @@ func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent)
if es.lightMode && len(filters[LogsSubscription]) > 0 {
es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) {
for _, f := range filters[LogsSubscription] {
if f.logsCrit.FromBlock != nil && header.Number.Cmp(f.logsCrit.FromBlock) < 0 {
continue
}
if f.logsCrit.ToBlock != nil && header.Number.Cmp(f.logsCrit.ToBlock) > 0 {
continue
}
if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
f.logs <- matchedLogs
}
@ -474,42 +514,39 @@ func (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func
// filter logs of a single header in light client mode
func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {
if bloomFilter(header.Bloom, addresses, topics) {
// Get the logs of the block
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
logsList, err := es.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64())
if err != nil {
return nil
}
var unfiltered []*types.Log
for _, logs := range logsList {
for _, log := range logs {
logcopy := *log
logcopy.Removed = remove
unfiltered = append(unfiltered, &logcopy)
}
}
logs := filterLogs(unfiltered, nil, nil, addresses, topics)
if len(logs) > 0 && logs[0].TxHash == (common.Hash{}) {
// We have matching but non-derived logs
receipts, err := es.backend.GetReceipts(ctx, header.Hash())
if err != nil {
return nil
}
unfiltered = unfiltered[:0]
for _, receipt := range receipts {
for _, log := range receipt.Logs {
logcopy := *log
logcopy.Removed = remove
unfiltered = append(unfiltered, &logcopy)
}
}
logs = filterLogs(unfiltered, nil, nil, addresses, topics)
}
if !bloomFilter(header.Bloom, addresses, topics) {
return nil
}
// Get the logs of the block
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
cached, err := es.sys.cachedLogElem(ctx, header.Hash(), header.Number.Uint64())
if err != nil {
return nil
}
unfiltered := append([]*types.Log{}, cached.logs...)
for i, log := range unfiltered {
// Don't modify in-cache elements
logcopy := *log
logcopy.Removed = remove
// Swap copy in-place
unfiltered[i] = &logcopy
}
logs := filterLogs(unfiltered, nil, nil, addresses, topics)
// Txhash is already resolved
if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) {
return logs
}
return nil
// Resolve txhash
body, err := es.sys.cachedGetBody(ctx, cached, header.Hash(), header.Number.Uint64())
if err != nil {
return nil
}
for _, log := range logs {
// logs are already copied, safe to modify
log.TxHash = body.Transactions[log.TxIndex].Hash()
}
return logs
}
// eventLoop (un)installs filters and processes mux events.


@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
@ -99,6 +100,13 @@ func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*type
return rawdb.ReadHeader(b.db, hash, *number), nil
}
func (b *testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
if body := rawdb.ReadBody(b.db, hash, uint64(number)); body != nil {
return body, nil
}
return nil, errors.New("block body not found")
}
func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {
return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil
@ -675,6 +683,143 @@ func TestPendingLogsSubscription(t *testing.T) {
}
}
func TestLightFilterLogs(t *testing.T) {
t.Parallel()
var (
db = rawdb.NewMemoryDatabase()
backend, sys = newTestFilterSystem(t, db, Config{})
api = NewFilterAPI(sys, true)
signer = types.HomesteadSigner{}
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333")
notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999")
firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
// posted twice, once as regular logs and once as pending logs.
allLogs = []*types.Log{
// Block 1
{Address: firstAddr, Topics: []common.Hash{}, Data: []byte{}, BlockNumber: 2, Index: 0},
// Block 2
{Address: firstAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 0},
{Address: secondAddr, Topics: []common.Hash{firstTopic}, Data: []byte{}, BlockNumber: 3, Index: 1},
{Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 3, Index: 2},
// Block 3
{Address: thirdAddress, Topics: []common.Hash{secondTopic}, Data: []byte{}, BlockNumber: 4, Index: 0},
}
testCases = []struct {
crit FilterCriteria
expected []*types.Log
id rpc.ID
}{
// match all
0: {FilterCriteria{}, allLogs, ""},
// match none due to no matching addresses
1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""},
// match logs based on addresses, ignore topics
2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""},
// match logs based on addresses and topics
3: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""},
// all logs with block num >= 3
4: {FilterCriteria{FromBlock: big.NewInt(3), ToBlock: big.NewInt(5)}, allLogs[1:], ""},
// all logs
5: {FilterCriteria{FromBlock: big.NewInt(0), ToBlock: big.NewInt(5)}, allLogs, ""},
// all logs with 1>= block num <=2 and topic secondTopic
6: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(3), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""},
}
key, _ = crypto.GenerateKey()
addr = crypto.PubkeyToAddress(key.PublicKey)
genesis = &core.Genesis{Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{
addr: {Balance: big.NewInt(params.Ether)},
},
}
receipts = []*types.Receipt{{
Logs: []*types.Log{allLogs[0]},
}, {
Logs: []*types.Log{allLogs[1], allLogs[2], allLogs[3]},
}, {
Logs: []*types.Log{allLogs[4]},
}}
)
_, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 4, func(i int, b *core.BlockGen) {
if i == 0 {
return
}
receipts[i-1].Bloom = types.CreateBloom(types.Receipts{receipts[i-1]})
b.AddUncheckedReceipt(receipts[i-1])
tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i - 1), To: &common.Address{}, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, key)
b.AddTx(tx)
})
for i, block := range blocks {
rawdb.WriteBlock(db, block)
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
rawdb.WriteHeadBlockHash(db, block.Hash())
if i > 0 {
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), []*types.Receipt{receipts[i-1]})
}
}
// create all filters
for i := range testCases {
id, err := api.NewFilter(testCases[i].crit)
if err != nil {
t.Fatal(err)
}
testCases[i].id = id
}
// raise events
time.Sleep(1 * time.Second)
for _, block := range blocks {
backend.chainFeed.Send(core.ChainEvent{Block: block, Hash: common.Hash{}, Logs: allLogs})
}
for i, tt := range testCases {
var fetched []*types.Log
timeout := time.Now().Add(1 * time.Second)
for { // fetch all expected logs
results, err := api.GetFilterChanges(tt.id)
if err != nil {
t.Fatalf("Unable to fetch logs: %v", err)
}
fetched = append(fetched, results.([]*types.Log)...)
if len(fetched) >= len(tt.expected) {
break
}
// check timeout
if time.Now().After(timeout) {
break
}
time.Sleep(100 * time.Millisecond)
}
if len(fetched) != len(tt.expected) {
t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
return
}
for l := range fetched {
if fetched[l].Removed {
t.Errorf("expected log not to be removed for log %d in case %d", l, i)
}
expected := *tt.expected[l]
blockNum := expected.BlockNumber - 1
expected.BlockHash = blocks[blockNum].Hash()
expected.TxHash = blocks[blockNum].Transactions()[0].Hash()
if !reflect.DeepEqual(fetched[l], &expected) {
t.Errorf("invalid log on index %d for case %d", l, i)
}
}
}
}
// TestPendingTxFilterDeadlock tests if the event loop hangs when pending
// txes arrive at the same time that one of multiple filters is timing out.
// Please refer to #22131 for more details.


@ -90,6 +90,7 @@ type Backend interface {
// This is copied from filters.Backend
// eth/filters needs to be initialized from this backend type, so methods needed by
// it must also be included here.
GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error)
GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error)
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription


@ -288,6 +288,9 @@ func (b *backendMock) BlockByHash(ctx context.Context, hash common.Hash) (*types
func (b *backendMock) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) {
return nil, nil
}
func (b *backendMock) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
return nil, nil
}
func (b *backendMock) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) {
return nil, nil, nil
}


@ -130,6 +130,10 @@ func (b *LesApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash r
return nil, errors.New("invalid arguments; neither block nor hash specified")
}
func (b *LesApiBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
return light.GetBody(ctx, b.eth.odr, hash, uint64(number))
}
func (b *LesApiBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
return nil, nil
}


@ -148,7 +148,7 @@ func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint
}
// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash.
// in a block given by its hash. Receipts will be filled in with context data.
func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (types.Receipts, error) {
// Assume receipts are already stored locally and attempt to retrieve.
receipts := rawdb.ReadRawReceipts(odr.Database(), hash, number)
@ -184,9 +184,8 @@ func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, num
}
// GetBlockLogs retrieves the logs generated by the transactions included in a
// block given by its hash.
// block given by its hash. Logs will be filled in with context data.
func GetBlockLogs(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) ([][]*types.Log, error) {
// Retrieve the potentially incomplete receipts from disk or network
receipts, err := GetBlockReceipts(ctx, odr, hash, number)
if err != nil {
return nil, err