* eth/protocols/snap: fix problems due to idle-but-busy peers (#25651) * eth/protocols/snap: throttle trie heal requests when peers DoS us (#25666) * eth/protocols/snap: throttle trie heal requests when peers DoS us * eth/protocols/snap: lower heal throttle log to debug Co-authored-by: Martin Holst Swende <martin@swende.se> * eth/protocols/snap: fix comment Co-authored-by: Martin Holst Swende <martin@swende.se> * trie: check childrens' existence concurrently for snap heal (#25694) * eth: fix a rare datarace on CHT challenge reply / shutdown (#25831) * eth/filters: change filter block to be by-ref (#26054) This PR changes the block field in the filter to be a pointer, to disambiguate between empty hash and no hash * rpc: handle wrong HTTP batch response length (#26064) * params: release geth v1.10.26 stable * V1.10.25 statediff v4 wip (#275) * Statediff Geth Handle conflicts (#244) * Handle conflicts * Update go mod file versions * Make lint changes Disassociate block number from the indexer object Update ipld-eth-db ref Refactor builder code to make it reusable Use prefix comparison for account selective statediffing Update builder unit tests Add mode to write to CSV files in statediff file writer (#249) * Change file writing mode to csv files * Implement writer interface for file indexer * Implement option for csv or sql in file mode * Close files in CSV writer * Add tests for CSV file mode * Implement CSV file for watched addresses * Separate test configs for CSV and SQL * Refactor common code for file indexer tests Update indexer to include block hash in receipts and logs (#256) * Update indexer to include block hash in receipts and logs * Upgrade ipld-eth-db image in docker-compose to run tests Use watched addresses from direct indexing params by default while serving statediff APIs (#262) * Use watched addresses from direct indexing params in statediff APIs by default * Avoid using indexer object when direct indexing is off * Add nil check before accessing 
watched addresses from direct indexing params Rebase missed these changes needed at 1.10.20 Flags cleanup for CLI changes and linter complaints Linter appeasements to achieve perfection enforce go 1.18 for check (#267) * enforce go 1.18 for check * tests on 1.18 as well * adding db yml for possible change in docker-compose behavior in yml parsing Add indexer tests for handling non canonical blocks (#254) * Add indexer tests for header and transactions in a non canonical block * Add indexer tests for receipts in a non-canonical block and refactor * Add indexer tests for logs in a non-canonical block * Add indexer tests for state and storage nodes in a non-canonical block * Add indexer tests for non-canonical block at another height * Avoid passing address of a pointer * Update refs in GitHub workflow * Add genesis file path to stack-orchestrator config in GitHub workflow * Add descriptive comments fix non-deterministic ordering in unit tests Refactor indexer tests to avoid duplicate code (#270) * Refactor indexer tests to avoid duplicate code * Refactor file mode indexer tests * Fix expected db stats for sqlx after tx closure * Refactor indexer tests for legacy block * Refactor mainnet indexer tests * Refactor tests for watched addressess methods * Fix query in legacy indexer test rebase and resolve onto 1.10.23... still error out of index related to GetLeafKeys changed trie.Commit behavior was subtle about not not flushing to disk without an Update * no merge nodeset throws nil * linter appeasement Cerc refactor (#281) * first pass cerc refactor in cicd * 1st attempt to publish binary to git.vdb.to from github release * docker build step mangled * docker build step mangled * wrong username for docker login... 
which still succeeded * circcicd is not cerccicd * bad hostname adding manual override of binary publish to git.vdb.to for development/emergency (#282) Cerc io publish fix (#284) * adding manual override of binary publish to git.vdb.to for development/emergency * Create manual_binary_publish.yaml (#283) * github did not pick up workflow added outside of its UI and I still cannot spell cerc right rawdb helper functions for cold levelDB sync export Jenkins reborn (#285) * initial build and output testing... lots of trial and error * clean up for working (but failing) unit test geth with ubuntu foundation image * linter problem on comments in version * trying linter appeasement with gofmt output on versions.go Co-authored-by: Martin Holst Swende <martin@swende.se> Co-authored-by: Péter Szilágyi <peterke@gmail.com> Co-authored-by: Jordan Krage <jmank88@gmail.com> Co-authored-by: Felix Lange <fjl@twurst.com>
364 lines
10 KiB
Go
// Copyright 2014 The go-ethereum Authors
|
|
// This file is part of the go-ethereum library.
|
|
//
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
// (at your option) any later version.
|
|
//
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
// GNU Lesser General Public License for more details.
|
|
//
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
package filters
|
|
|
|
import (
|
|
"context"
|
|
"errors"
|
|
"math/big"
|
|
|
|
"github.com/ethereum/go-ethereum/common"
|
|
"github.com/ethereum/go-ethereum/core/bloombits"
|
|
"github.com/ethereum/go-ethereum/core/types"
|
|
"github.com/ethereum/go-ethereum/rpc"
|
|
)
|
|
|
|
// Filter can be used to retrieve and filter logs.
type Filter struct {
	sys *FilterSystem // backing filter system, provides backend access and the log cache

	addresses []common.Address // contract addresses to match; empty list matches any address
	topics    [][]common.Hash  // positional topic rules; an empty sub-list is a wildcard at that position

	block *common.Hash // Block hash if filtering a single block
	begin, end int64 // Range interval if filtering multiple blocks

	matcher *bloombits.Matcher // bloombits matcher for indexed range queries; nil for block-hash filters
}
|
|
|
|
// NewRangeFilter creates a new filter which uses a bloom filter on blocks to
|
|
// figure out whether a particular block is interesting or not.
|
|
func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
|
|
// Flatten the address and topic filter clauses into a single bloombits filter
|
|
// system. Since the bloombits are not positional, nil topics are permitted,
|
|
// which get flattened into a nil byte slice.
|
|
var filters [][][]byte
|
|
if len(addresses) > 0 {
|
|
filter := make([][]byte, len(addresses))
|
|
for i, address := range addresses {
|
|
filter[i] = address.Bytes()
|
|
}
|
|
filters = append(filters, filter)
|
|
}
|
|
for _, topicList := range topics {
|
|
filter := make([][]byte, len(topicList))
|
|
for i, topic := range topicList {
|
|
filter[i] = topic.Bytes()
|
|
}
|
|
filters = append(filters, filter)
|
|
}
|
|
size, _ := sys.backend.BloomStatus()
|
|
|
|
// Create a generic filter and convert it into a range filter
|
|
filter := newFilter(sys, addresses, topics)
|
|
|
|
filter.matcher = bloombits.NewMatcher(size, filters)
|
|
filter.begin = begin
|
|
filter.end = end
|
|
|
|
return filter
|
|
}
|
|
|
|
// NewBlockFilter creates a new filter which directly inspects the contents of
|
|
// a block to figure out whether it is interesting or not.
|
|
func (sys *FilterSystem) NewBlockFilter(block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter {
|
|
// Create a generic filter and convert it into a block filter
|
|
filter := newFilter(sys, addresses, topics)
|
|
filter.block = &block
|
|
return filter
|
|
}
|
|
|
|
// newFilter creates a generic filter that can either filter based on a block hash,
|
|
// or based on range queries. The search criteria needs to be explicitly set.
|
|
func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common.Hash) *Filter {
|
|
return &Filter{
|
|
sys: sys,
|
|
addresses: addresses,
|
|
topics: topics,
|
|
}
|
|
}
|
|
|
|
// Logs searches the blockchain for matching log entries, returning all from the
// first block that contains matches, updating the start of the filter accordingly.
//
// Note that Logs mutates f.begin as it progresses, so a partially failed call
// leaves the filter positioned at the first unprocessed block.
func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
	// If we're doing singleton block filtering, execute and return
	if f.block != nil {
		header, err := f.sys.backend.HeaderByHash(ctx, *f.block)
		if err != nil {
			return nil, err
		}
		if header == nil {
			return nil, errors.New("unknown block")
		}
		return f.blockLogs(ctx, header, false)
	}
	// Short-cut if all we care about is pending logs
	if f.begin == rpc.PendingBlockNumber.Int64() {
		if f.end != rpc.PendingBlockNumber.Int64() {
			return nil, errors.New("invalid block range")
		}
		return f.pendingLogs()
	}
	// Figure out the limits of the filter range
	header, _ := f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
	if header == nil {
		// No chain head available: nothing to filter over.
		return nil, nil
	}
	var (
		head = header.Number.Uint64()
		end = uint64(f.end)
		pending = f.end == rpc.PendingBlockNumber.Int64()
	)
	// Resolve the special "latest"/"pending" markers into concrete heights.
	if f.begin == rpc.LatestBlockNumber.Int64() {
		f.begin = int64(head)
	}
	if f.end == rpc.LatestBlockNumber.Int64() || f.end == rpc.PendingBlockNumber.Int64() {
		end = head
	}
	// Gather all indexed logs, and finish with non indexed ones
	var (
		logs []*types.Log
		err error
		size, sections = f.sys.backend.BloomStatus()
	)
	// Blocks below sections*size are covered by the bloombits index; use the
	// fast indexed path for that prefix and fall back to raw iteration after.
	if indexed := sections * size; indexed > uint64(f.begin) {
		if indexed > end {
			logs, err = f.indexedLogs(ctx, end)
		} else {
			logs, err = f.indexedLogs(ctx, indexed-1)
		}
		if err != nil {
			return logs, err
		}
	}
	// indexedLogs advanced f.begin past the indexed range on success, so this
	// picks up exactly where it stopped.
	rest, err := f.unindexedLogs(ctx, end)
	logs = append(logs, rest...)
	if pending {
		pendingLogs, err := f.pendingLogs()
		if err != nil {
			return nil, err
		}
		logs = append(logs, pendingLogs...)
	}
	return logs, err
}
|
|
|
|
// indexedLogs returns the logs matching the filter criteria based on the bloom
// bits indexed available locally or via the network.
func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
	// Create a matcher session and request servicing from the backend
	matches := make(chan uint64, 64)

	session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
	if err != nil {
		return nil, err
	}
	defer session.Close()

	f.sys.backend.ServiceFilter(ctx, session)

	// Iterate over the matches until exhausted or context closed
	var logs []*types.Log

	for {
		select {
		case number, ok := <-matches:
			// Abort if all matches have been fulfilled
			if !ok {
				err := session.Error()
				if err == nil {
					// Whole range consumed cleanly: advance the cursor so a
					// follow-up unindexedLogs call resumes right after it.
					f.begin = int64(end) + 1
				}
				return logs, err
			}
			// Record progress so an error below resumes from the next block.
			f.begin = int64(number) + 1

			// Retrieve the suggested block and pull any truly matching logs
			header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
			if header == nil || err != nil {
				return logs, err
			}
			// skipBloom=true: the bloombits matcher already vetted this block.
			found, err := f.blockLogs(ctx, header, true)
			if err != nil {
				return logs, err
			}
			logs = append(logs, found...)

		case <-ctx.Done():
			return logs, ctx.Err()
		}
	}
}
|
|
|
|
// unindexedLogs returns the logs matching the filter criteria based on raw block
|
|
// iteration and bloom matching.
|
|
func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
|
|
var logs []*types.Log
|
|
|
|
for ; f.begin <= int64(end); f.begin++ {
|
|
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
|
|
if header == nil || err != nil {
|
|
return logs, err
|
|
}
|
|
found, err := f.blockLogs(ctx, header, false)
|
|
if err != nil {
|
|
return logs, err
|
|
}
|
|
logs = append(logs, found...)
|
|
}
|
|
return logs, nil
|
|
}
|
|
|
|
// blockLogs returns the logs matching the filter criteria within a single block.
|
|
func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom bool) ([]*types.Log, error) {
|
|
// Fast track: no filtering criteria
|
|
if len(f.addresses) == 0 && len(f.topics) == 0 {
|
|
list, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64())
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return flatten(list), nil
|
|
} else if skipBloom || bloomFilter(header.Bloom, f.addresses, f.topics) {
|
|
return f.checkMatches(ctx, header)
|
|
}
|
|
return nil, nil
|
|
}
|
|
|
|
// checkMatches checks if the receipts belonging to the given header contain any log events that
|
|
// match the filter criteria. This function is called when the bloom filter signals a potential match.
|
|
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) {
|
|
logsList, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64())
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
unfiltered := flatten(logsList)
|
|
logs := filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
|
|
if len(logs) > 0 {
|
|
// We have matching logs, check if we need to resolve full logs via the light client
|
|
if logs[0].TxHash == (common.Hash{}) {
|
|
receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash())
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
unfiltered = unfiltered[:0]
|
|
for _, receipt := range receipts {
|
|
unfiltered = append(unfiltered, receipt.Logs...)
|
|
}
|
|
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
|
|
}
|
|
return logs, nil
|
|
}
|
|
return nil, nil
|
|
}
|
|
|
|
// pendingLogs returns the logs matching the filter criteria within the pending block.
|
|
func (f *Filter) pendingLogs() ([]*types.Log, error) {
|
|
block, receipts := f.sys.backend.PendingBlockAndReceipts()
|
|
if bloomFilter(block.Bloom(), f.addresses, f.topics) {
|
|
var unfiltered []*types.Log
|
|
for _, r := range receipts {
|
|
unfiltered = append(unfiltered, r.Logs...)
|
|
}
|
|
return filterLogs(unfiltered, nil, nil, f.addresses, f.topics), nil
|
|
}
|
|
return nil, nil
|
|
}
|
|
|
|
func includes(addresses []common.Address, a common.Address) bool {
|
|
for _, addr := range addresses {
|
|
if addr == a {
|
|
return true
|
|
}
|
|
}
|
|
|
|
return false
|
|
}
|
|
|
|
// filterLogs creates a slice of logs matching the given criteria.
|
|
func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log {
|
|
var ret []*types.Log
|
|
Logs:
|
|
for _, log := range logs {
|
|
if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber {
|
|
continue
|
|
}
|
|
if toBlock != nil && toBlock.Int64() >= 0 && toBlock.Uint64() < log.BlockNumber {
|
|
continue
|
|
}
|
|
|
|
if len(addresses) > 0 && !includes(addresses, log.Address) {
|
|
continue
|
|
}
|
|
// If the to filtered topics is greater than the amount of topics in logs, skip.
|
|
if len(topics) > len(log.Topics) {
|
|
continue
|
|
}
|
|
for i, sub := range topics {
|
|
match := len(sub) == 0 // empty rule set == wildcard
|
|
for _, topic := range sub {
|
|
if log.Topics[i] == topic {
|
|
match = true
|
|
break
|
|
}
|
|
}
|
|
if !match {
|
|
continue Logs
|
|
}
|
|
}
|
|
ret = append(ret, log)
|
|
}
|
|
return ret
|
|
}
|
|
|
|
func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]common.Hash) bool {
|
|
if len(addresses) > 0 {
|
|
var included bool
|
|
for _, addr := range addresses {
|
|
if types.BloomLookup(bloom, addr) {
|
|
included = true
|
|
break
|
|
}
|
|
}
|
|
if !included {
|
|
return false
|
|
}
|
|
}
|
|
|
|
for _, sub := range topics {
|
|
included := len(sub) == 0 // empty rule set == wildcard
|
|
for _, topic := range sub {
|
|
if types.BloomLookup(bloom, topic) {
|
|
included = true
|
|
break
|
|
}
|
|
}
|
|
if !included {
|
|
return false
|
|
}
|
|
}
|
|
return true
|
|
}
|
|
|
|
func flatten(list [][]*types.Log) []*types.Log {
|
|
var flat []*types.Log
|
|
for _, logs := range list {
|
|
flat = append(flat, logs...)
|
|
}
|
|
return flat
|
|
}
|