2016-04-14 16:18:24 +00:00
|
|
|
// Copyright 2014 The go-ethereum Authors
|
2015-07-22 16:48:40 +00:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-07 00:54:22 +00:00
|
|
|
//
|
2015-07-23 16:35:11 +00:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-07 00:54:22 +00:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 16:48:40 +00:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-07 00:54:22 +00:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 16:48:40 +00:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-07 00:54:22 +00:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 16:48:40 +00:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-07 00:54:22 +00:00
|
|
|
|
2015-08-30 08:04:59 +00:00
|
|
|
package filters
|
2014-08-11 14:23:17 +00:00
|
|
|
|
|
|
|
import (
	"context"
	"errors"
	"math/big"
	"slices"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)
|
|
|
|
|
2016-12-04 18:07:24 +00:00
|
|
|
// Filter can be used to retrieve and filter logs.
type Filter struct {
	sys *FilterSystem // filter system providing backend access and log caches

	addresses []common.Address // contract addresses to match against; empty means any address
	topics    [][]common.Hash  // topic filter clauses, one per topic position; an empty clause is a wildcard

	block *common.Hash // Block hash if filtering a single block
	begin, end int64 // Range interval if filtering multiple blocks

	matcher *bloombits.Matcher // bloombits matcher used for the indexed portion of range queries
}
|
|
|
|
|
2018-07-12 14:36:07 +00:00
|
|
|
// NewRangeFilter creates a new filter which uses a bloom filter on blocks to
|
|
|
|
// figure out whether a particular block is interesting or not.
|
2022-08-19 09:14:59 +00:00
|
|
|
func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
|
2017-09-27 10:14:52 +00:00
|
|
|
// Flatten the address and topic filter clauses into a single bloombits filter
|
|
|
|
// system. Since the bloombits are not positional, nil topics are permitted,
|
|
|
|
// which get flattened into a nil byte slice.
|
2017-09-06 00:33:10 +00:00
|
|
|
var filters [][][]byte
|
|
|
|
if len(addresses) > 0 {
|
|
|
|
filter := make([][]byte, len(addresses))
|
|
|
|
for i, address := range addresses {
|
|
|
|
filter[i] = address.Bytes()
|
|
|
|
}
|
|
|
|
filters = append(filters, filter)
|
|
|
|
}
|
|
|
|
for _, topicList := range topics {
|
|
|
|
filter := make([][]byte, len(topicList))
|
|
|
|
for i, topic := range topicList {
|
|
|
|
filter[i] = topic.Bytes()
|
|
|
|
}
|
|
|
|
filters = append(filters, filter)
|
|
|
|
}
|
2022-08-19 09:14:59 +00:00
|
|
|
size, _ := sys.backend.BloomStatus()
|
2017-08-29 11:13:11 +00:00
|
|
|
|
2018-07-12 14:36:07 +00:00
|
|
|
// Create a generic filter and convert it into a range filter
|
2022-08-19 09:14:59 +00:00
|
|
|
filter := newFilter(sys, addresses, topics)
|
2018-07-12 14:36:07 +00:00
|
|
|
|
|
|
|
filter.matcher = bloombits.NewMatcher(size, filters)
|
|
|
|
filter.begin = begin
|
|
|
|
filter.end = end
|
|
|
|
|
|
|
|
return filter
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewBlockFilter creates a new filter which directly inspects the contents of
|
|
|
|
// a block to figure out whether it is interesting or not.
|
2022-08-19 09:14:59 +00:00
|
|
|
func (sys *FilterSystem) NewBlockFilter(block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter {
|
2018-07-12 14:36:07 +00:00
|
|
|
// Create a generic filter and convert it into a block filter
|
2022-08-19 09:14:59 +00:00
|
|
|
filter := newFilter(sys, addresses, topics)
|
2022-10-27 13:25:01 +00:00
|
|
|
filter.block = &block
|
2018-07-12 14:36:07 +00:00
|
|
|
return filter
|
|
|
|
}
|
|
|
|
|
|
|
|
// newFilter creates a generic filter that can either filter based on a block hash,
|
|
|
|
// or based on range queries. The search criteria needs to be explicitly set.
|
2022-08-19 09:14:59 +00:00
|
|
|
func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common.Hash) *Filter {
|
2016-01-13 18:35:48 +00:00
|
|
|
return &Filter{
|
2022-08-19 09:14:59 +00:00
|
|
|
sys: sys,
|
2017-08-29 11:13:11 +00:00
|
|
|
addresses: addresses,
|
|
|
|
topics: topics,
|
2016-01-13 18:35:48 +00:00
|
|
|
}
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
|
|
|
|
2017-08-29 11:13:11 +00:00
|
|
|
// Logs searches the blockchain for matching log entries, returning all from the
// first block that contains matches, updating the start of the filter accordingly.
func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
	// If we're doing singleton block filtering, execute and return
	if f.block != nil {
		header, err := f.sys.backend.HeaderByHash(ctx, *f.block)
		if err != nil {
			return nil, err
		}
		if header == nil {
			return nil, errors.New("unknown block")
		}
		return f.blockLogs(ctx, header)
	}

	// Range query: record whether either bound refers to the pending block.
	var (
		beginPending = f.begin == rpc.PendingBlockNumber.Int64()
		endPending   = f.end == rpc.PendingBlockNumber.Int64()
	)

	// special case for pending logs
	// A range starting at pending but ending elsewhere is nonsensical (pending
	// is the newest possible block), so reject it outright.
	if beginPending && !endPending {
		return nil, errInvalidBlockRange
	}

	// Short-cut if all we care about is pending logs
	if beginPending && endPending {
		return f.pendingLogs(), nil
	}

	// resolveSpecial maps negative "special" block numbers (latest, pending,
	// finalized, safe) to concrete block numbers; plain numbers pass through.
	resolveSpecial := func(number int64) (int64, error) {
		var hdr *types.Header
		switch number {
		case rpc.LatestBlockNumber.Int64(), rpc.PendingBlockNumber.Int64():
			// we should return head here since we've already captured
			// that we need to get the pending logs in the pending boolean above
			hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
			if hdr == nil {
				return 0, errors.New("latest header not found")
			}
		case rpc.FinalizedBlockNumber.Int64():
			hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber)
			if hdr == nil {
				return 0, errors.New("finalized header not found")
			}
		case rpc.SafeBlockNumber.Int64():
			hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.SafeBlockNumber)
			if hdr == nil {
				return 0, errors.New("safe header not found")
			}
		default:
			return number, nil
		}
		return hdr.Number.Int64(), nil
	}

	var err error
	// range query need to resolve the special begin/end block number
	if f.begin, err = resolveSpecial(f.begin); err != nil {
		return nil, err
	}
	if f.end, err = resolveSpecial(f.end); err != nil {
		return nil, err
	}

	// Stream matching logs from the producer goroutine until it reports
	// completion (nil) or failure (non-nil) on the error channel.
	logChan, errChan := f.rangeLogsAsync(ctx)
	var logs []*types.Log
	for {
		select {
		case log := <-logChan:
			logs = append(logs, log)
		case err := <-errChan:
			if err != nil {
				// if an error occurs during extraction, we do return the extracted data
				return logs, err
			}
			// Append the pending ones
			if endPending {
				pendingLogs := f.pendingLogs()
				logs = append(logs, pendingLogs...)
			}
			return logs, nil
		}
	}
}
|
|
|
|
|
|
|
|
// rangeLogsAsync retrieves block-range logs that match the filter criteria asynchronously,
// it creates and returns two channels: one for delivering log data, and one for reporting errors.
func (f *Filter) rangeLogsAsync(ctx context.Context) (chan *types.Log, chan error) {
	var (
		logChan = make(chan *types.Log)
		errChan = make(chan error)
	)

	go func() {
		// Close both channels on exit so the consumer cannot block forever.
		defer func() {
			close(errChan)
			close(logChan)
		}()

		// Gather all indexed logs, and finish with non indexed ones
		var (
			end            = uint64(f.end)
			size, sections = f.sys.backend.BloomStatus()
			err            error
		)
		// sections*size is the first block number NOT covered by the bloombits index.
		if indexed := sections * size; indexed > uint64(f.begin) {
			// Clamp the indexed search so it never runs past the requested end
			// (indexed-1 below is the inclusive upper bound).
			if indexed > end {
				indexed = end + 1
			}
			if err = f.indexedLogs(ctx, indexed-1, logChan); err != nil {
				errChan <- err
				return
			}
		}

		// Sweep any remaining blocks by direct header retrieval and bloom checks.
		// indexedLogs advanced f.begin past the indexed region on success.
		if err := f.unindexedLogs(ctx, end, logChan); err != nil {
			errChan <- err
			return
		}

		// A nil value on errChan signals successful completion to the consumer.
		errChan <- nil
	}()

	return logChan, errChan
}
|
2015-06-09 11:22:16 +00:00
|
|
|
|
2017-08-29 11:13:11 +00:00
|
|
|
// indexedLogs returns the logs matching the filter criteria based on the bloom
|
|
|
|
// bits indexed available locally or via the network.
|
2023-05-25 12:40:28 +00:00
|
|
|
func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
|
2017-08-29 11:13:11 +00:00
|
|
|
// Create a matcher session and request servicing from the backend
|
|
|
|
matches := make(chan uint64, 64)
|
|
|
|
|
2017-10-24 13:19:09 +00:00
|
|
|
session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
|
2017-08-29 11:13:11 +00:00
|
|
|
if err != nil {
|
2023-05-25 12:40:28 +00:00
|
|
|
return err
|
2017-08-18 19:52:20 +00:00
|
|
|
}
|
2017-10-24 13:19:09 +00:00
|
|
|
defer session.Close()
|
2017-08-29 11:13:11 +00:00
|
|
|
|
2022-08-19 09:14:59 +00:00
|
|
|
f.sys.backend.ServiceFilter(ctx, session)
|
2017-08-29 11:13:11 +00:00
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case number, ok := <-matches:
|
|
|
|
// Abort if all matches have been fulfilled
|
|
|
|
if !ok {
|
2017-10-24 13:19:09 +00:00
|
|
|
err := session.Error()
|
|
|
|
if err == nil {
|
|
|
|
f.begin = int64(end) + 1
|
|
|
|
}
|
2023-05-25 12:40:28 +00:00
|
|
|
return err
|
2017-08-29 11:13:11 +00:00
|
|
|
}
|
2017-10-24 13:19:09 +00:00
|
|
|
f.begin = int64(number) + 1
|
2017-11-15 11:54:40 +00:00
|
|
|
|
2017-08-29 11:13:11 +00:00
|
|
|
// Retrieve the suggested block and pull any truly matching logs
|
2022-08-19 09:14:59 +00:00
|
|
|
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
|
2017-08-29 11:13:11 +00:00
|
|
|
if header == nil || err != nil {
|
2023-05-25 12:40:28 +00:00
|
|
|
return err
|
2017-08-29 11:13:11 +00:00
|
|
|
}
|
2023-02-13 09:59:27 +00:00
|
|
|
found, err := f.checkMatches(ctx, header)
|
2017-08-29 11:13:11 +00:00
|
|
|
if err != nil {
|
2023-05-25 12:40:28 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
for _, log := range found {
|
|
|
|
logChan <- log
|
2017-08-29 11:13:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
case <-ctx.Done():
|
2023-05-25 12:40:28 +00:00
|
|
|
return ctx.Err()
|
2017-08-29 11:13:11 +00:00
|
|
|
}
|
2017-08-18 19:52:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-27 15:33:14 +00:00
|
|
|
// unindexedLogs returns the logs matching the filter criteria based on raw block
|
2017-08-29 11:13:11 +00:00
|
|
|
// iteration and bloom matching.
|
2023-05-25 12:40:28 +00:00
|
|
|
func (f *Filter) unindexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error {
|
2017-08-29 11:13:11 +00:00
|
|
|
for ; f.begin <= int64(end); f.begin++ {
|
2022-08-19 09:14:59 +00:00
|
|
|
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
|
2017-08-29 11:13:11 +00:00
|
|
|
if header == nil || err != nil {
|
2023-05-25 12:40:28 +00:00
|
|
|
return err
|
2017-08-29 11:13:11 +00:00
|
|
|
}
|
2023-02-13 09:59:27 +00:00
|
|
|
found, err := f.blockLogs(ctx, header)
|
2018-07-12 14:36:07 +00:00
|
|
|
if err != nil {
|
2023-05-25 12:40:28 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
for _, log := range found {
|
|
|
|
select {
|
|
|
|
case logChan <- log:
|
|
|
|
case <-ctx.Done():
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
2018-07-12 14:36:07 +00:00
|
|
|
}
|
|
|
|
}
|
2023-05-25 12:40:28 +00:00
|
|
|
return nil
|
2018-07-12 14:36:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// blockLogs returns the logs matching the filter criteria within a single block.
|
2023-02-13 09:59:27 +00:00
|
|
|
func (f *Filter) blockLogs(ctx context.Context, header *types.Header) ([]*types.Log, error) {
|
|
|
|
if bloomFilter(header.Bloom, f.addresses, f.topics) {
|
2022-08-19 09:14:59 +00:00
|
|
|
return f.checkMatches(ctx, header)
|
2015-10-12 15:58:51 +00:00
|
|
|
}
|
2022-08-19 09:14:59 +00:00
|
|
|
return nil, nil
|
2017-08-18 19:52:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// checkMatches checks if the receipts belonging to the given header contain any log events that
|
|
|
|
// match the filter criteria. This function is called when the bloom filter signals a potential match.
|
2023-02-13 09:59:27 +00:00
|
|
|
// skipFilter signals all logs of the given block are requested.
|
2022-08-19 09:14:59 +00:00
|
|
|
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) {
|
2023-02-13 09:59:27 +00:00
|
|
|
hash := header.Hash()
|
|
|
|
// Logs in cache are partially filled with context data
|
|
|
|
// such as tx index, block hash, etc.
|
|
|
|
// Notably tx hash is NOT filled in because it needs
|
|
|
|
// access to block body data.
|
|
|
|
cached, err := f.sys.cachedLogElem(ctx, hash, header.Number.Uint64())
|
2017-08-18 19:52:20 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-02-13 09:59:27 +00:00
|
|
|
logs := filterLogs(cached.logs, nil, nil, f.addresses, f.topics)
|
|
|
|
if len(logs) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
// Most backends will deliver un-derived logs, but check nevertheless.
|
|
|
|
if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) {
|
2017-08-18 19:52:20 +00:00
|
|
|
return logs, nil
|
|
|
|
}
|
2023-02-13 09:59:27 +00:00
|
|
|
|
|
|
|
body, err := f.sys.cachedGetBody(ctx, cached, hash, header.Number.Uint64())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
for i, log := range logs {
|
|
|
|
// Copy log not to modify cache elements
|
|
|
|
logcopy := *log
|
|
|
|
logcopy.TxHash = body.Transactions[logcopy.TxIndex].Hash()
|
|
|
|
logs[i] = &logcopy
|
|
|
|
}
|
|
|
|
return logs, nil
|
2015-10-12 15:58:51 +00:00
|
|
|
}
|
|
|
|
|
2022-06-07 06:31:19 +00:00
|
|
|
// pendingLogs returns the logs matching the filter criteria within the pending block.
|
2023-05-25 12:40:28 +00:00
|
|
|
func (f *Filter) pendingLogs() []*types.Log {
|
2022-08-19 09:14:59 +00:00
|
|
|
block, receipts := f.sys.backend.PendingBlockAndReceipts()
|
2023-05-31 07:09:49 +00:00
|
|
|
if block == nil || receipts == nil {
|
2023-05-25 12:40:28 +00:00
|
|
|
return nil
|
2023-05-23 11:18:38 +00:00
|
|
|
}
|
2022-06-07 06:31:19 +00:00
|
|
|
if bloomFilter(block.Bloom(), f.addresses, f.topics) {
|
|
|
|
var unfiltered []*types.Log
|
|
|
|
for _, r := range receipts {
|
|
|
|
unfiltered = append(unfiltered, r.Logs...)
|
|
|
|
}
|
2023-05-25 12:40:28 +00:00
|
|
|
return filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
|
2022-06-07 06:31:19 +00:00
|
|
|
}
|
2023-05-25 12:40:28 +00:00
|
|
|
return nil
|
2022-06-07 06:31:19 +00:00
|
|
|
}
|
|
|
|
|
2023-08-10 10:49:05 +00:00
|
|
|
// includes reports whether element is present in the list.
func includes[T comparable](things []T, element T) bool {
	// Delegate to the standard library instead of a hand-rolled loop.
	return slices.Contains(things, element)
}
|
|
|
|
|
2016-12-04 18:07:24 +00:00
|
|
|
// filterLogs creates a slice of logs matching the given criteria.
|
2017-01-05 13:03:50 +00:00
|
|
|
func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log {
|
2023-08-10 10:49:05 +00:00
|
|
|
var check = func(log *types.Log) bool {
|
2016-12-04 18:07:24 +00:00
|
|
|
if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber {
|
2023-08-10 10:49:05 +00:00
|
|
|
return false
|
2016-11-28 13:59:06 +00:00
|
|
|
}
|
2016-12-04 18:07:24 +00:00
|
|
|
if toBlock != nil && toBlock.Int64() >= 0 && toBlock.Uint64() < log.BlockNumber {
|
2023-08-10 10:49:05 +00:00
|
|
|
return false
|
2016-11-28 13:59:06 +00:00
|
|
|
}
|
2016-07-27 15:47:46 +00:00
|
|
|
if len(addresses) > 0 && !includes(addresses, log.Address) {
|
2023-08-10 10:49:05 +00:00
|
|
|
return false
|
2014-08-14 22:24:37 +00:00
|
|
|
}
|
2016-07-27 15:47:46 +00:00
|
|
|
// If the to filtered topics is greater than the amount of topics in logs, skip.
|
|
|
|
if len(topics) > len(log.Topics) {
|
2023-08-10 10:49:05 +00:00
|
|
|
return false
|
2015-04-24 11:36:34 +00:00
|
|
|
}
|
2018-06-14 09:27:02 +00:00
|
|
|
for i, sub := range topics {
|
2023-08-10 10:49:05 +00:00
|
|
|
if len(sub) == 0 {
|
|
|
|
continue // empty rule set == wildcard
|
2014-08-15 14:19:10 +00:00
|
|
|
}
|
2023-08-10 10:49:05 +00:00
|
|
|
if !includes(sub, log.Topics[i]) {
|
|
|
|
return false
|
2015-04-24 11:36:34 +00:00
|
|
|
}
|
2014-08-15 14:19:10 +00:00
|
|
|
}
|
2023-08-10 10:49:05 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
var ret []*types.Log
|
|
|
|
for _, log := range logs {
|
|
|
|
if check(log) {
|
|
|
|
ret = append(ret, log)
|
|
|
|
}
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
2015-01-28 09:23:18 +00:00
|
|
|
return ret
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
|
|
|
|
2016-10-14 03:51:29 +00:00
|
|
|
func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]common.Hash) bool {
|
|
|
|
if len(addresses) > 0 {
|
2015-02-17 15:12:55 +00:00
|
|
|
var included bool
|
2016-10-14 03:51:29 +00:00
|
|
|
for _, addr := range addresses {
|
|
|
|
if types.BloomLookup(bloom, addr) {
|
2015-02-17 15:12:55 +00:00
|
|
|
included = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !included {
|
|
|
|
return false
|
|
|
|
}
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
|
|
|
|
2016-10-14 03:51:29 +00:00
|
|
|
for _, sub := range topics {
|
2017-09-27 10:14:52 +00:00
|
|
|
included := len(sub) == 0 // empty rule set == wildcard
|
2015-03-01 18:08:26 +00:00
|
|
|
for _, topic := range sub {
|
2017-09-27 10:14:52 +00:00
|
|
|
if types.BloomLookup(bloom, topic) {
|
2015-03-01 18:08:26 +00:00
|
|
|
included = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !included {
|
2015-01-28 09:23:18 +00:00
|
|
|
return false
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
|
|
|
}
|
2015-01-28 09:23:18 +00:00
|
|
|
return true
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|