2016-04-14 16:18:24 +00:00
|
|
|
// Copyright 2014 The go-ethereum Authors
|
2015-07-22 16:48:40 +00:00
|
|
|
// This file is part of the go-ethereum library.
|
2015-07-07 00:54:22 +00:00
|
|
|
//
|
2015-07-23 16:35:11 +00:00
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
2015-07-07 00:54:22 +00:00
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
2015-07-22 16:48:40 +00:00
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
2015-07-07 00:54:22 +00:00
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
2015-07-22 16:48:40 +00:00
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2015-07-07 00:54:22 +00:00
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
2015-07-22 16:48:40 +00:00
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
2015-07-07 00:54:22 +00:00
|
|
|
|
2015-08-30 08:04:59 +00:00
|
|
|
package filters
|
2014-08-11 14:23:17 +00:00
|
|
|
|
|
|
|
import (
|
2017-03-22 17:20:33 +00:00
|
|
|
"context"
|
2018-07-12 14:36:07 +00:00
|
|
|
"errors"
|
2016-11-28 13:59:06 +00:00
|
|
|
"math/big"
|
|
|
|
|
2015-03-17 10:19:23 +00:00
|
|
|
"github.com/ethereum/go-ethereum/common"
|
2017-08-18 19:52:20 +00:00
|
|
|
"github.com/ethereum/go-ethereum/core/bloombits"
|
2015-03-26 11:06:14 +00:00
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
2016-01-13 18:35:48 +00:00
|
|
|
"github.com/ethereum/go-ethereum/rpc"
|
2014-08-11 14:23:17 +00:00
|
|
|
)
|
|
|
|
|
2016-12-04 18:07:24 +00:00
|
|
|
// Filter can be used to retrieve and filter logs.
type Filter struct {
	sys *FilterSystem // Filter system providing backend access (headers, receipts, bloom status) and the log cache

	addresses []common.Address // Addresses logs must originate from (empty == any address)
	topics    [][]common.Hash  // Positional topic clauses; each inner slice is OR-ed, empty inner slice == wildcard

	block *common.Hash // Block hash if filtering a single block
	begin, end int64   // Range interval if filtering multiple blocks

	matcher *bloombits.Matcher // Bloombits matcher for indexed range filtering; set only by NewRangeFilter
}
|
|
|
|
|
2018-07-12 14:36:07 +00:00
|
|
|
// NewRangeFilter creates a new filter which uses a bloom filter on blocks to
|
|
|
|
// figure out whether a particular block is interesting or not.
|
2022-08-19 09:14:59 +00:00
|
|
|
func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
|
2017-09-27 10:14:52 +00:00
|
|
|
// Flatten the address and topic filter clauses into a single bloombits filter
|
|
|
|
// system. Since the bloombits are not positional, nil topics are permitted,
|
|
|
|
// which get flattened into a nil byte slice.
|
2017-09-06 00:33:10 +00:00
|
|
|
var filters [][][]byte
|
|
|
|
if len(addresses) > 0 {
|
|
|
|
filter := make([][]byte, len(addresses))
|
|
|
|
for i, address := range addresses {
|
|
|
|
filter[i] = address.Bytes()
|
|
|
|
}
|
|
|
|
filters = append(filters, filter)
|
|
|
|
}
|
|
|
|
for _, topicList := range topics {
|
|
|
|
filter := make([][]byte, len(topicList))
|
|
|
|
for i, topic := range topicList {
|
|
|
|
filter[i] = topic.Bytes()
|
|
|
|
}
|
|
|
|
filters = append(filters, filter)
|
|
|
|
}
|
2022-08-19 09:14:59 +00:00
|
|
|
size, _ := sys.backend.BloomStatus()
|
2017-08-29 11:13:11 +00:00
|
|
|
|
2018-07-12 14:36:07 +00:00
|
|
|
// Create a generic filter and convert it into a range filter
|
2022-08-19 09:14:59 +00:00
|
|
|
filter := newFilter(sys, addresses, topics)
|
2018-07-12 14:36:07 +00:00
|
|
|
|
|
|
|
filter.matcher = bloombits.NewMatcher(size, filters)
|
|
|
|
filter.begin = begin
|
|
|
|
filter.end = end
|
|
|
|
|
|
|
|
return filter
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewBlockFilter creates a new filter which directly inspects the contents of
|
|
|
|
// a block to figure out whether it is interesting or not.
|
2022-08-19 09:14:59 +00:00
|
|
|
func (sys *FilterSystem) NewBlockFilter(block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter {
|
2018-07-12 14:36:07 +00:00
|
|
|
// Create a generic filter and convert it into a block filter
|
2022-08-19 09:14:59 +00:00
|
|
|
filter := newFilter(sys, addresses, topics)
|
2022-10-27 13:25:01 +00:00
|
|
|
filter.block = &block
|
2018-07-12 14:36:07 +00:00
|
|
|
return filter
|
|
|
|
}
|
|
|
|
|
|
|
|
// newFilter creates a generic filter that can either filter based on a block hash,
|
|
|
|
// or based on range queries. The search criteria needs to be explicitly set.
|
2022-08-19 09:14:59 +00:00
|
|
|
func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common.Hash) *Filter {
|
2016-01-13 18:35:48 +00:00
|
|
|
return &Filter{
|
2022-08-19 09:14:59 +00:00
|
|
|
sys: sys,
|
2017-08-29 11:13:11 +00:00
|
|
|
addresses: addresses,
|
|
|
|
topics: topics,
|
2016-01-13 18:35:48 +00:00
|
|
|
}
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
|
|
|
|
2017-08-29 11:13:11 +00:00
|
|
|
// Logs searches the blockchain for matching log entries, returning all from the
// first block that contains matches, updating the start of the filter accordingly.
func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
	// If we're doing singleton block filtering, execute and return
	if f.block != nil {
		header, err := f.sys.backend.HeaderByHash(ctx, *f.block)
		if err != nil {
			return nil, err
		}
		if header == nil {
			return nil, errors.New("unknown block")
		}
		// skipBloom=false: still use the header bloom to cheaply reject non-matches.
		return f.blockLogs(ctx, header, false)
	}
	// Short-cut if all we care about is pending logs
	if f.begin == rpc.PendingBlockNumber.Int64() {
		// A pending begin with a non-pending end is a nonsensical range.
		if f.end != rpc.PendingBlockNumber.Int64() {
			return nil, errors.New("invalid block range")
		}
		return f.pendingLogs()
	}
	// Figure out the limits of the filter range
	header, _ := f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
	if header == nil {
		// No chain head available; nothing to filter over.
		return nil, nil
	}
	var (
		head    = header.Number.Uint64()
		end     = uint64(f.end)
		pending = f.end == rpc.PendingBlockNumber.Int64() // append pending logs at the end if set
	)
	// Resolve the "latest"/"pending" sentinel values to the concrete head number.
	if f.begin == rpc.LatestBlockNumber.Int64() {
		f.begin = int64(head)
	}
	if f.end == rpc.LatestBlockNumber.Int64() || f.end == rpc.PendingBlockNumber.Int64() {
		end = head
	}
	// Gather all indexed logs, and finish with non indexed ones
	var (
		logs []*types.Log
		err  error
		size, sections = f.sys.backend.BloomStatus()
	)
	// sections*size is the first block number NOT covered by the bloombits index.
	if indexed := sections * size; indexed > uint64(f.begin) {
		if indexed > end {
			logs, err = f.indexedLogs(ctx, end)
		} else {
			// The index ends before the requested range does; use it only up to indexed-1.
			logs, err = f.indexedLogs(ctx, indexed-1)
		}
		if err != nil {
			return logs, err
		}
	}
	// indexedLogs advanced f.begin past the blocks it processed, so this scans
	// only the remaining (unindexed) tail of the range.
	rest, err := f.unindexedLogs(ctx, end)
	logs = append(logs, rest...)
	if pending {
		pendingLogs, err := f.pendingLogs()
		if err != nil {
			return nil, err
		}
		logs = append(logs, pendingLogs...)
	}
	// err here is the unindexedLogs error; partial results are returned alongside it.
	return logs, err
}
|
2015-06-09 11:22:16 +00:00
|
|
|
|
2017-08-29 11:13:11 +00:00
|
|
|
// indexedLogs returns the logs matching the filter criteria based on the bloom
// bits indexed available locally or via the network.
func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
	// Create a matcher session and request servicing from the backend
	matches := make(chan uint64, 64) // buffered so the matcher can run ahead of log retrieval

	session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)
	if err != nil {
		return nil, err
	}
	defer session.Close()

	// Hand the session to the backend so it can feed bloom bits to the matcher.
	f.sys.backend.ServiceFilter(ctx, session)

	// Iterate over the matches until exhausted or context closed
	var logs []*types.Log

	for {
		select {
		case number, ok := <-matches:
			// Abort if all matches have been fulfilled
			if !ok {
				err := session.Error()
				if err == nil {
					// Whole range processed cleanly; the next (unindexed) scan
					// should resume right after end.
					f.begin = int64(end) + 1
				}
				return logs, err
			}
			// Record progress so an early error return resumes after this block.
			f.begin = int64(number) + 1

			// Retrieve the suggested block and pull any truly matching logs
			header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
			if header == nil || err != nil {
				return logs, err
			}
			// skipBloom=true: the bloombits matcher already performed the bloom check.
			found, err := f.blockLogs(ctx, header, true)
			if err != nil {
				return logs, err
			}
			logs = append(logs, found...)

		case <-ctx.Done():
			return logs, ctx.Err()
		}
	}
}
|
|
|
|
|
2020-03-27 15:33:14 +00:00
|
|
|
// unindexedLogs returns the logs matching the filter criteria based on raw block
|
2017-08-29 11:13:11 +00:00
|
|
|
// iteration and bloom matching.
|
|
|
|
func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
|
|
|
|
var logs []*types.Log
|
|
|
|
|
|
|
|
for ; f.begin <= int64(end); f.begin++ {
|
2022-08-19 09:14:59 +00:00
|
|
|
header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
|
2017-08-29 11:13:11 +00:00
|
|
|
if header == nil || err != nil {
|
|
|
|
return logs, err
|
|
|
|
}
|
2022-08-19 09:14:59 +00:00
|
|
|
found, err := f.blockLogs(ctx, header, false)
|
2018-07-12 14:36:07 +00:00
|
|
|
if err != nil {
|
|
|
|
return logs, err
|
|
|
|
}
|
|
|
|
logs = append(logs, found...)
|
|
|
|
}
|
|
|
|
return logs, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// blockLogs returns the logs matching the filter criteria within a single block.
|
2022-08-19 09:14:59 +00:00
|
|
|
func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom bool) ([]*types.Log, error) {
|
|
|
|
// Fast track: no filtering criteria
|
|
|
|
if len(f.addresses) == 0 && len(f.topics) == 0 {
|
|
|
|
list, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64())
|
2018-07-12 14:36:07 +00:00
|
|
|
if err != nil {
|
2022-08-19 09:14:59 +00:00
|
|
|
return nil, err
|
2017-08-29 11:13:11 +00:00
|
|
|
}
|
2022-08-19 09:14:59 +00:00
|
|
|
return flatten(list), nil
|
|
|
|
} else if skipBloom || bloomFilter(header.Bloom, f.addresses, f.topics) {
|
|
|
|
return f.checkMatches(ctx, header)
|
2015-10-12 15:58:51 +00:00
|
|
|
}
|
2022-08-19 09:14:59 +00:00
|
|
|
return nil, nil
|
2017-08-18 19:52:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// checkMatches checks if the receipts belonging to the given header contain any log events that
// match the filter criteria. This function is called when the bloom filter signals a potential match.
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) {
	logsList, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64())
	if err != nil {
		return nil, err
	}

	unfiltered := flatten(logsList)
	logs := filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
	if len(logs) > 0 {
		// We have matching logs, check if we need to resolve full logs via the light client
		// NOTE(review): a zero TxHash is taken as the signal that the cached logs
		// are incomplete; in that case rebuild the log set from the full receipts.
		if logs[0].TxHash == (common.Hash{}) {
			receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash())
			if err != nil {
				return nil, err
			}
			unfiltered = unfiltered[:0] // reuse the backing array for the rebuilt set
			for _, receipt := range receipts {
				unfiltered = append(unfiltered, receipt.Logs...)
			}
			logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
		}
		return logs, nil
	}
	return nil, nil
}
|
|
|
|
|
2022-06-07 06:31:19 +00:00
|
|
|
// pendingLogs returns the logs matching the filter criteria within the pending block.
|
|
|
|
func (f *Filter) pendingLogs() ([]*types.Log, error) {
|
2022-08-19 09:14:59 +00:00
|
|
|
block, receipts := f.sys.backend.PendingBlockAndReceipts()
|
2022-06-07 06:31:19 +00:00
|
|
|
if bloomFilter(block.Bloom(), f.addresses, f.topics) {
|
|
|
|
var unfiltered []*types.Log
|
|
|
|
for _, r := range receipts {
|
|
|
|
unfiltered = append(unfiltered, r.Logs...)
|
|
|
|
}
|
|
|
|
return filterLogs(unfiltered, nil, nil, f.addresses, f.topics), nil
|
|
|
|
}
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
2015-03-17 10:19:23 +00:00
|
|
|
func includes(addresses []common.Address, a common.Address) bool {
|
2014-08-15 14:19:10 +00:00
|
|
|
for _, addr := range addresses {
|
2015-09-01 07:19:45 +00:00
|
|
|
if addr == a {
|
|
|
|
return true
|
2014-08-15 14:19:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-01 07:19:45 +00:00
|
|
|
return false
|
2014-08-15 14:19:10 +00:00
|
|
|
}
|
|
|
|
|
2016-12-04 18:07:24 +00:00
|
|
|
// filterLogs creates a slice of logs matching the given criteria.
|
2017-01-05 13:03:50 +00:00
|
|
|
func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log {
|
|
|
|
var ret []*types.Log
|
2015-02-05 01:28:54 +00:00
|
|
|
Logs:
|
2015-01-28 09:23:18 +00:00
|
|
|
for _, log := range logs {
|
2016-12-04 18:07:24 +00:00
|
|
|
if fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber {
|
2016-11-28 13:59:06 +00:00
|
|
|
continue
|
|
|
|
}
|
2016-12-04 18:07:24 +00:00
|
|
|
if toBlock != nil && toBlock.Int64() >= 0 && toBlock.Uint64() < log.BlockNumber {
|
2016-11-28 13:59:06 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2016-07-27 15:47:46 +00:00
|
|
|
if len(addresses) > 0 && !includes(addresses, log.Address) {
|
2014-08-14 22:24:37 +00:00
|
|
|
continue
|
|
|
|
}
|
2016-07-27 15:47:46 +00:00
|
|
|
// If the to filtered topics is greater than the amount of topics in logs, skip.
|
|
|
|
if len(topics) > len(log.Topics) {
|
2022-05-05 15:58:43 +00:00
|
|
|
continue
|
2015-04-24 11:36:34 +00:00
|
|
|
}
|
2018-06-14 09:27:02 +00:00
|
|
|
for i, sub := range topics {
|
|
|
|
match := len(sub) == 0 // empty rule set == wildcard
|
|
|
|
for _, topic := range sub {
|
2017-09-27 10:14:52 +00:00
|
|
|
if log.Topics[i] == topic {
|
2015-03-01 18:08:26 +00:00
|
|
|
match = true
|
2015-04-24 11:36:34 +00:00
|
|
|
break
|
2015-03-01 18:08:26 +00:00
|
|
|
}
|
2014-08-15 14:19:10 +00:00
|
|
|
}
|
2015-04-24 11:36:34 +00:00
|
|
|
if !match {
|
|
|
|
continue Logs
|
|
|
|
}
|
2014-08-15 14:19:10 +00:00
|
|
|
}
|
2015-01-28 09:23:18 +00:00
|
|
|
ret = append(ret, log)
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
2015-01-28 09:23:18 +00:00
|
|
|
return ret
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
|
|
|
|
2016-10-14 03:51:29 +00:00
|
|
|
func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]common.Hash) bool {
|
|
|
|
if len(addresses) > 0 {
|
2015-02-17 15:12:55 +00:00
|
|
|
var included bool
|
2016-10-14 03:51:29 +00:00
|
|
|
for _, addr := range addresses {
|
|
|
|
if types.BloomLookup(bloom, addr) {
|
2015-02-17 15:12:55 +00:00
|
|
|
included = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !included {
|
|
|
|
return false
|
|
|
|
}
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
|
|
|
|
2016-10-14 03:51:29 +00:00
|
|
|
for _, sub := range topics {
|
2017-09-27 10:14:52 +00:00
|
|
|
included := len(sub) == 0 // empty rule set == wildcard
|
2015-03-01 18:08:26 +00:00
|
|
|
for _, topic := range sub {
|
2017-09-27 10:14:52 +00:00
|
|
|
if types.BloomLookup(bloom, topic) {
|
2015-03-01 18:08:26 +00:00
|
|
|
included = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !included {
|
2015-01-28 09:23:18 +00:00
|
|
|
return false
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
|
|
|
}
|
2015-01-28 09:23:18 +00:00
|
|
|
return true
|
2014-08-11 14:23:17 +00:00
|
|
|
}
|
2022-08-19 09:14:59 +00:00
|
|
|
|
|
|
|
func flatten(list [][]*types.Log) []*types.Log {
|
|
|
|
var flat []*types.Log
|
|
|
|
for _, logs := range list {
|
|
|
|
flat = append(flat, logs...)
|
|
|
|
}
|
|
|
|
return flat
|
|
|
|
}
|