// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/rpc"

	"golang.org/x/net/context"
)

var (
	filterTickerTime = 5 * time.Minute
)

// Filter type identifiers; the untyped constants are converted to byte where used (see getFilterType).
const (
	unknownFilterTy = iota
	blockFilterTy
	transactionFilterTy
	logFilterTy
)

// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such as blocks, transactions and logs.
type PublicFilterAPI struct {
	mux *event.TypeMux

	quit    chan struct{}
	chainDb ethdb.Database

	filterManager *FilterSystem

	filterMapMu   sync.RWMutex
	filterMapping map[string]int // maps external filter identifiers to internal filter identifiers

	logMu    sync.RWMutex
	logQueue map[int]*logQueue

	blockMu    sync.RWMutex
	blockQueue map[int]*hashQueue

	transactionMu    sync.RWMutex
	transactionQueue map[int]*hashQueue
}

// NewPublicFilterAPI returns a new PublicFilterAPI instance.
func NewPublicFilterAPI(chainDb ethdb.Database, mux *event.TypeMux) *PublicFilterAPI {
	svc := &PublicFilterAPI{
		mux:              mux,
		quit:             make(chan struct{}), // must be initialised, Stop closes it
		chainDb:          chainDb,
		filterManager:    NewFilterSystem(mux),
		filterMapping:    make(map[string]int),
		logQueue:         make(map[int]*logQueue),
		blockQueue:       make(map[int]*hashQueue),
		transactionQueue: make(map[int]*hashQueue),
	}
	go svc.start()
	return svc
}
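
// Typical polling usage (illustrative sketch only; error handling is elided and
// chainDb/mux are assumed to be set up by the caller):
//
//	api := NewPublicFilterAPI(chainDb, mux)
//	id, err := api.NewBlockFilter()
//	if err == nil {
//		hashes := api.GetFilterChanges(id) // new block hashes since the last poll
//		_ = hashes
//	}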

// Stop quits the work loop.
func (s *PublicFilterAPI) Stop() {
	close(s.quit)
}

// start the work loop. It periodically removes filters that have not been polled within filterTickerTime.
func (s *PublicFilterAPI) start() {
	timer := time.NewTicker(2 * time.Second)
	defer timer.Stop()
done:
	for {
		select {
		case <-timer.C:
			s.filterManager.Lock() // lock order like filterLoop()
			s.logMu.Lock()
			for id, filter := range s.logQueue {
				if time.Since(filter.timeout) > filterTickerTime {
					s.filterManager.Remove(id)
					delete(s.logQueue, id)
				}
			}
			s.logMu.Unlock()

			s.blockMu.Lock()
			for id, filter := range s.blockQueue {
				if time.Since(filter.timeout) > filterTickerTime {
					s.filterManager.Remove(id)
					delete(s.blockQueue, id)
				}
			}
			s.blockMu.Unlock()

			s.transactionMu.Lock()
			for id, filter := range s.transactionQueue {
				if time.Since(filter.timeout) > filterTickerTime {
					s.filterManager.Remove(id)
					delete(s.transactionQueue, id)
				}
			}
			s.transactionMu.Unlock()
			s.filterManager.Unlock()
		case <-s.quit:
			break done
		}
	}
}

// NewBlockFilter creates a new filter that returns blocks that are included into the canonical chain.
func (s *PublicFilterAPI) NewBlockFilter() (string, error) {
	// protect filterManager.Add() and setting of filter fields
	s.filterManager.Lock()
	defer s.filterManager.Unlock()

	externalId, err := newFilterId()
	if err != nil {
		return "", err
	}

	filter := New(s.chainDb)
	id, err := s.filterManager.Add(filter, ChainFilter)
	if err != nil {
		return "", err
	}

	s.blockMu.Lock()
	s.blockQueue[id] = &hashQueue{timeout: time.Now()}
	s.blockMu.Unlock()

	filter.BlockCallback = func(block *types.Block, logs vm.Logs) {
		s.blockMu.Lock()
		defer s.blockMu.Unlock()

		if queue := s.blockQueue[id]; queue != nil {
			queue.add(block.Hash())
		}
	}

	s.filterMapMu.Lock()
	s.filterMapping[externalId] = id
	s.filterMapMu.Unlock()

	return externalId, nil
}

// NewPendingTransactionFilter creates a filter that returns new pending transactions.
func (s *PublicFilterAPI) NewPendingTransactionFilter() (string, error) {
	// protect filterManager.Add() and setting of filter fields
	s.filterManager.Lock()
	defer s.filterManager.Unlock()

	externalId, err := newFilterId()
	if err != nil {
		return "", err
	}

	filter := New(s.chainDb)
	id, err := s.filterManager.Add(filter, PendingTxFilter)
	if err != nil {
		return "", err
	}

	s.transactionMu.Lock()
	s.transactionQueue[id] = &hashQueue{timeout: time.Now()}
	s.transactionMu.Unlock()

	filter.TransactionCallback = func(tx *types.Transaction) {
		s.transactionMu.Lock()
		defer s.transactionMu.Unlock()

		if queue := s.transactionQueue[id]; queue != nil {
			queue.add(tx.Hash())
		}
	}

	s.filterMapMu.Lock()
	s.filterMapping[externalId] = id
	s.filterMapMu.Unlock()

	return externalId, nil
}

// newLogFilter creates a new log filter. If callback is nil, matching logs are queued for polling;
// otherwise each matching log is delivered through callback.
func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []common.Address, topics [][]common.Hash, callback func(log *vm.Log, removed bool)) (int, error) {
	// protect filterManager.Add() and setting of filter fields
	s.filterManager.Lock()
	defer s.filterManager.Unlock()

	filter := New(s.chainDb)
	id, err := s.filterManager.Add(filter, LogFilter)
	if err != nil {
		return 0, err
	}

	s.logMu.Lock()
	s.logQueue[id] = &logQueue{timeout: time.Now()}
	s.logMu.Unlock()

	filter.SetBeginBlock(earliest)
	filter.SetEndBlock(latest)
	filter.SetAddresses(addresses)
	filter.SetTopics(topics)
	filter.LogCallback = func(log *vm.Log, removed bool) {
		if callback != nil {
			callback(log, removed)
		} else {
			s.logMu.Lock()
			defer s.logMu.Unlock()
			if queue := s.logQueue[id]; queue != nil {
				queue.add(vmlog{log, removed})
			}
		}
	}

	return id, nil
}

// Logs creates a subscription that fires for all new logs that match the given filter criteria.
func (s *PublicFilterAPI) Logs(ctx context.Context, args NewFilterArgs) (rpc.Subscription, error) {
	notifier, supported := rpc.NotifierFromContext(ctx)
	if !supported {
		return nil, rpc.ErrNotificationsUnsupported
	}

	var (
		externalId   string
		subscription rpc.Subscription
		err          error
	)

	if externalId, err = newFilterId(); err != nil {
		return nil, err
	}

	// uninstall filter when subscription is unsubscribed/cancelled
	if subscription, err = notifier.NewSubscription(func(string) {
		s.UninstallFilter(externalId)
	}); err != nil {
		return nil, err
	}

	notifySubscriber := func(log *vm.Log, removed bool) {
		rpcLog := toRPCLogs(vm.Logs{log}, removed)
		if err := subscription.Notify(rpcLog); err != nil {
			subscription.Cancel()
		}
	}

	// from and to block numbers are not used since subscriptions only deliver new logs going forward
	var id int
	if len(args.Addresses) > 0 {
		id, err = s.newLogFilter(-1, -1, args.Addresses, args.Topics, notifySubscriber)
	} else {
		id, err = s.newLogFilter(-1, -1, nil, args.Topics, notifySubscriber)
	}

	if err != nil {
		subscription.Cancel()
		return nil, err
	}

	s.filterMapMu.Lock()
	s.filterMapping[externalId] = id
	s.filterMapMu.Unlock()

	return subscription, err
}

// NewFilterArgs represents a request to create a new filter.
type NewFilterArgs struct {
	FromBlock rpc.BlockNumber
	ToBlock   rpc.BlockNumber
	Addresses []common.Address
	Topics    [][]common.Hash
}
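
// For illustration, a hypothetical JSON request body accepted by UnmarshalJSON below
// (field names follow its json tags; the address and topic values are placeholders):
//
//	{
//	  "fromBlock": "0x1",
//	  "toBlock":   "latest",
//	  "address":   ["0x8888f1f195afa192cfee860698584c030f4c9db1"],
//	  "topics":    [null, ["0x000000000000000000000000000000000000000000000000000000000000dead"]]
//	}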

// UnmarshalJSON sets *args fields with given data.
func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
	type input struct {
		From      *rpc.BlockNumber `json:"fromBlock"`
		ToBlock   *rpc.BlockNumber `json:"toBlock"`
		Addresses interface{}      `json:"address"`
		Topics    []interface{}    `json:"topics"`
	}

	var raw input
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	if raw.From == nil || raw.From.Int64() < 0 {
		args.FromBlock = rpc.LatestBlockNumber
	} else {
		args.FromBlock = *raw.From
	}

	if raw.ToBlock == nil || raw.ToBlock.Int64() < 0 {
		args.ToBlock = rpc.LatestBlockNumber
	} else {
		args.ToBlock = *raw.ToBlock
	}

	args.Addresses = []common.Address{}

	if raw.Addresses != nil {
		// raw.Address can contain a single address or an array of addresses
		var addresses []common.Address
		if strAddrs, ok := raw.Addresses.([]interface{}); ok {
			for i, addr := range strAddrs {
				if strAddr, ok := addr.(string); ok {
					if len(strAddr) >= 2 && strAddr[0] == '0' && (strAddr[1] == 'x' || strAddr[1] == 'X') {
						strAddr = strAddr[2:]
					}
					if decAddr, err := hex.DecodeString(strAddr); err == nil {
						addresses = append(addresses, common.BytesToAddress(decAddr))
					} else {
						return fmt.Errorf("invalid address given")
					}
				} else {
					return fmt.Errorf("invalid address on index %d", i)
				}
			}
		} else if singleAddr, ok := raw.Addresses.(string); ok {
			if len(singleAddr) >= 2 && singleAddr[0] == '0' && (singleAddr[1] == 'x' || singleAddr[1] == 'X') {
				singleAddr = singleAddr[2:]
			}
			if decAddr, err := hex.DecodeString(singleAddr); err == nil {
				addresses = append(addresses, common.BytesToAddress(decAddr))
			} else {
				return fmt.Errorf("invalid address given")
			}
		} else {
			return errors.New("invalid address(es) given")
		}
		args.Addresses = addresses
	}

	// helper function which parses a string to a topic hash
	topicConverter := func(raw string) (common.Hash, error) {
		if len(raw) == 0 {
			return common.Hash{}, nil
		}
		if len(raw) >= 2 && raw[0] == '0' && (raw[1] == 'x' || raw[1] == 'X') {
			raw = raw[2:]
		}
		if len(raw) != 2*common.HashLength {
			return common.Hash{}, errors.New("invalid topic(s)")
		}
		if decAddr, err := hex.DecodeString(raw); err == nil {
			return common.BytesToHash(decAddr), nil
		}
		return common.Hash{}, errors.New("invalid topic(s)")
	}

	// topics is an array consisting of strings and/or arrays of strings.
	// JSON null values are converted to common.Hash{} and ignored by the filter manager.
	if len(raw.Topics) > 0 {
		args.Topics = make([][]common.Hash, len(raw.Topics))
		for i, t := range raw.Topics {
			if t == nil { // ignore topic when matching logs
				args.Topics[i] = []common.Hash{{}}
			} else if topic, ok := t.(string); ok { // match specific topic
				top, err := topicConverter(topic)
				if err != nil {
					return err
				}
				args.Topics[i] = []common.Hash{top}
			} else if topics, ok := t.([]interface{}); ok { // or case e.g. [null, "topic0", "topic1"]
				for _, rawTopic := range topics {
					if rawTopic == nil {
						args.Topics[i] = append(args.Topics[i], common.Hash{})
					} else if topic, ok := rawTopic.(string); ok {
						parsed, err := topicConverter(topic)
						if err != nil {
							return err
						}
						args.Topics[i] = append(args.Topics[i], parsed)
					} else {
						return fmt.Errorf("invalid topic(s)")
					}
				}
			} else {
				return fmt.Errorf("invalid topic(s)")
			}
		}
	}

	return nil
}

// NewFilter creates a new filter and returns the filter id. It can be used to retrieve logs.
func (s *PublicFilterAPI) NewFilter(args NewFilterArgs) (string, error) {
	externalId, err := newFilterId()
	if err != nil {
		return "", err
	}

	var id int
	if len(args.Addresses) > 0 {
		id, err = s.newLogFilter(args.FromBlock.Int64(), args.ToBlock.Int64(), args.Addresses, args.Topics, nil)
	} else {
		id, err = s.newLogFilter(args.FromBlock.Int64(), args.ToBlock.Int64(), nil, args.Topics, nil)
	}
	if err != nil {
		return "", err
	}

	s.filterMapMu.Lock()
	s.filterMapping[externalId] = id
	s.filterMapMu.Unlock()

	return externalId, nil
}
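
// Illustrative sketch (not part of the API): install a log filter for a single
// address and poll it; api and the address value are placeholders.
//
//	args := NewFilterArgs{
//		FromBlock: rpc.LatestBlockNumber,
//		ToBlock:   rpc.LatestBlockNumber,
//		Addresses: []common.Address{common.HexToAddress("0x8888f1f195afa192cfee860698584c030f4c9db1")},
//	}
//	id, err := api.NewFilter(args)
//	if err == nil {
//		changes := api.GetFilterChanges(id)
//		_ = changes
//	}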

// GetLogs returns the logs matching the given argument.
func (s *PublicFilterAPI) GetLogs(args NewFilterArgs) []vmlog {
	filter := New(s.chainDb)
	filter.SetBeginBlock(args.FromBlock.Int64())
	filter.SetEndBlock(args.ToBlock.Int64())
	filter.SetAddresses(args.Addresses)
	filter.SetTopics(args.Topics)

	return toRPCLogs(filter.Find(), false)
}

// UninstallFilter removes the filter with the given filter id.
func (s *PublicFilterAPI) UninstallFilter(filterId string) bool {
	s.filterManager.Lock()
	defer s.filterManager.Unlock()

	s.filterMapMu.Lock()
	id, ok := s.filterMapping[filterId]
	if !ok {
		s.filterMapMu.Unlock()
		return false
	}
	delete(s.filterMapping, filterId)
	s.filterMapMu.Unlock()

	s.filterManager.Remove(id)

	s.logMu.Lock()
	if _, ok := s.logQueue[id]; ok {
		delete(s.logQueue, id)
		s.logMu.Unlock()
		return true
	}
	s.logMu.Unlock()

	s.blockMu.Lock()
	if _, ok := s.blockQueue[id]; ok {
		delete(s.blockQueue, id)
		s.blockMu.Unlock()
		return true
	}
	s.blockMu.Unlock()

	s.transactionMu.Lock()
	if _, ok := s.transactionQueue[id]; ok {
		delete(s.transactionQueue, id)
		s.transactionMu.Unlock()
		return true
	}
	s.transactionMu.Unlock()

	return false
}

// getFilterType is a helper utility that determines the type of filter for the given filter id.
// Note that the queue maps are read without acquiring their mutexes.
func (s *PublicFilterAPI) getFilterType(id int) byte {
	if _, ok := s.blockQueue[id]; ok {
		return blockFilterTy
	} else if _, ok := s.transactionQueue[id]; ok {
		return transactionFilterTy
	} else if _, ok := s.logQueue[id]; ok {
		return logFilterTy
	}

	return unknownFilterTy
}

// blockFilterChanged returns a collection of block hashes for the block filter with the given id.
func (s *PublicFilterAPI) blockFilterChanged(id int) []common.Hash {
	s.blockMu.Lock()
	defer s.blockMu.Unlock()

	if s.blockQueue[id] != nil {
		return s.blockQueue[id].get()
	}
	return nil
}

// transactionFilterChanged returns a collection of transaction hashes for the pending
// transaction filter with the given id.
func (s *PublicFilterAPI) transactionFilterChanged(id int) []common.Hash {
	s.transactionMu.Lock()
	defer s.transactionMu.Unlock()

	if s.transactionQueue[id] != nil {
		return s.transactionQueue[id].get()
	}
	return nil
}

// logFilterChanged returns a collection of logs for the log filter with the given id.
func (s *PublicFilterAPI) logFilterChanged(id int) []vmlog {
	s.logMu.Lock()
	defer s.logMu.Unlock()

	if s.logQueue[id] != nil {
		return s.logQueue[id].get()
	}
	return nil
}

// GetFilterLogs returns the logs for the filter with the given id.
func (s *PublicFilterAPI) GetFilterLogs(filterId string) []vmlog {
	s.filterMapMu.RLock()
	id, ok := s.filterMapping[filterId]
	s.filterMapMu.RUnlock()
	if !ok {
		return toRPCLogs(nil, false)
	}

	if filter := s.filterManager.Get(id); filter != nil {
		return toRPCLogs(filter.Find(), false)
	}

	return toRPCLogs(nil, false)
}

// GetFilterChanges returns the logs for the filter with the given id since the last time it was called.
// This can be used for polling.
func (s *PublicFilterAPI) GetFilterChanges(filterId string) interface{} {
	s.filterMapMu.RLock()
	id, ok := s.filterMapping[filterId]
	s.filterMapMu.RUnlock()

	if !ok { // filter not found
		return []interface{}{}
	}

	switch s.getFilterType(id) {
	case blockFilterTy:
		return returnHashes(s.blockFilterChanged(id))
	case transactionFilterTy:
		return returnHashes(s.transactionFilterChanged(id))
	case logFilterTy:
		return s.logFilterChanged(id)
	}

	return []interface{}{}
}

// vmlog wraps a vm.Log together with a flag indicating whether the log was removed
// (e.g. because of a chain reorganisation).
type vmlog struct {
	*vm.Log
	Removed bool `json:"removed"`
}

// logQueue buffers matched logs for a polling filter until they are fetched.
type logQueue struct {
	mu sync.Mutex

	logs    []vmlog
	timeout time.Time
	id      int
}

// add appends logs to the queue.
func (l *logQueue) add(logs ...vmlog) {
	l.mu.Lock()
	defer l.mu.Unlock()

	l.logs = append(l.logs, logs...)
}

// get drains the queue and resets the timeout used for filter expiry.
func (l *logQueue) get() []vmlog {
	l.mu.Lock()
	defer l.mu.Unlock()

	l.timeout = time.Now()
	tmp := l.logs
	l.logs = nil
	return tmp
}

// hashQueue buffers block or transaction hashes for a polling filter until they are fetched.
type hashQueue struct {
	mu sync.Mutex

	hashes  []common.Hash
	timeout time.Time
	id      int
}

// add appends hashes to the queue.
func (l *hashQueue) add(hashes ...common.Hash) {
	l.mu.Lock()
	defer l.mu.Unlock()

	l.hashes = append(l.hashes, hashes...)
}

// get drains the queue and resets the timeout used for filter expiry.
func (l *hashQueue) get() []common.Hash {
	l.mu.Lock()
	defer l.mu.Unlock()

	l.timeout = time.Now()
	tmp := l.hashes
	l.hashes = nil
	return tmp
}

// newFilterId generates a new random filter identifier that can be exposed to the outer world. By publishing random
// identifiers it is not feasible for DApps to guess the filter ids of other DApps and uninstall or poll for them,
// causing the affected DApp to miss data.
func newFilterId() (string, error) {
	var subid [16]byte
	n, _ := rand.Read(subid[:])
	if n != 16 {
		return "", errors.New("unable to generate filter id")
	}
	return "0x" + hex.EncodeToString(subid[:]), nil
}

// toRPCLogs is a helper that converts a vm.Logs array into a structure which can hold
// additional information about the logs, such as whether they were removed.
// When nil is given it creates an empty slice instead, as required by the RPC specification.
func toRPCLogs(logs vm.Logs, removed bool) []vmlog {
	convertedLogs := make([]vmlog, len(logs))
	for i, log := range logs {
		convertedLogs[i] = vmlog{Log: log, Removed: removed}
	}
	return convertedLogs
}

// returnHashes is a helper that returns an empty hash array in case the given hash array is nil,
// otherwise it returns the given hashes. The RPC interface defines that an array is always returned.
func returnHashes(hashes []common.Hash) []common.Hash {
	if hashes == nil {
		return []common.Hash{}
	}
	return hashes
}