// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filters

import (
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/rpc"

	"golang.org/x/net/context"
)

var (
	filterTickerTime = 5 * time.Minute
)

// Filter type identifiers; getFilterType returns them as byte values.
const (
	unknownFilterTy = iota
	blockFilterTy
	transactionFilterTy
	logFilterTy
)

// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various
// information related to the Ethereum protocol such as blocks, transactions and logs.
type PublicFilterAPI struct {
	mux *event.TypeMux

	quit    chan struct{}
	chainDb ethdb.Database

	filterManager *FilterSystem

	filterMapMu   sync.RWMutex
	filterMapping map[string]int // maps external filter identifiers to internal filter identifiers

	logMu    sync.RWMutex
	logQueue map[int]*logQueue

	blockMu    sync.RWMutex
	blockQueue map[int]*hashQueue

	transactionMu    sync.RWMutex
	transactionQueue map[int]*hashQueue

	transactMu sync.Mutex
}

// NewPublicFilterAPI returns a new PublicFilterAPI instance.
func NewPublicFilterAPI(chainDb ethdb.Database, mux *event.TypeMux) *PublicFilterAPI {
	svc := &PublicFilterAPI{
		mux:              mux,
		quit:             make(chan struct{}), // initialised here so Stop can close it
		chainDb:          chainDb,
		filterManager:    NewFilterSystem(mux),
		filterMapping:    make(map[string]int),
		logQueue:         make(map[int]*logQueue),
		blockQueue:       make(map[int]*hashQueue),
		transactionQueue: make(map[int]*hashQueue),
	}
	go svc.start()
	return svc
}
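
// An illustrative construction sketch (chainDb and mux are assumed to be the
// chain database and event mux provided by the surrounding node setup):
//
//	api := NewPublicFilterAPI(chainDb, mux)
//	defer api.Stop()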

// Stop quits the work loop.
func (s *PublicFilterAPI) Stop() {
	close(s.quit)
}

// start runs the work loop, periodically evicting filters that have not been
// polled within filterTickerTime, until Stop is called.
func (s *PublicFilterAPI) start() {
	timer := time.NewTicker(2 * time.Second)
	defer timer.Stop()
done:
	for {
		select {
		case <-timer.C:
			s.logMu.Lock()
			for id, filter := range s.logQueue {
				if time.Since(filter.timeout) > filterTickerTime {
					s.filterManager.Remove(id)
					delete(s.logQueue, id)
				}
			}
			s.logMu.Unlock()

			s.blockMu.Lock()
			for id, filter := range s.blockQueue {
				if time.Since(filter.timeout) > filterTickerTime {
					s.filterManager.Remove(id)
					delete(s.blockQueue, id)
				}
			}
			s.blockMu.Unlock()

			s.transactionMu.Lock()
			for id, filter := range s.transactionQueue {
				if time.Since(filter.timeout) > filterTickerTime {
					s.filterManager.Remove(id)
					delete(s.transactionQueue, id)
				}
			}
			s.transactionMu.Unlock()
		case <-s.quit:
			break done
		}
	}
}

// NewBlockFilter creates a new filter that returns blocks that are included into the canonical chain.
func (s *PublicFilterAPI) NewBlockFilter() (string, error) {
	externalId, err := newFilterId()
	if err != nil {
		return "", err
	}

	s.blockMu.Lock()
	defer s.blockMu.Unlock()

	filter := New(s.chainDb)
	id, err := s.filterManager.Add(filter, ChainFilter)
	if err != nil {
		return "", err
	}

	s.blockQueue[id] = &hashQueue{timeout: time.Now()}

	filter.BlockCallback = func(block *types.Block, logs vm.Logs) {
		s.blockMu.Lock()
		defer s.blockMu.Unlock()

		if queue := s.blockQueue[id]; queue != nil {
			queue.add(block.Hash())
		}
	}

	s.filterMapMu.Lock()
	s.filterMapping[externalId] = id
	s.filterMapMu.Unlock()

	return externalId, nil
}

// NewPendingTransactionFilter creates a filter that returns new pending transactions.
func (s *PublicFilterAPI) NewPendingTransactionFilter() (string, error) {
	externalId, err := newFilterId()
	if err != nil {
		return "", err
	}

	s.transactionMu.Lock()
	defer s.transactionMu.Unlock()

	filter := New(s.chainDb)
	id, err := s.filterManager.Add(filter, PendingTxFilter)
	if err != nil {
		return "", err
	}

	s.transactionQueue[id] = &hashQueue{timeout: time.Now()}

	filter.TransactionCallback = func(tx *types.Transaction) {
		s.transactionMu.Lock()
		defer s.transactionMu.Unlock()

		if queue := s.transactionQueue[id]; queue != nil {
			queue.add(tx.Hash())
		}
	}

	s.filterMapMu.Lock()
	s.filterMapping[externalId] = id
	s.filterMapMu.Unlock()

	return externalId, nil
}
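
// An illustrative polling sequence for the block and pending-transaction
// filters above; for these filter types GetFilterChanges returns the hashes
// queued since the previous poll:
//
//	id, _ := api.NewPendingTransactionFilter()
//	hashes := api.GetFilterChanges(id) // []common.Hash of newly pending transactions
//	api.UninstallFilter(id)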

// newLogFilter creates a new log filter.
func (s *PublicFilterAPI) newLogFilter(earliest, latest int64, addresses []common.Address, topics [][]common.Hash, callback func(log *vm.Log, removed bool)) (int, error) {
	s.logMu.Lock()
	defer s.logMu.Unlock()

	filter := New(s.chainDb)
	id, err := s.filterManager.Add(filter, LogFilter)
	if err != nil {
		return 0, err
	}

	s.logQueue[id] = &logQueue{timeout: time.Now()}

	filter.SetBeginBlock(earliest)
	filter.SetEndBlock(latest)
	filter.SetAddresses(addresses)
	filter.SetTopics(topics)
	filter.LogCallback = func(log *vm.Log, removed bool) {
		if callback != nil {
			callback(log, removed)
		} else {
			s.logMu.Lock()
			defer s.logMu.Unlock()
			if queue := s.logQueue[id]; queue != nil {
				queue.add(vmlog{log, removed})
			}
		}
	}

	return id, nil
}

// Logs creates a subscription that pushes logs matching the given criteria to the
// client. It requires a notification-capable connection, i.e. an rpc.Notifier must
// be present in the context. The backing filter is uninstalled when the subscription
// is unsubscribed or cancelled.
func (s *PublicFilterAPI) Logs(ctx context.Context, args NewFilterArgs) (rpc.Subscription, error) {
	notifier, supported := ctx.Value(rpc.NotifierContextKey).(rpc.Notifier)
	if !supported {
		return nil, rpc.ErrNotificationsUnsupported
	}

	var (
		externalId   string
		subscription rpc.Subscription
		err          error
	)

	if externalId, err = newFilterId(); err != nil {
		return nil, err
	}

	// uninstall filter when subscription is unsubscribed/cancelled
	if subscription, err = notifier.NewSubscription(func(string) {
		s.UninstallFilter(externalId)
	}); err != nil {
		return nil, err
	}

	notifySubscriber := func(log *vm.Log, removed bool) {
		rpcLog := toRPCLogs(vm.Logs{log}, removed)
		if err := subscription.Notify(rpcLog); err != nil {
			subscription.Cancel()
		}
	}

	// The from and to block numbers are ignored: subscriptions only deliver
	// new logs, they cannot travel back in time.
	var id int
	if len(args.Addresses) > 0 {
		id, err = s.newLogFilter(-1, -1, args.Addresses, args.Topics, notifySubscriber)
	} else {
		id, err = s.newLogFilter(-1, -1, nil, args.Topics, notifySubscriber)
	}

	if err != nil {
		subscription.Cancel()
		return nil, err
	}

	s.filterMapMu.Lock()
	s.filterMapping[externalId] = id
	s.filterMapMu.Unlock()

	return subscription, err
}

// NewFilterArgs represents a request to create a new filter.
type NewFilterArgs struct {
	FromBlock rpc.BlockNumber
	ToBlock   rpc.BlockNumber
	Addresses []common.Address
	Topics    [][]common.Hash
}

// UnmarshalJSON parses the filter criteria from a JSON object. The address field
// may hold a single address or an array of addresses, and each topic entry may be
// null, a string or an array of strings.
func (args *NewFilterArgs) UnmarshalJSON(data []byte) error {
	type input struct {
		From      *rpc.BlockNumber `json:"fromBlock"`
		ToBlock   *rpc.BlockNumber `json:"toBlock"`
		Addresses interface{}      `json:"address"`
		Topics    interface{}      `json:"topics"`
	}

	var raw input
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	if raw.From == nil || raw.From.Int64() < 0 {
		args.FromBlock = rpc.LatestBlockNumber
	} else {
		args.FromBlock = *raw.From
	}

	if raw.ToBlock == nil || raw.ToBlock.Int64() < 0 {
		args.ToBlock = rpc.LatestBlockNumber
	} else {
		args.ToBlock = *raw.ToBlock
	}

	args.Addresses = []common.Address{}

	if raw.Addresses != nil {
		// raw.Addresses can contain a single address or an array of addresses
		var addresses []common.Address

		if strAddrs, ok := raw.Addresses.([]interface{}); ok {
			for i, addr := range strAddrs {
				if strAddr, ok := addr.(string); ok {
					if len(strAddr) >= 2 && strAddr[0] == '0' && (strAddr[1] == 'x' || strAddr[1] == 'X') {
						strAddr = strAddr[2:]
					}
					if decAddr, err := hex.DecodeString(strAddr); err == nil {
						addresses = append(addresses, common.BytesToAddress(decAddr))
					} else {
						return fmt.Errorf("invalid address given")
					}
				} else {
					return fmt.Errorf("invalid address on index %d", i)
				}
			}
		} else if singleAddr, ok := raw.Addresses.(string); ok {
			if len(singleAddr) >= 2 && singleAddr[0] == '0' && (singleAddr[1] == 'x' || singleAddr[1] == 'X') {
				singleAddr = singleAddr[2:]
			}
			if decAddr, err := hex.DecodeString(singleAddr); err == nil {
				addresses = append(addresses, common.BytesToAddress(decAddr))
			} else {
				return fmt.Errorf("invalid address given")
			}
		} else {
			return errors.New("invalid address(es) given")
		}
		args.Addresses = addresses
	}

	topicConverter := func(raw string) (common.Hash, error) {
		if len(raw) == 0 {
			return common.Hash{}, nil
		}

		if len(raw) >= 2 && raw[0] == '0' && (raw[1] == 'x' || raw[1] == 'X') {
			raw = raw[2:]
		}

		if decAddr, err := hex.DecodeString(raw); err == nil {
			return common.BytesToHash(decAddr), nil
		}

		return common.Hash{}, errors.New("invalid topic given")
	}

	// topics is an array consisting of strings and/or arrays of strings
	if raw.Topics != nil {
		topics, ok := raw.Topics.([]interface{})
		if ok {
			parsedTopics := make([][]common.Hash, len(topics))
			for i, topic := range topics {
				if topic == nil {
					parsedTopics[i] = []common.Hash{common.StringToHash("")}
				} else if strTopic, ok := topic.(string); ok {
					if t, err := topicConverter(strTopic); err != nil {
						return fmt.Errorf("invalid topic on index %d", i)
					} else {
						parsedTopics[i] = []common.Hash{t}
					}
				} else if arrTopic, ok := topic.([]interface{}); ok {
					parsedTopics[i] = make([]common.Hash, len(arrTopic))
					for j := 0; j < len(parsedTopics[i]); j++ {
						if arrTopic[j] == nil {
							parsedTopics[i][j] = common.StringToHash("")
						} else if str, ok := arrTopic[j].(string); ok {
							if t, err := topicConverter(str); err != nil {
								return fmt.Errorf("invalid topic on index %d", i)
							} else {
								parsedTopics[i][j] = t
							}
						} else {
							return fmt.Errorf("topic[%d][%d] not a string", i, j)
						}
					}
				} else {
					return fmt.Errorf("topic[%d] invalid", i)
				}
			}
			args.Topics = parsedTopics
		}
	}

	return nil
}
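
// An illustrative JSON payload accepted by UnmarshalJSON (all values are
// placeholders; "address" may also be a single string, and each topic entry
// may be null, a string or an array of strings):
//
//	{
//		"fromBlock": "0x1",
//		"toBlock":   "latest",
//		"address":   ["0x1111111111111111111111111111111111111111"],
//		"topics":    [["0x2222222222222222222222222222222222222222222222222222222222222222"], null]
//	}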

// NewFilter creates a new filter and returns the filter id. It can be used to retrieve logs.
func (s *PublicFilterAPI) NewFilter(args NewFilterArgs) (string, error) {
	externalId, err := newFilterId()
	if err != nil {
		return "", err
	}

	var id int
	if len(args.Addresses) > 0 {
		id, err = s.newLogFilter(args.FromBlock.Int64(), args.ToBlock.Int64(), args.Addresses, args.Topics, nil)
	} else {
		id, err = s.newLogFilter(args.FromBlock.Int64(), args.ToBlock.Int64(), nil, args.Topics, nil)
	}
	if err != nil {
		return "", err
	}

	s.filterMapMu.Lock()
	s.filterMapping[externalId] = id
	s.filterMapMu.Unlock()

	return externalId, nil
}

// GetLogs returns the logs matching the given argument.
func (s *PublicFilterAPI) GetLogs(args NewFilterArgs) []vmlog {
	filter := New(s.chainDb)
	filter.SetBeginBlock(args.FromBlock.Int64())
	filter.SetEndBlock(args.ToBlock.Int64())
	filter.SetAddresses(args.Addresses)
	filter.SetTopics(args.Topics)

	return toRPCLogs(filter.Find(), false)
}
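
// An illustrative one-shot query (criteria are placeholders); unlike NewFilter,
// GetLogs installs nothing that has to be uninstalled afterwards:
//
//	logs := api.GetLogs(NewFilterArgs{
//		FromBlock: rpc.LatestBlockNumber,
//		ToBlock:   rpc.LatestBlockNumber,
//	})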

// UninstallFilter removes the filter with the given filter id.
func (s *PublicFilterAPI) UninstallFilter(filterId string) bool {
	s.filterMapMu.Lock()
	defer s.filterMapMu.Unlock()

	id, ok := s.filterMapping[filterId]
	if !ok {
		return false
	}

	defer s.filterManager.Remove(id)
	delete(s.filterMapping, filterId)

	if _, ok := s.logQueue[id]; ok {
		s.logMu.Lock()
		defer s.logMu.Unlock()
		delete(s.logQueue, id)
		return true
	}
	if _, ok := s.blockQueue[id]; ok {
		s.blockMu.Lock()
		defer s.blockMu.Unlock()
		delete(s.blockQueue, id)
		return true
	}
	if _, ok := s.transactionQueue[id]; ok {
		s.transactionMu.Lock()
		defer s.transactionMu.Unlock()
		delete(s.transactionQueue, id)
		return true
	}

	return false
}

// getFilterType is a helper utility that determines the type of filter for the given filter id.
func (s *PublicFilterAPI) getFilterType(id int) byte {
	if _, ok := s.blockQueue[id]; ok {
		return blockFilterTy
	} else if _, ok := s.transactionQueue[id]; ok {
		return transactionFilterTy
	} else if _, ok := s.logQueue[id]; ok {
		return logFilterTy
	}

	return unknownFilterTy
}

// blockFilterChanged returns a collection of block hashes for the block filter with the given id.
func (s *PublicFilterAPI) blockFilterChanged(id int) []common.Hash {
	s.blockMu.Lock()
	defer s.blockMu.Unlock()

	if s.blockQueue[id] != nil {
		return s.blockQueue[id].get()
	}
	return nil
}

// transactionFilterChanged returns a collection of transaction hashes for the pending
// transaction filter with the given id.
func (s *PublicFilterAPI) transactionFilterChanged(id int) []common.Hash {
	s.transactionMu.Lock()
	defer s.transactionMu.Unlock()

	if s.transactionQueue[id] != nil {
		return s.transactionQueue[id].get()
	}
	return nil
}

// logFilterChanged returns a collection of logs for the log filter with the given id.
func (s *PublicFilterAPI) logFilterChanged(id int) []vmlog {
	s.logMu.Lock()
	defer s.logMu.Unlock()

	if s.logQueue[id] != nil {
		return s.logQueue[id].get()
	}
	return nil
}

// GetFilterLogs returns the logs for the filter with the given id.
func (s *PublicFilterAPI) GetFilterLogs(filterId string) []vmlog {
	s.filterMapMu.Lock()
	id, ok := s.filterMapping[filterId]
	s.filterMapMu.Unlock()
	if !ok {
		return toRPCLogs(nil, false)
	}

	if filter := s.filterManager.Get(id); filter != nil {
		return toRPCLogs(filter.Find(), false)
	}

	return toRPCLogs(nil, false)
}

// GetFilterChanges returns the logs for the filter with the given id since the last time it was called.
// This can be used for polling.
func (s *PublicFilterAPI) GetFilterChanges(filterId string) interface{} {
	s.filterMapMu.Lock()
	id, ok := s.filterMapping[filterId]
	s.filterMapMu.Unlock()

	if !ok { // filter not found
		return []interface{}{}
	}

	switch s.getFilterType(id) {
	case blockFilterTy:
		return returnHashes(s.blockFilterChanged(id))
	case transactionFilterTy:
		return returnHashes(s.transactionFilterChanged(id))
	case logFilterTy:
		return s.logFilterChanged(id)
	}

	return []interface{}{}
}
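
// exampleFilterPolling is an illustrative sketch, not part of the original API:
// it installs a log filter for a placeholder address, polls it once and removes
// it again.
func exampleFilterPolling(api *PublicFilterAPI) ([]vmlog, error) {
	args := NewFilterArgs{
		FromBlock: rpc.LatestBlockNumber,
		ToBlock:   rpc.LatestBlockNumber,
		Addresses: []common.Address{common.HexToAddress("0x1111111111111111111111111111111111111111")},
	}

	id, err := api.NewFilter(args)
	if err != nil {
		return nil, err
	}
	// Uninstalling also removes the filter from the filter manager.
	defer api.UninstallFilter(id)

	// For log filters GetFilterChanges returns the logs queued since the
	// previous poll as a []vmlog.
	logs, _ := api.GetFilterChanges(id).([]vmlog)
	return logs, nil
}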

type vmlog struct {
	*vm.Log
	Removed bool `json:"removed"`
}

type logQueue struct {
	mu sync.Mutex

	logs    []vmlog
	timeout time.Time
	id      int
}

func (l *logQueue) add(logs ...vmlog) {
	l.mu.Lock()
	defer l.mu.Unlock()

	l.logs = append(l.logs, logs...)
}

func (l *logQueue) get() []vmlog {
	l.mu.Lock()
	defer l.mu.Unlock()

	l.timeout = time.Now()
	tmp := l.logs
	l.logs = nil
	return tmp
}

type hashQueue struct {
	mu sync.Mutex

	hashes  []common.Hash
	timeout time.Time
	id      int
}

func (l *hashQueue) add(hashes ...common.Hash) {
	l.mu.Lock()
	defer l.mu.Unlock()

	l.hashes = append(l.hashes, hashes...)
}

func (l *hashQueue) get() []common.Hash {
	l.mu.Lock()
	defer l.mu.Unlock()

	l.timeout = time.Now()
	tmp := l.hashes
	l.hashes = nil
	return tmp
}

// newFilterId generates a new random filter identifier that can be exposed to the outer world. By publishing random
// identifiers it is not feasible for DApps to guess the filter ids of other DApps and uninstall or poll them,
// causing the affected DApp to miss data. The returned identifier is "0x" followed by 32 hexadecimal characters.
func newFilterId() (string, error) {
	var subid [16]byte
	n, _ := rand.Read(subid[:])
	if n != 16 {
		return "", errors.New("unable to generate filter id")
	}
	return "0x" + hex.EncodeToString(subid[:]), nil
}

// toRPCLogs is a helper that converts a vm.Logs array into a structure that
// holds additional information about the logs, such as whether they were deleted.
// When nil is given it creates an empty slice instead, as required by the RPC
// specification.
func toRPCLogs(logs vm.Logs, removed bool) []vmlog {
	convertedLogs := make([]vmlog, len(logs))
	for i, log := range logs {
		convertedLogs[i] = vmlog{Log: log, Removed: removed}
	}
	return convertedLogs
}

// returnHashes is a helper that returns an empty hash array in case the given hash array is nil,
// otherwise it returns the given hashes. The RPC interface requires that an array is always returned.
func returnHashes(hashes []common.Hash) []common.Hash {
	if hashes == nil {
		return []common.Hash{}
	}
	return hashes
}