fix: eth: strict event parsing
We now enforce the following rules:
1. No duplicate topics or data.
2. Topic values must be exactly 32 bytes.
3. Topics may not be skipped (e.g., no t1 & t3 without a t2).
4. Raw codecs only.

We _don't_ require that topics/data be emitted in any specific order. We _skip_ entries with unknown keys. We _drop_ events that violate the above rules.
parent f1381ad535
commit ece8f25511
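For illustration, here is a minimal sketch of how the rules above play out against the new ethLogFromEvent helper added in this commit. It is written as an extra test in the same package (the helper is unexported); it is not part of this commit, and the entry values are placeholders:

package full

import (
	"testing"

	"github.com/ipfs/go-cid"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/chain/types"
)

// Hypothetical example, not part of this commit: one valid topic, one data
// entry, and an unknown key that is skipped rather than treated as fatal.
func TestEthLogFromEventRulesExample(t *testing.T) {
	data, topics, ok := ethLogFromEvent([]types.EventEntry{
		{Key: "t1", Codec: cid.Raw, Value: make([]byte, 32)}, // 32-byte raw topic
		{Key: "xyz", Codec: cid.Raw, Value: []byte{0x1}},     // unknown key: skipped, not fatal
		{Key: "d", Codec: cid.Raw, Value: []byte{0x2}},       // event data
	})
	require.True(t, ok)
	require.Equal(t, []byte{0x2}, data)
	require.Len(t, topics, 1)

	// Any rule violation drops the whole event: a duplicate "t1" or "d",
	// a topic value that is not exactly 32 bytes, a "t2" without a "t1",
	// or a non-raw codec, as below.
	_, _, ok = ethLogFromEvent([]types.EventEntry{
		{Key: "t1", Codec: cid.DagCBOR, Value: make([]byte, 32)}, // non-raw codec: dropped
	})
	require.False(t, ok)
}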
@@ -25,13 +25,6 @@ import (
	"github.com/filecoin-project/lotus/lib/must"
)

var (
	EthTopic1 = "t1"
	EthTopic2 = "t2"
	EthTopic3 = "t3"
	EthTopic4 = "t4"
)

var ErrInvalidAddress = errors.New("invalid Filecoin Eth address")

type EthUint64 uint64
@@ -2153,16 +2153,6 @@ func ParseEthLog(in map[string]interface{}) (*ethtypes.EthLog, error) {
	return el, err
}

func paddedEthBytes(orig []byte) ethtypes.EthBytes {
	needed := 32 - len(orig)
	if needed <= 0 {
		return orig
	}
	ret := make([]byte, 32)
	copy(ret[needed:], orig)
	return ret
}

func paddedUint64(v uint64) ethtypes.EthBytes {
	buf := make([]byte, 32)
	binary.BigEndian.PutUint64(buf[24:], v)
@@ -1358,6 +1358,65 @@ type filterTipSetCollector interface {
	TakeCollectedTipSets(context.Context) []types.TipSetKey
}

func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes.EthHash, ok bool) {
	var (
		topicsFound      [4]bool
		topicsFoundCount int
		dataFound        bool
	)
	for _, entry := range entries {
		// Check if the key is t1..t4
		if len(entry.Key) == 2 && "t1" <= entry.Key && entry.Key <= "t4" {
			// '1' - '1' == 0, etc.
			idx := int(entry.Key[1] - '1')

			// Drop events with mis-sized topics.
			if len(entry.Value) != 32 {
				return nil, nil, false
			}

			// Drop events with non-raw topics to avoid mistakes.
			if entry.Codec != cid.Raw {
				return nil, nil, false
			}

			// Drop events with duplicate topics.
			if topicsFound[idx] {
				return nil, nil, false
			}
			topicsFound[idx] = true
			topicsFoundCount++

			// Extend the topics array
			for len(topics) <= idx {
				topics = append(topics, ethtypes.EthHash{})
			}
			copy(topics[idx][:], entry.Value)
		} else if entry.Key == "d" {
			// Drop events with non-raw data to avoid mistakes.
			if entry.Codec != cid.Raw {
				return nil, nil, false
			}

			// Drop events with duplicate data fields.
			if dataFound {
				return nil, nil, false
			}

			dataFound = true
			data = entry.Value
		}

		// Skip entries we don't understand (makes it easier to extend things).
	}

	// Drop events with skipped topics.
	if len(topics) != topicsFoundCount {
		return nil, nil, false
	}
	return data, topics, true
}

func ethFilterResultFromEvents(evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) {
	res := &ethtypes.EthFilterResult{}
	for _, ev := range evs {
@@ -1367,24 +1426,14 @@ func ethFilterResultFromEvents(evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) {
			TransactionIndex: ethtypes.EthUint64(ev.MsgIdx),
			BlockNumber:      ethtypes.EthUint64(ev.Height),
		}
		var (
			err error
			ok  bool
		)

		var err error

		for _, entry := range ev.Entries {
			// Skip all events that aren't "raw" data.
			if entry.Codec != cid.Raw {
				continue
			}
			if entry.Key == ethtypes.EthTopic1 || entry.Key == ethtypes.EthTopic2 || entry.Key == ethtypes.EthTopic3 || entry.Key == ethtypes.EthTopic4 {
				if len(entry.Value) != 32 {
					continue
				}
				var value ethtypes.EthHash
				copy(value[:], entry.Value)
				log.Topics = append(log.Topics, value)
			} else {
				log.Data = entry.Value
			}
		log.Data, log.Topics, ok = ethLogFromEvent(ev.Entries)
		if !ok {
			continue
		}

		log.Address, err = ethtypes.EthAddressFromFilecoinAddress(ev.EmitterAddr)
@@ -1912,23 +1961,16 @@ func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLook
			BlockNumber: blockNumber,
		}

		for _, entry := range evt.Entries {
			// Ignore any non-raw values/keys.
			if entry.Codec != cid.Raw {
				continue
			}
			if entry.Key == ethtypes.EthTopic1 || entry.Key == ethtypes.EthTopic2 || entry.Key == ethtypes.EthTopic3 || entry.Key == ethtypes.EthTopic4 {
				if len(entry.Value) != 32 {
					continue
				}
				var value ethtypes.EthHash
				copy(value[:], entry.Value)
				l.Topics = append(l.Topics, value)
				ethtypes.EthBloomSet(receipt.LogsBloom, entry.Value)
			} else {
				l.Data = entry.Value
			}
		data, topics, ok := ethLogFromEvent(evt.Entries)
		if !ok {
			// not an eth event.
			continue
		}
		for _, topic := range topics {
			ethtypes.EthBloomSet(receipt.LogsBloom, topic[:])
		}
		l.Data = data
		l.Topics = topics

		addr, err := address.NewIDAddress(uint64(evt.Emitter))
		if err != nil {
node/impl/full/eth_test.go (new file, 101 lines)
@@ -0,0 +1,101 @@
package full

import (
	"testing"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/types/ethtypes"
	"github.com/ipfs/go-cid"
	"github.com/stretchr/testify/require"
)

func TestEthLogFromEvent(t *testing.T) {
	// basic empty
	data, topics, ok := ethLogFromEvent(nil)
	require.True(t, ok)
	require.Nil(t, data)
	require.Nil(t, topics)

	// basic topic
	data, topics, ok = ethLogFromEvent([]types.EventEntry{{
		Flags: 0,
		Key:   "t1",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}})
	require.True(t, ok)
	require.Nil(t, data)
	require.Len(t, topics, 1)
	require.Equal(t, topics[0], ethtypes.EthHash{})

	// basic topic with data
	data, topics, ok = ethLogFromEvent([]types.EventEntry{{
		Flags: 0,
		Key:   "t1",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}, {
		Flags: 0,
		Key:   "d",
		Codec: cid.Raw,
		Value: []byte{0x0},
	}})
	require.True(t, ok)
	require.Equal(t, data, []byte{0x0})
	require.Len(t, topics, 1)
	require.Equal(t, topics[0], ethtypes.EthHash{})

	// skip topic
	_, _, ok = ethLogFromEvent([]types.EventEntry{{
		Flags: 0,
		Key:   "t2",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}})
	require.False(t, ok)

	// duplicate topic
	_, _, ok = ethLogFromEvent([]types.EventEntry{{
		Flags: 0,
		Key:   "t1",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}, {
		Flags: 0,
		Key:   "t1",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}})
	require.False(t, ok)

	// duplicate data
	_, _, ok = ethLogFromEvent([]types.EventEntry{{
		Flags: 0,
		Key:   "d",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}, {
		Flags: 0,
		Key:   "d",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}})
	require.False(t, ok)

	// unknown key is fine
	data, topics, ok = ethLogFromEvent([]types.EventEntry{{
		Flags: 0,
		Key:   "t5",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}, {
		Flags: 0,
		Key:   "t1",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}})
	require.True(t, ok)
	require.Nil(t, data)
	require.Len(t, topics, 1)
	require.Equal(t, topics[0], ethtypes.EthHash{})
}
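The commit message notes that topics and data need not be emitted in any particular order, a case the new tests do not exercise directly. A possible follow-up test in the same file (same package and imports; not part of this commit) might look like:

// Hypothetical extra case: topics emitted out of order are still accepted,
// since ethLogFromEvent indexes topics by key rather than by position.
func TestEthLogFromEventTopicOrder(t *testing.T) {
	data, topics, ok := ethLogFromEvent([]types.EventEntry{{
		Flags: 0,
		Key:   "t2",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}, {
		Flags: 0,
		Key:   "t1",
		Codec: cid.Raw,
		Value: make([]byte, 32),
	}})
	require.True(t, ok)
	require.Nil(t, data)
	require.Len(t, topics, 2)
	require.Equal(t, topics[0], ethtypes.EthHash{})
	require.Equal(t, topics[1], ethtypes.EthHash{})
}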