evm: increase performance of tx Log storage (#461)

* revise the way logs are stored in the stateDB

* wording

* refine GetTxLogs/SetLogs

* nit

* add bytes_uint16 convert helper

* use helper instead of big.Int

* nit

* helper tests and fixes

* handle the logs length in SetLogs

* revise Get/Set logs with store.iterator

* remove Bytes<->Uint16 functions

* remove unused const

* simplify SetLog(s)

* make logs return determinism

* Add comments

* update changelog

* Update CHANGELOG.md

Co-authored-by: Federico Kunze Küllmer <31522760+fedekunze@users.noreply.github.com>
This commit is contained in:
JayT106 2021-08-31 04:32:11 -04:00 committed by GitHub
parent c6c9330e80
commit 9a8827e790
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 91 additions and 31 deletions

View File

@@ -54,6 +54,10 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (encoding) [tharsis#478](https://github.com/tharsis/ethermint/pull/478) Register `Evidence` to amino codec. * (encoding) [tharsis#478](https://github.com/tharsis/ethermint/pull/478) Register `Evidence` to amino codec.
* (rpc) [tharsis#478](https://github.com/tharsis/ethermint/pull/481) Getting the node configuration when calling the `miner` rpc methods. * (rpc) [tharsis#478](https://github.com/tharsis/ethermint/pull/481) Getting the node configuration when calling the `miner` rpc methods.
### Improvements
* (evm) [tharsis#461](https://github.com/tharsis/ethermint/pull/461) Increase performance of `StateDB` transaction log storage (r/w).
## [v0.5.0] - 2021-08-20 ## [v0.5.0] - 2021-08-20
### State Machine Breaking ### State Machine Breaking

View File

@@ -232,15 +232,21 @@ func (k Keeper) BlockLogs(c context.Context, req *types.QueryBlockLogsRequest) (
ctx := sdk.UnwrapSDKContext(c) ctx := sdk.UnwrapSDKContext(c)
store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefixLogs) store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefixLogs)
txLogs := []types.TransactionLogs{}
mapOrder := []string{}
logs := make(map[string][]*types.Log)
pageRes, err := query.FilteredPaginate(store, req.Pagination, func(_, value []byte, accumulate bool) (bool, error) { pageRes, err := query.FilteredPaginate(store, req.Pagination, func(_, value []byte, accumulate bool) (bool, error) {
var txLog types.TransactionLogs var txLog types.Log
k.cdc.MustUnmarshal(value, &txLog) k.cdc.MustUnmarshal(value, &txLog)
if len(txLog.Logs) > 0 && txLog.Logs[0].BlockHash == req.Hash { if txLog.BlockHash == req.Hash {
if accumulate { if accumulate {
txLogs = append(txLogs, txLog) if len(logs[txLog.TxHash]) == 0 {
mapOrder = append(mapOrder, txLog.TxHash)
}
logs[txLog.TxHash] = append(logs[txLog.TxHash], &txLog)
} }
return true, nil return true, nil
} }
@@ -252,8 +258,15 @@ func (k Keeper) BlockLogs(c context.Context, req *types.QueryBlockLogsRequest) (
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
var txsLogs = []types.TransactionLogs{}
for _, txHash := range mapOrder {
if len(logs[txHash]) > 0 {
txsLogs = append(txsLogs, types.TransactionLogs{Hash: txHash, Logs: logs[txHash]})
}
}
return &types.QueryBlockLogsResponse{ return &types.QueryBlockLogsResponse{
TxLogs: txLogs, TxLogs: txsLogs,
Pagination: pageRes, Pagination: pageRes,
}, nil }, nil
} }

View File

@@ -474,7 +474,7 @@ func (suite *KeeperTestSuite) TestQueryBlockLogs() {
TxHash: ethcmn.BytesToHash([]byte("tx_hash_1")).String(), TxHash: ethcmn.BytesToHash([]byte("tx_hash_1")).String(),
TxIndex: 1, TxIndex: 1,
BlockHash: ethcmn.BytesToHash([]byte("block_hash")).String(), BlockHash: ethcmn.BytesToHash([]byte("block_hash")).String(),
Index: 0, Index: 1,
Removed: false, Removed: false,
}, },
{ {
@@ -485,7 +485,7 @@ func (suite *KeeperTestSuite) TestQueryBlockLogs() {
TxHash: ethcmn.BytesToHash([]byte("tx_hash_1")).String(), TxHash: ethcmn.BytesToHash([]byte("tx_hash_1")).String(),
TxIndex: 1, TxIndex: 1,
BlockHash: ethcmn.BytesToHash([]byte("block_hash")).String(), BlockHash: ethcmn.BytesToHash([]byte("block_hash")).String(),
Index: 0, Index: 2,
Removed: false, Removed: false,
}, },
}, },

View File

@@ -229,16 +229,30 @@ func (k Keeper) ResetRefundTransient(ctx sdk.Context) {
// GetAllTxLogs return all the transaction logs from the store. // GetAllTxLogs return all the transaction logs from the store.
func (k Keeper) GetAllTxLogs(ctx sdk.Context) []types.TransactionLogs { func (k Keeper) GetAllTxLogs(ctx sdk.Context) []types.TransactionLogs {
store := ctx.KVStore(k.storeKey) store := ctx.KVStore(k.storeKey)
iterator := sdk.KVStorePrefixIterator(store, types.KeyPrefixLogs) iter := sdk.KVStorePrefixIterator(store, types.KeyPrefixLogs)
defer iterator.Close() defer iter.Close()
mapOrder := []string{}
var mapLogs = make(map[string][]*types.Log)
for ; iter.Valid(); iter.Next() {
var txLog types.Log
k.cdc.MustUnmarshal(iter.Value(), &txLog)
txlogs := mapLogs[txLog.TxHash]
if len(txlogs) == 0 {
mapOrder = append(mapOrder, txLog.TxHash)
}
txlogs = append(txlogs, &txLog)
mapLogs[txLog.TxHash] = txlogs
}
txsLogs := []types.TransactionLogs{} txsLogs := []types.TransactionLogs{}
for ; iterator.Valid(); iterator.Next() { for _, txHash := range mapOrder {
var txLog types.TransactionLogs if len(mapLogs[txHash]) > 0 {
k.cdc.MustUnmarshal(iterator.Value(), &txLog) txLogs := types.TransactionLogs{Hash: txHash, Logs: mapLogs[txHash]}
txsLogs = append(txsLogs, txLogs)
// add a new entry }
txsLogs = append(txsLogs, txLog)
} }
return txsLogs return txsLogs
} }
@@ -248,31 +262,64 @@ func (k Keeper) GetAllTxLogs(ctx sdk.Context) []types.TransactionLogs {
func (k Keeper) GetTxLogs(txHash common.Hash) []*ethtypes.Log { func (k Keeper) GetTxLogs(txHash common.Hash) []*ethtypes.Log {
store := prefix.NewStore(k.Ctx().KVStore(k.storeKey), types.KeyPrefixLogs) store := prefix.NewStore(k.Ctx().KVStore(k.storeKey), types.KeyPrefixLogs)
bz := store.Get(txHash.Bytes()) // We store the logs with key equal to txHash.Bytes() | sdk.Uint64ToBigEndian(uint64(log.Index)),
if len(bz) == 0 { // therefore, we set the end boundary (excluded) to txHash.Bytes() | uint64.Max -> []byte
return []*ethtypes.Log{} var end = txHash.Bytes()
end = append(end, []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}...)
iter := store.Iterator(txHash.Bytes(), end)
defer iter.Close()
logs := []*ethtypes.Log{}
for ; iter.Valid(); iter.Next() {
var log types.Log
k.cdc.MustUnmarshal(iter.Value(), &log)
logs = append(logs, log.ToEthereum())
} }
var logs types.TransactionLogs return logs
k.cdc.MustUnmarshal(bz, &logs)
return logs.EthLogs()
} }
// SetLogs sets the logs for a transaction in the KVStore. // SetLogs sets the logs for a transaction in the KVStore.
func (k Keeper) SetLogs(txHash common.Hash, logs []*ethtypes.Log) { func (k Keeper) SetLogs(txHash common.Hash, logs []*ethtypes.Log) {
store := prefix.NewStore(k.Ctx().KVStore(k.storeKey), types.KeyPrefixLogs) store := prefix.NewStore(k.Ctx().KVStore(k.storeKey), types.KeyPrefixLogs)
txLogs := types.NewTransactionLogsFromEth(txHash, logs) for _, log := range logs {
bz := k.cdc.MustMarshal(&txLogs) var key = txHash.Bytes()
key = append(key, sdk.Uint64ToBigEndian(uint64(log.Index))...)
txIndexLog := types.NewLogFromEth(log)
bz := k.cdc.MustMarshal(txIndexLog)
store.Set(key, bz)
}
}
store.Set(txHash.Bytes(), bz) // SetLog sets the log for a transaction in the KVStore.
func (k Keeper) SetLog(log *ethtypes.Log) {
store := prefix.NewStore(k.Ctx().KVStore(k.storeKey), types.KeyPrefixLogs)
var key = log.TxHash.Bytes()
key = append(key, sdk.Uint64ToBigEndian(uint64(log.Index))...)
txIndexLog := types.NewLogFromEth(log)
bz := k.cdc.MustMarshal(txIndexLog)
store.Set(key, bz)
} }
// DeleteTxLogs removes the logs from the KVStore. It is used during journal.Revert. // DeleteTxLogs removes the logs from the KVStore. It is used during journal.Revert.
func (k Keeper) DeleteTxLogs(ctx sdk.Context, txHash common.Hash) { func (k Keeper) DeleteTxLogs(ctx sdk.Context, txHash common.Hash) {
store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefixLogs) store := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefixLogs)
store.Delete(txHash.Bytes())
// We store the logs with key equal to txHash.Bytes() | sdk.Uint64ToBigEndian(uint64(log.Index)),
// therefore, we set the end boundary (excluded) to txHash.Bytes() | uint64.Max -> []byte
var end = txHash.Bytes()
end = append(end, []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}...)
iter := store.Iterator(txHash.Bytes(), end)
defer iter.Close()
for ; iter.Valid(); iter.Next() {
store.Delete(iter.Key())
}
} }
// GetLogSizeTransient returns EVM log index on the current block. // GetLogSizeTransient returns EVM log index on the current block.

View File

@@ -607,11 +607,7 @@ func (k *Keeper) AddLog(log *ethtypes.Log) {
log.Index = uint(k.GetLogSizeTransient()) log.Index = uint(k.GetLogSizeTransient())
k.IncreaseLogSizeTransient() k.IncreaseLogSizeTransient()
k.SetLog(log)
logs := k.GetTxLogs(log.TxHash)
logs = append(logs, log)
k.SetLogs(log.TxHash, logs)
k.Logger(k.Ctx()).Debug( k.Logger(k.Ctx()).Debug(
"log added", "log added",