fix(x/group): propagate events correctly to current context (#12888)

* fix(x/group): propagate events correctly to current context

* update logger to use the current context

* add changelog entry

Co-authored-by: Aleksandr Bezobchuk <alexanderbez@users.noreply.github.com>
Damian Nolan, 2022-08-10 15:35:33 +02:00, committed by GitHub
parent 1c16c11d5a
commit 014bfae00f
2 changed files with 9 additions and 2 deletions
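
Background on the bug: when the group keeper routes proposal messages through the SDK's message router, each handler executes against a context carrying a freshly created EventManager, so the emitted events come back inside the returned `sdk.Result` rather than landing on the caller's context. Below is a minimal sketch of that behaviour using only stock `sdk.Context`/`EventManager` calls; the `runWithFreshEventManager` and `example` helpers are hypothetical names, not part of this commit.

```go
// Illustrative sketch only, not part of this commit.
package eventsketch

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// runWithFreshEventManager mimics what the SDK message handler does: it runs
// fn against a context carrying a brand-new EventManager and hands the
// collected events back to the caller instead of emitting them on the parent
// context.
func runWithFreshEventManager(ctx sdk.Context, fn func(sdk.Context)) sdk.Events {
	em := sdk.NewEventManager()
	fn(ctx.WithEventManager(em))
	return em.Events()
}

// example shows why re-emitting is needed: without the final EmitEvents call,
// the events collected by the inner EventManager never reach ctx and are
// dropped from the transaction result.
func example(ctx sdk.Context) {
	events := runWithFreshEventManager(ctx, func(innerCtx sdk.Context) {
		innerCtx.EventManager().EmitEvent(
			sdk.NewEvent("proposal_executed", sdk.NewAttribute("proposal_id", "1")),
		)
	})

	ctx.EventManager().EmitEvents(events)
}
```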

CHANGELOG.md

@@ -115,6 +115,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
 * (x/staking) [#12303](https://github.com/cosmos/cosmos-sdk/pull/12303) Use bytes instead of string comparison in delete validator queue
 * (x/auth/tx) [#12474](https://github.com/cosmos/cosmos-sdk/pull/12474) Remove condition in GetTxsEvent that disallowed multiple equal signs, which would break event queries with base64 strings (i.e. query by signature).
 * (store/rootmulti) [#12487](https://github.com/cosmos/cosmos-sdk/pull/12487) Fix non-deterministic map iteration.
+* (x/group) [#12888](https://github.com/cosmos/cosmos-sdk/pull/12888) Fix event propagation to the current context of `x/group` message execution `[]sdk.Result`.
 
 ### Deprecated

x/group/keeper/msg_server.go

@@ -747,13 +747,14 @@ func (k Keeper) Exec(goCtx context.Context, req *group.MsgExec) (*group.MsgExecResponse, error) {
 	var logs string
 	if proposal.Status == group.PROPOSAL_STATUS_ACCEPTED && proposal.ExecutorResult != group.PROPOSAL_EXECUTOR_RESULT_SUCCESS {
 		// Caching context so that we don't update the store in case of failure.
-		ctx, flush := ctx.CacheContext()
+		cacheCtx, flush := ctx.CacheContext()
+
 		addr, err := sdk.AccAddressFromBech32(policyInfo.Address)
 		if err != nil {
 			return nil, err
 		}
 
-		_, err = k.doExecuteMsgs(ctx, k.router, proposal, addr)
+		results, err := k.doExecuteMsgs(cacheCtx, k.router, proposal, addr)
 		if err != nil {
 			proposal.ExecutorResult = group.PROPOSAL_EXECUTOR_RESULT_FAILURE
 			logs = fmt.Sprintf("proposal execution failed on proposal %d, because of error %s", id, err.Error())
@@ -761,6 +762,11 @@ func (k Keeper) Exec(goCtx context.Context, req *group.MsgExec) (*group.MsgExecResponse, error) {
 		} else {
 			proposal.ExecutorResult = group.PROPOSAL_EXECUTOR_RESULT_SUCCESS
 			flush()
+
+			for _, res := range results {
+				// NOTE: The sdk msg handler creates a new EventManager, so events must be correctly propagated back to the current context
+				ctx.EventManager().EmitEvents(res.GetEvents())
+			}
 		}
 	}
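
The pattern introduced above, as a self-contained sketch: run the messages against a branched (cached) context, and only on success flush the state writes and copy the events carried in the returned results back onto the caller's EventManager. The `execWithBranch` and `propagateEvents` helpers below are hypothetical illustrations, not x/group or SDK APIs; the explicit `sdk.Event` conversion keeps the sketch independent of the exact element type of `Result.Events`.

```go
// Illustrative sketch of the pattern used in the diff above; not part of this
// commit. execWithBranch and propagateEvents are hypothetical helper names.
package eventsketch

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// propagateEvents re-emits the events carried in routed message results on
// the caller's EventManager, so they end up in the transaction's event list.
func propagateEvents(ctx sdk.Context, results []sdk.Result) {
	for _, res := range results {
		events := make(sdk.Events, 0, len(res.Events))
		for _, e := range res.Events {
			events = append(events, sdk.Event(e))
		}
		ctx.EventManager().EmitEvents(events)
	}
}

// execWithBranch runs exec against a cached (branched) context. On failure,
// state writes and events are discarded; on success, state is flushed to the
// parent store and the returned events are propagated to the parent context.
func execWithBranch(ctx sdk.Context, exec func(sdk.Context) ([]sdk.Result, error)) error {
	cacheCtx, flush := ctx.CacheContext()

	results, err := exec(cacheCtx)
	if err != nil {
		return err
	}

	flush()
	propagateEvents(ctx, results)
	return nil
}
```

Discarding the branched context on error means a failed proposal execution leaves neither state writes nor stray events behind, which mirrors what the keeper code above does.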