Manage event sending rate for SubscribeActorEvents

Rod Vagg 2024-02-26 15:10:26 +11:00 committed by Phi-rjan
parent 590ce97edf
commit eaf2cd672b


@@ -3,6 +3,7 @@ package full
 import (
     "context"
     "fmt"
+    "time"

     "github.com/ipfs/go-cid"
     "go.uber.org/fx"
@ -10,6 +11,7 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/events/filter"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
@@ -62,9 +64,11 @@ func (a *ActorEventHandler) GetActorEvents(ctx context.Context, evtFilter *types
         return nil, err
     }

-    evs, err := getCollected(ctx, f)
-    _ = a.EventFilterManager.Remove(ctx, f.ID())
-    return evs, err
+    evs, _, _ := getCollected(ctx, f)
+    if err := a.EventFilterManager.Remove(ctx, f.ID()); err != nil {
+        log.Warnf("failed to remove filter: %s", err)
+    }
+    return evs, nil
 }

 type filterParams struct {
@@ -172,75 +176,143 @@ func (a *ActorEventHandler) SubscribeActorEvents(ctx context.Context, evtFilter
         return nil, err
     }

-    out := make(chan *types.ActorEvent, 25)
+    // The goal for the code below is to be able to send events on the `out` channel as fast as
+    // possible and not let it get too far behind the rate at which the events are generated.
+    // For historical events we see the rate at which they were generated by looking at the height
+    // range; we then make sure that the client can receive them at least twice as fast as they
+    // were generated so they catch up quickly enough to receive new events.
+    // For ongoing events we use an exponential moving average of the events per height to make sure
+    // that the client doesn't fall behind.
+    // In both cases we allow a little bit of slack but need to avoid letting the client bloat the
+    // buffer too much.
+    // There is no special handling for reverts, so they will just look like a lot more events per
+    // epoch and the user has to receive them anyway.
+    out := make(chan *types.ActorEvent)
     go func() {
         defer func() {
-            // Tell the caller we're done
+            // tell the caller we're done
             close(out)

-            // Unsubscribe.
             fm.ClearSubChannel()
-            _ = a.EventFilterManager.Remove(ctx, fm.ID())
+            if err := a.EventFilterManager.Remove(ctx, fm.ID()); err != nil {
+                log.Warnf("failed to remove filter: %s", err)
+            }
         }()
-        evs, err := getCollected(ctx, fm)
-        if err != nil {
-            log.Errorf("failed to get collected events: %w", err)
-            return
-        }
-
-        for _, ev := range evs {
-            ev := ev
-            select {
-            case out <- ev:
-            case <-ctx.Done():
-                return
-            default:
-                // TODO: need to fix this, buffer of 25 isn't going to work for prefill without a _really_ fast client or a small number of events
-                log.Errorf("closing event subscription due to slow reader")
-                return
-            }
-        }
+        // Handle any historical events that our filter may have picked up -----------------------------
+
+        evs, minEpoch, maxEpoch := getCollected(ctx, fm)
+        if len(evs) > 0 {
+            // must be able to send events at least twice as fast as they were generated
+            epochRange := maxEpoch - minEpoch
+            if epochRange <= 0 {
+                epochRange = 1
+            }
+            eventsPerEpoch := float64(len(evs)) / float64(epochRange)
+            eventsPerSecond := 2 * eventsPerEpoch / float64(build.BlockDelaySecs)
+            // a minimum rate of 1 event per second if we don't have many events
+            if eventsPerSecond < 1 {
+                eventsPerSecond = 1
+            }
+
+            // send events from evs to the out channel and ensure we don't do it slower than eventsPerSecond
+            ticker := time.NewTicker(time.Second)
+            defer ticker.Stop()
+
+            const maxSlowTicks = 3 // slightly forgiving, allow 3 slow ticks (seconds) before giving up
+            slowTicks := 0
+            sentEvents := 0.0
+
+            for _, ev := range evs {
+                select {
+                case out <- ev:
+                    sentEvents++
+                case <-ticker.C:
+                    if sentEvents < eventsPerSecond {
+                        slowTicks++
+                        if slowTicks >= maxSlowTicks {
+                            log.Errorf("closing event subscription due to slow event sending rate")
+                            return
+                        }
+                    } else {
+                        slowTicks = 0
+                    }
+                    sentEvents = 0
+                case <-ctx.Done():
+                    return
+                }
+            }
+        }
+        // Handle ongoing events from the filter -------------------------------------------------------
+
         in := make(chan interface{}, 256)
         fm.SetSubChannel(in)

+        var buffer []*types.ActorEvent
+        const α = 0.2                        // decay factor for the events per height EMA
+        var eventsPerHeightEma float64 = 256 // exponential moving average of events per height, initially a guess of 256
+        var lastHeight abi.ChainEpoch        // last seen event height
+        var eventsAtCurrentHeight int        // number of events at the current height
+
+        collectEvent := func(ev interface{}) bool {
+            ce, ok := ev.(*filter.CollectedEvent)
+            if !ok {
+                log.Errorf("got unexpected value from event filter: %T", ev)
+                return false
+            }
+
+            if ce.Height > lastHeight {
+                // update the EMA of events per height when the height increases
+                if lastHeight != 0 {
+                    eventsPerHeightEma = α*float64(eventsAtCurrentHeight) + (1-α)*eventsPerHeightEma
+                }
+                lastHeight = ce.Height
+                eventsAtCurrentHeight = 0
+            }
+            eventsAtCurrentHeight++
+
+            buffer = append(buffer, &types.ActorEvent{
+                Entries:   ce.Entries,
+                Emitter:   ce.EmitterAddr,
+                Reverted:  ce.Reverted,
+                Height:    ce.Height,
+                TipSetKey: ce.TipSetKey,
+                MsgCid:    ce.MsgCid,
+            })
+            return true
+        }
+
         for ctx.Err() == nil {
-            select {
-            case val, ok := <-in:
-                if !ok {
-                    // Shutting down.
-                    return
-                }
-
-                ce, ok := val.(*filter.CollectedEvent)
-                if !ok {
-                    log.Errorf("got unexpected value from event filter: %T", val)
-                    return
-                }
-
-                ev := &types.ActorEvent{
-                    Entries:   ce.Entries,
-                    Emitter:   ce.EmitterAddr,
-                    Reverted:  ce.Reverted,
-                    Height:    ce.Height,
-                    TipSetKey: ce.TipSetKey,
-                    MsgCid:    ce.MsgCid,
-                }
-
-                select {
-                case out <- ev:
-                default: // TODO: need to fix this to be more intelligent about the consumption rate vs the accumulation rate
-                    log.Errorf("closing event subscription due to slow reader")
-                    return
-                }
-
-                if len(out) > 5 {
-                    log.Warnf("event subscription is slow, has %d buffered entries", len(out))
-                }
-            case <-ctx.Done():
-                return
-            }
+            if len(buffer) > 0 {
+                // check if we need to disconnect the client because they've fallen behind; always allow
+                // at least 8 events in the buffer to provide a little bit of slack
+                if len(buffer) > 8 && float64(len(buffer)) > eventsPerHeightEma/2 {
+                    log.Errorf("closing event subscription due to slow event sending rate")
+                    return
+                }
+
+                select {
+                case ev, ok := <-in: // incoming event
+                    if !ok || !collectEvent(ev) {
+                        return
+                    }
+                case out <- buffer[0]: // successful send
+                    buffer[0] = nil
+                    buffer = buffer[1:]
+                case <-ctx.Done():
+                    return
+                }
+            } else {
+                select {
+                case ev, ok := <-in: // incoming event
+                    if !ok || !collectEvent(ev) {
+                        return
+                    }
+                case <-ctx.Done():
+                    return
+                }
+            }
         }
     }()
@@ -248,12 +320,19 @@ func (a *ActorEventHandler) SubscribeActorEvents(ctx context.Context, evtFilter
     return out, nil
 }

-func getCollected(ctx context.Context, f *filter.EventFilter) ([]*types.ActorEvent, error) {
+func getCollected(ctx context.Context, f *filter.EventFilter) ([]*types.ActorEvent, abi.ChainEpoch, abi.ChainEpoch) {
     ces := f.TakeCollectedEvents(ctx)

     var out []*types.ActorEvent
+    var min, max abi.ChainEpoch

     for _, e := range ces {
+        if min == 0 || e.Height < min {
+            min = e.Height
+        }
+        if e.Height > max {
+            max = e.Height
+        }
         out = append(out, &types.ActorEvent{
             Entries:   e.Entries,
             Emitter:   e.EmitterAddr,
@@ -264,5 +343,5 @@ func getCollected(ctx context.Context, f *filter.EventFilter) ([]*types.ActorEve
         })
     }

-    return out, nil
+    return out, min, max
 }
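
For reference, a minimal standalone sketch of the historical catch-up mechanism above: drain a pre-collected slice of events while requiring the receiver to sustain a minimum rate, with a few slow ticks of grace before disconnecting. The names here (drainAtMinimumRate, the int event payload) are invented for illustration and are not part of the Lotus API; unlike the diff, this sketch retries the same event after a slow tick rather than advancing the loop.

package main

import (
    "context"
    "fmt"
    "time"
)

// drainAtMinimumRate sends pre-collected events on out and returns false if
// the receiver sustains fewer than minPerSecond receives for maxSlowTicks
// consecutive one-second ticks, or if the context is cancelled.
func drainAtMinimumRate(ctx context.Context, out chan<- int, events []int, minPerSecond float64) bool {
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()

    const maxSlowTicks = 3 // grace period: three slow seconds before giving up
    slowTicks := 0
    sent := 0.0

    for _, ev := range events {
        for sending := true; sending; {
            select {
            case out <- ev:
                sent++
                sending = false
            case <-ticker.C:
                if sent < minPerSecond {
                    slowTicks++
                    if slowTicks >= maxSlowTicks {
                        return false // reader is persistently too slow
                    }
                } else {
                    slowTicks = 0
                }
                sent = 0 // start counting the next one-second window
            case <-ctx.Done():
                return false
            }
        }
    }
    return true
}

func main() {
    events := make([]int, 100)
    out := make(chan int)
    go func() {
        for ev := range out {
            _ = ev // a consumer that keeps up easily
        }
    }()
    ok := drainAtMinimumRate(context.Background(), out, events, 10)
    close(out)
    fmt.Println("kept up:", ok)
}

Counting sends per ticker window, rather than imposing a per-send deadline, lets a burst of fast sends cover a briefly stalled reader within the same second.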
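And a sketch of the steady-state backpressure decision: an exponential moving average of events per height, compared against the length of the unsent buffer. The emaTracker type and its method names are invented for illustration; the 0.2 decay factor, the initial guess of 256, and the 8-event slack mirror the values in the diff.

package main

import "fmt"

// emaTracker keeps an exponential moving average of events observed per
// height, finalising a height's count whenever a higher height is seen.
type emaTracker struct {
    perHeightEma float64 // EMA of events per height
    lastHeight   int64   // last seen event height
    atCurrent    int     // events counted at the current height
}

func (t *emaTracker) observe(height int64) {
    const alpha = 0.2 // decay factor, as in the diff
    if height > t.lastHeight {
        if t.lastHeight != 0 {
            t.perHeightEma = alpha*float64(t.atCurrent) + (1-alpha)*t.perHeightEma
        }
        t.lastHeight = height
        t.atCurrent = 0
    }
    t.atCurrent++
}

// tooFarBehind reports whether buffered unsent events indicate a reader that
// has fallen behind: more than 8 events of slack and more than half of an
// average height's worth of events.
func (t *emaTracker) tooFarBehind(buffered int) bool {
    return buffered > 8 && float64(buffered) > t.perHeightEma/2
}

func main() {
    t := &emaTracker{perHeightEma: 256} // start with a generous guess, as in the diff
    for h := int64(1); h <= 20; h++ {
        for i := 0; i < 10; i++ { // 10 events per height drags the EMA down from 256
            t.observe(h)
        }
    }
    fmt.Printf("EMA after 20 heights: %.1f\n", t.perHeightEma)
    fmt.Println("buffer of 6 too far behind?", t.tooFarBehind(6))
    fmt.Println("buffer of 120 too far behind?", t.tooFarBehind(120))
}

Keeping collection and delivery on a single goroutine, as the diff's two-way select does, means the EMA state and the buffer need no locking.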