lotus/chain/messagepool/pruning.go

package messagepool

import (
	"context"
	"sort"
	"time"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/types"
)
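
// pruneExcessMessages prunes pending messages once the pool has grown to the
// configured high water mark (SizeLimitHigh), running at most once per
// cooldown window.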
func (mp *MessagePool) pruneExcessMessages() error {
	mp.curTsLk.Lock()
	ts := mp.curTs
	mp.curTsLk.Unlock()

	mp.lk.Lock()
	defer mp.lk.Unlock()

	mpCfg := mp.getConfig()
	if mp.currentSize < mpCfg.SizeLimitHigh {
		return nil
	}
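
	// pruneCooldown acts as a single-slot token: it is taken here to prune,
	// and put back after mpCfg.PruneCooldown elapses, so at most one prune
	// can run per cooldown window.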
	select {
	case <-mp.pruneCooldown:
		err := mp.pruneMessages(context.TODO(), ts)
		go func() {
			time.Sleep(mpCfg.PruneCooldown)
			mp.pruneCooldown <- struct{}{}
		}()
		return err
	default:
		return xerrors.New("cannot prune before cooldown")
	}
}
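
// pruneMessages drops pending messages from unprotected actors until the pool
// is back under the configured low water mark (SizeLimitLow). Messages from
// priority addresses and locally published messages are never pruned.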
func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) error {
	start := time.Now()
	defer func() {
		log.Infof("message pruning took %s", time.Since(start))
	}()

	baseFee, err := mp.api.ChainComputeBaseFee(ctx, ts)
	if err != nil {
		return xerrors.Errorf("computing basefee: %w", err)
	}
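	// The per-actor chains below are built against a lower bound on the base
	// fee rather than the exact current value (see getBaseFeeLowerBound).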
	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)

	pending, _ := mp.getPendingMessages(ctx, ts, ts)

	// protected actors -- not pruned
	protected := make(map[address.Address]struct{})

	mpCfg := mp.getConfig()
	// we never prune priority addresses
	for _, actor := range mpCfg.PriorityAddrs {
		pk, err := mp.resolveToKey(ctx, actor)
		if err != nil {
			log.Debugf("pruneMessages failed to resolve priority address: %s", err)
		}

		protected[pk] = struct{}{}
	}

	// we also never prune locally published messages
	mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) {
		protected[actor] = struct{}{}
	})

	// Collect all messages to track which ones to remove and create chains for block inclusion
	pruneMsgs := make(map[cid.Cid]*types.SignedMessage, mp.currentSize)
	keepCount := 0

	var chains []*msgChain
	for actor, mset := range pending {
		// we never prune protected actors
		_, keep := protected[actor]
		if keep {
			keepCount += len(mset)
			continue
		}

		// not a protected actor, track the messages and create chains
		for _, m := range mset {
			pruneMsgs[m.Message.Cid()] = m
		}

		actorChains := mp.createMessageChains(actor, mset, baseFeeLowerBound, ts)
		chains = append(chains, actorChains...)
	}
	// Sort the chains
	sort.Slice(chains, func(i, j int) bool {
		return chains[i].Before(chains[j])
	})

	// Keep messages (remove them from pruneMsgs) from chains while we are under the low water mark
	loWaterMark := mpCfg.SizeLimitLow
keepLoop:
	for _, chain := range chains {
		for _, m := range chain.msgs {
			if keepCount < loWaterMark {
				delete(pruneMsgs, m.Message.Cid())
				keepCount++
			} else {
				break keepLoop
			}
		}
	}
	// and remove all messages that are still in pruneMsgs after processing the chains
	log.Infof("Pruning %d messages", len(pruneMsgs))
	for _, m := range pruneMsgs {
		mp.remove(ctx, m.Message.From, m.Message.Nonce, false)
	}

	return nil
}