package messagepool

import (
	"context"
	"sort"
	"time"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
	"github.com/filecoin-project/lotus/chain/types"
)

// repubMsgLimit caps how many pending messages are republished in a single round.
const repubMsgLimit = 30

// RepublishBatchDelay is the pause between successive publishes; it encourages the
// pubsub subsystem to validate the messages serially, avoiding apparent nonce gaps.
var RepublishBatchDelay = 100 * time.Millisecond
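
// republishPendingMessages republishes this node's local pending messages so they
// keep propagating through the network until they land in a block. Messages are
// grouped into per-actor, nonce-ordered chains, sorted by their priority order
// (msgChain.Before), and selected subject to the block gas limit and repubMsgLimit.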
func (mp *MessagePool) republishPendingMessages(ctx context.Context) error {
	mp.curTsLk.RLock()
	ts := mp.curTs
	mp.curTsLk.RUnlock()

	baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
	if err != nil {
		return xerrors.Errorf("computing basefee: %w", err)
	}
	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)

	pending := make(map[address.Address]map[uint64]*types.SignedMessage)

	mp.lk.Lock()
	mp.republished = nil // clear this to avoid races triggering an early republish
	mp.lk.Unlock()
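
	// gather the pending message sets for all locally tracked addresses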
	mp.lk.RLock()
	mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) {
		mset, ok, err := mp.getPendingMset(ctx, actor)
		if err != nil {
			log.Debugf("failed to get mset: %s", err)
			return
		}

		if !ok {
			return
		}

		if len(mset.msgs) == 0 {
			return
		}

		// we need to copy this while holding the lock to avoid races with concurrent modification
		pend := make(map[uint64]*types.SignedMessage, len(mset.msgs))
		for nonce, m := range mset.msgs {
			pend[nonce] = m
		}
		pending[actor] = pend
	})
	mp.lk.RUnlock()

	if len(pending) == 0 {
		return nil
	}
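
	// organize each actor's pending messages into nonce-ordered chains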
	var chains []*msgChain
	for actor, mset := range pending {
		// We use the baseFee lower bound for createMessageChains so that we optimistically include
		// chains that might become profitable in the next 20 blocks.
		// We still check the lowerBound condition for individual messages so that we don't send
		// messages that will be rejected by the mpool spam protector, so this is safe to do.
		next := mp.createMessageChains(actor, mset, baseFeeLowerBound, ts)
		chains = append(chains, next...)
	}

	if len(chains) == 0 {
		return nil
	}
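
	// sort the chains by their priority order (msgChain.Before), best chains first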
	sort.Slice(chains, func(i, j int) bool {
		return chains[i].Before(chains[j])
	})
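
	// greedily select messages, chain by chain, until we run out of block gas
	// or exceed the republish limit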
	gasLimit := build.BlockGasLimit
	minGas := int64(gasguess.MinGas)
	var msgs []*types.SignedMessage

loop:
	for i := 0; i < len(chains); {
		chain := chains[i]

		// we can exceed this if we have picked (some) longer chain already
		if len(msgs) > repubMsgLimit {
			break
		}

		// there is not enough gas for any message
		if gasLimit <= minGas {
			break
		}

		// has the chain been invalidated?
		if !chain.valid {
			i++
			continue
		}

		// does it fit in a block?
		if chain.gasLimit <= gasLimit {
			// check the baseFee lower bound -- only republish messages that can be included in the chain
			// within the next 20 blocks.
			for _, m := range chain.msgs {
				if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
					chain.Invalidate()
					continue loop
				}
				gasLimit -= m.Message.GasLimit
				msgs = append(msgs, m)
			}

			// we processed the whole chain, advance
			i++
			continue
		}

		// we can't fit the current chain but there is gas to spare;
		// trim it to fit and push it down to keep the chains sorted
		chain.Trim(gasLimit, repubMsgLimit, mp, baseFee)
		for j := i; j < len(chains)-1; j++ {
			if chains[j].Before(chains[j+1]) {
				break
			}
			chains[j], chains[j+1] = chains[j+1], chains[j]
		}
	}
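
	// enforce the republish limit; count tracks how many messages are actually published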
	count := 0
	if len(msgs) > repubMsgLimit {
		msgs = msgs[:repubMsgLimit]
	}

	log.Infof("republishing %d messages", len(msgs))
	for _, m := range msgs {
		mb, err := m.Serialize()
		if err != nil {
			return xerrors.Errorf("cannot serialize message: %w", err)
		}

		err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), mb)
		if err != nil {
			return xerrors.Errorf("cannot publish: %w", err)
		}

		count++

		if count < len(msgs) {
			// this delay is here to encourage the pubsub subsystem to process the messages serially
			// and avoid creating nonce gaps because of concurrent validation.
			time.Sleep(RepublishBatchDelay)
		}
	}
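
	// record a journal event describing the republished messages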
	if len(msgs) > 0 {
		mp.journal.RecordEvent(mp.evtTypes[evtTypeMpoolRepub], func() interface{} {
			msgsEv := make([]MessagePoolEvtMessage, 0, len(msgs))
			for _, m := range msgs {
				msgsEv = append(msgsEv, MessagePoolEvtMessage{Message: m.Message, CID: m.Cid()})
			}
			return MessagePoolEvt{
				Action:   "repub",
				Messages: msgsEv,
			}
		})
	}

	// track most recently republished messages
	republished := make(map[cid.Cid]struct{})
	for _, m := range msgs[:count] {
		republished[m.Cid()] = struct{}{}
	}

	// update the republished set so that we can trigger early republish from head changes
	mp.lk.Lock()
	mp.republished = republished
	mp.lk.Unlock()

	return nil
}
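
// Illustrative sketch (not part of this file): republishPendingMessages is meant to
// be invoked periodically. A caller could drive it from a ticker roughly as follows;
// the interval and loop shown here are hypothetical, not the actual wiring used by
// the message pool:
//
//	ticker := time.NewTicker(5 * time.Minute)
//	defer ticker.Stop()
//	for {
//		select {
//		case <-ticker.C:
//			if err := mp.republishPendingMessages(ctx); err != nil {
//				log.Warnf("error republishing messages: %s", err)
//			}
//		case <-ctx.Done():
//			return
//		}
//	}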