lotus/chain/messagepool/messagepool.go

1667 lines
44 KiB
Go
Raw Normal View History

2019-12-01 23:11:43 +00:00
package messagepool
2019-07-05 14:46:21 +00:00
import (
2019-11-23 19:01:56 +00:00
"bytes"
2019-11-17 07:44:06 +00:00
"context"
"errors"
"fmt"
"math"
2020-08-26 11:38:26 +00:00
stdbig "math/big"
"sort"
2019-07-05 14:46:21 +00:00
"sync"
"time"
2019-07-08 12:51:45 +00:00
2020-08-21 17:17:30 +00:00
"github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
2019-11-23 19:01:56 +00:00
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
"github.com/ipfs/go-datastore/query"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
2022-06-14 15:00:51 +00:00
"github.com/minio/blake2b-simd"
"github.com/raulk/clock"
"go.opencensus.io/stats"
2019-09-20 09:01:49 +00:00
"golang.org/x/xerrors"
2022-06-14 15:00:51 +00:00
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-address"
2022-06-14 15:00:51 +00:00
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
lps "github.com/filecoin-project/pubsub"
2020-07-10 14:43:14 +00:00
2019-11-17 07:44:06 +00:00
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/journal"
2021-06-11 11:19:26 +00:00
"github.com/filecoin-project/lotus/metrics"
2019-11-23 19:01:56 +00:00
"github.com/filecoin-project/lotus/node/modules/dtypes"
2019-07-05 14:46:21 +00:00
)
2019-12-01 23:11:43 +00:00
var log = logging.Logger("messagepool")
2020-08-25 10:03:50 +00:00
var futureDebug = false
var rbfNumBig = types.NewInt(uint64(ReplaceByFeePercentageMinimum))
var rbfDenomBig = types.NewInt(100)
var RepublishInterval = time.Duration(10*build.BlockDelaySecs+build.PropagationDelaySecs) * time.Second
2020-08-26 18:37:32 +00:00
var minimumBaseFee = types.NewInt(uint64(build.MinimumBaseFee))
var baseFeeLowerBoundFactor = types.NewInt(10)
var baseFeeLowerBoundFactorConservative = types.NewInt(100)
2020-08-26 18:23:28 +00:00
var MaxActorPendingMessages = 1000
2020-09-18 06:40:43 +00:00
var MaxUntrustedActorPendingMessages = 10
var MaxNonceGap = uint64(4)
const MaxMessageSize = 64 << 10 // 64KiB
// NOTE: When adding a new error type, please make sure to add the new error type in
// func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message)
// in /chain/sub/incoming.go
var (
	ErrMessageTooBig          = errors.New("message too big")
	ErrMessageValueTooHigh    = errors.New("cannot send more filecoin than will ever exist")
	ErrNonceTooLow            = errors.New("message nonce too low")
	ErrGasFeeCapTooLow        = errors.New("gas fee cap too low")
	ErrNotEnoughFunds         = errors.New("not enough funds to execute transaction")
	ErrInvalidToAddr          = errors.New("message had invalid to address")
	ErrSoftValidationFailure  = errors.New("validation failure")
	ErrRBFTooLowPremium       = errors.New("replace by fee has too low GasPremium")
	ErrTooManyPendingMessages = errors.New("too many pending messages for actor")
	ErrNonceGap               = errors.New("unfulfilled nonce gap")
	ErrExistingNonce          = errors.New("message with nonce already exists")
)
const (
	// localMsgsDs is the datastore namespace where locally-submitted
	// messages are persisted (see addLocal).
	localMsgsDs = "/mpool/local"

	// localUpdates is the pubsub channel used for mpool change notifications.
	localUpdates = "update"
)

// Journal event types.
const (
	evtTypeMpoolAdd = iota
	evtTypeMpoolRemove
	evtTypeMpoolRepub
)
// MessagePoolEvt is the journal entry for message pool events.
type MessagePoolEvt struct {
	// Action describes the event, e.g. "add".
	Action string

	// Messages lists the affected messages with their CIDs.
	Messages []MessagePoolEvtMessage

	// Error, when set, records a failure associated with the event.
	Error error `json:",omitempty"`
}

// MessagePoolEvtMessage embeds the unsigned message together with its CID
// for journaling purposes.
type MessagePoolEvtMessage struct {
	types.Message

	CID cid.Cid
}
func init() {
	// if the republish interval is too short compared to the pubsub timecache, adjust it
	if minInterval := pubsub.TimeCacheDuration + time.Duration(build.PropagationDelaySecs)*time.Second; RepublishInterval < minInterval {
		RepublishInterval = minInterval
	}
}
2019-07-05 14:46:21 +00:00
// MessagePool tracks pending (not yet mined) messages indexed by sender,
// reacts to chain head changes, and periodically republishes local messages
// and prunes the pool when it grows too large.
type MessagePool struct {
	lk sync.RWMutex

	ds dtypes.MetadataDS

	// addSema serializes add/push operations to reduce lock contention.
	addSema chan struct{}

	// closer, when closed, stops the background run loop.
	closer chan struct{}

	// repubTk ticks when pending local messages should be republished.
	repubTk      *clock.Ticker
	repubTrigger chan struct{}

	republished map[cid.Cid]struct{}

	// do NOT access this map directly, use isLocal, setLocal, and forEachLocal respectively
	localAddrs map[address.Address]struct{}

	// do NOT access this map directly, use getPendingMset, setPendingMset, deletePendingMset, forEachPending, and clearPending respectively
	pending map[address.Address]*msgSet

	// keyCache memoizes ID-address -> key-address resolutions.
	keyCache *lru.Cache[address.Address, address.Address]

	curTsLk sync.RWMutex // DO NOT LOCK INSIDE lk
	curTs   *types.TipSet

	cfgLk sync.RWMutex
	cfg   *types.MpoolConfig

	api            Provider
	minGasPrice    types.BigInt
	getNtwkVersion func(abi.ChainEpoch) (network.Version, error)

	// currentSize counts distinct pending messages across all senders.
	currentSize int

	// pruneTrigger is a channel used to trigger a mempool pruning
	pruneTrigger chan struct{}

	// pruneCooldown is a channel used to allow a cooldown time between prunes
	pruneCooldown chan struct{}

	blsSigCache *lru.TwoQueueCache[cid.Cid, crypto.Signature]

	changes *lps.PubSub

	// localMsgs persists locally-submitted messages so they survive restarts.
	localMsgs datastore.Datastore

	netName dtypes.NetworkName

	sigValCache *lru.TwoQueueCache[string, struct{}]

	stateNonceCache *lru.Cache[stateNonceCacheKey, uint64]

	evtTypes [3]journal.EventType
	journal  journal.Journal
}

// stateNonceCacheKey keys the state-nonce cache by (tipset, address).
type stateNonceCacheKey struct {
	tsk  types.TipSetKey
	addr address.Address
}
2019-07-05 14:46:21 +00:00
// msgSet holds the pending messages for a single sender, keyed by nonce.
type msgSet struct {
	// msgs maps nonce -> pending signed message.
	msgs map[uint64]*types.SignedMessage
	// nextNonce is the next expected nonce for the sender, given the state
	// nonce plus the contiguous run of pending messages.
	nextNonce uint64
	// requiredFunds accumulates the funds required by all messages in the set.
	requiredFunds *stdbig.Int
}

// newMsgSet creates an empty msgSet whose nextNonce starts at the given
// (state) nonce.
func newMsgSet(nonce uint64) *msgSet {
	return &msgSet{
		msgs:          make(map[uint64]*types.SignedMessage),
		nextNonce:     nonce,
		requiredFunds: stdbig.NewInt(0),
	}
}
// ComputeMinRBF returns the minimum GasPremium a replacement message must
// carry to displace an existing message with premium curPrem, using the
// package-level minimum replace-by-fee percentage.
func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount {
	scaled := types.BigMul(curPrem, rbfNumBig)
	floor := types.BigDiv(scaled, rbfDenomBig)
	return types.BigAdd(floor, types.NewInt(1))
}
// ComputeRBF returns the minimum GasPremium required to replace a message
// whose current premium is curPrem, given a caller-supplied replace-by-fee
// ratio expressed as a percentage.
func ComputeRBF(curPrem abi.TokenAmount, replaceByFeeRatio types.Percent) abi.TokenAmount {
	num := types.NewInt(uint64(replaceByFeeRatio))
	floor := types.BigDiv(types.BigMul(curPrem, num), rbfDenomBig)
	return types.BigAdd(floor, types.NewInt(1))
}
// CapGasFee caps msg's GasFeeCap so that the total fee (GasFeeCap * GasLimit)
// does not exceed the maximum fee from sendSpec — or the node's configured
// default when sendSpec is unset or its MaxFee is zero. When MaximizeFeeCap is
// set, the fee cap is raised to consume the entire allowed max fee. Finally,
// GasPremium is clamped so it never exceeds the (possibly lowered) GasFeeCap.
func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.MessageSendSpec) {
	var maxFee abi.TokenAmount
	var maximizeFeeCap bool
	if sendSpec != nil {
		maxFee = sendSpec.MaxFee
		maximizeFeeCap = sendSpec.MaximizeFeeCap
	}
	if maxFee.Int == nil || maxFee.Equals(big.Zero()) {
		mf, err := mff()
		if err != nil {
			log.Errorf("failed to get default max gas fee: %+v", err)
			mf = big.Zero()
		}
		maxFee = mf
	}

	gaslimit := types.NewInt(uint64(msg.GasLimit))
	totalFee := types.BigMul(msg.GasFeeCap, gaslimit)

	// Only rescale the fee cap for a non-zero gas limit: big.Div panics on a
	// zero divisor, and previously a message with MaximizeFeeCap set and
	// GasLimit == 0 would hit exactly that. A zero-gas-limit message incurs a
	// zero total fee anyway, so no capping is needed.
	if msg.GasLimit > 0 && (maximizeFeeCap || totalFee.GreaterThan(maxFee)) {
		msg.GasFeeCap = big.Div(maxFee, gaslimit)
	}

	msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
}
2020-09-18 06:40:43 +00:00
// add inserts m into the set, maintaining nextNonce and requiredFunds.
// It returns true when the message grew the set (i.e. it was not a
// replacement of an existing nonce), and an error when the message is
// rejected.
//
// strict enables the nonce-gap limit, RBF-with-gap rejection and the
// per-actor pending-message cap; untrusted tightens those further (no nonce
// gap allowed, much smaller pending cap).
func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted bool) (bool, error) {
	nextNonce := ms.nextNonce
	nonceGap := false

	maxNonceGap := MaxNonceGap
	maxActorPendingMessages := MaxActorPendingMessages
	if untrusted {
		maxNonceGap = 0
		maxActorPendingMessages = MaxUntrustedActorPendingMessages
	}

	switch {
	case m.Message.Nonce == nextNonce:
		nextNonce++
		// advance if we are filling a gap
		for _, fillGap := ms.msgs[nextNonce]; fillGap; _, fillGap = ms.msgs[nextNonce] {
			nextNonce++
		}

	case strict && m.Message.Nonce > nextNonce+maxNonceGap:
		return false, xerrors.Errorf("message nonce has too big a gap from expected nonce (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap)

	case m.Message.Nonce > nextNonce:
		nonceGap = true
	}

	exms, has := ms.msgs[m.Message.Nonce]
	if has {
		// refuse RBF if we have a gap
		if strict && nonceGap {
			return false, xerrors.Errorf("rejecting replace by fee because of nonce gap (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap)
		}

		if m.Cid() != exms.Cid() {
			// check if RBF passes
			minPrice := ComputeMinRBF(exms.Message.GasPremium)
			if types.BigCmp(m.Message.GasPremium, minPrice) >= 0 {
				log.Debugw("add with RBF", "oldpremium", exms.Message.GasPremium,
					"newpremium", m.Message.GasPremium, "addr", m.Message.From, "nonce", m.Message.Nonce)
			} else {
				log.Debugf("add with duplicate nonce. message from %s with nonce %d already in mpool,"+
					" increase GasPremium to %s from %s to trigger replace by fee: %s",
					m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
					ErrRBFTooLowPremium)
				return false, xerrors.Errorf("message from %s with nonce %d already in mpool,"+
					" increase GasPremium to %s from %s to trigger replace by fee: %w",
					m.Message.From, m.Message.Nonce, minPrice, m.Message.GasPremium,
					ErrRBFTooLowPremium)
			}
		} else {
			// identical message (same CID) already pending
			return false, xerrors.Errorf("message from %s with nonce %d already in mpool: %w",
				m.Message.From, m.Message.Nonce, ErrExistingNonce)
		}

		// the replaced message no longer contributes to required funds
		ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.RequiredFunds().Int)
		// ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.Value.Int)
	}

	if !has && strict && len(ms.msgs) >= maxActorPendingMessages {
		log.Errorf("too many pending messages from actor %s", m.Message.From)
		return false, ErrTooManyPendingMessages
	}

	if strict && nonceGap {
		log.Debugf("adding nonce-gapped message from %s (nonce: %d, nextNonce: %d)",
			m.Message.From, m.Message.Nonce, nextNonce)
	}

	ms.nextNonce = nextNonce
	ms.msgs[m.Message.Nonce] = m
	ms.requiredFunds.Add(ms.requiredFunds, m.Message.RequiredFunds().Int)
	// ms.requiredFunds.Add(ms.requiredFunds, m.Message.Value.Int)

	return !has, nil
}
// rm removes the message with the given nonce from the set, updating
// requiredFunds and nextNonce. applied indicates the message was removed
// because it was applied in a tipset (advancing nextNonce past it); when
// false the message was pruned, which rewinds nextNonce so the slot can be
// refilled.
func (ms *msgSet) rm(nonce uint64, applied bool) {
	m, has := ms.msgs[nonce]
	if !has {
		if applied && nonce >= ms.nextNonce {
			// we removed a message we did not know about because it was applied
			// we need to adjust the nonce and check if we filled a gap
			ms.nextNonce = nonce + 1
			for _, fillGap := ms.msgs[ms.nextNonce]; fillGap; _, fillGap = ms.msgs[ms.nextNonce] {
				ms.nextNonce++
			}
		}
		return
	}

	ms.requiredFunds.Sub(ms.requiredFunds, m.Message.RequiredFunds().Int)
	// ms.requiredFunds.Sub(ms.requiredFunds, m.Message.Value.Int)
	delete(ms.msgs, nonce)

	// adjust next nonce
	if applied {
		// we removed a (known) message because it was applied in a tipset
		// we can't possibly have filled a gap in this case
		if nonce >= ms.nextNonce {
			ms.nextNonce = nonce + 1
		}
		return
	}

	// we removed a message because it was pruned
	// we have to adjust the nonce if it creates a gap or rewinds state
	if nonce < ms.nextNonce {
		ms.nextNonce = nonce
	}
}
func (ms *msgSet) getRequiredFunds(nonce uint64) types.BigInt {
2020-08-26 12:37:42 +00:00
requiredFunds := new(stdbig.Int).Set(ms.requiredFunds)
m, has := ms.msgs[nonce]
if has {
requiredFunds.Sub(requiredFunds, m.Message.RequiredFunds().Int)
// requiredFunds.Sub(requiredFunds, m.Message.Value.Int)
}
2020-08-26 12:37:42 +00:00
return types.BigInt{Int: requiredFunds}
}
// toSlice returns the set's pending messages ordered by ascending nonce.
func (ms *msgSet) toSlice() []*types.SignedMessage {
	out := make([]*types.SignedMessage, 0, len(ms.msgs))
	for _, msg := range ms.msgs {
		out = append(out, msg)
	}

	sort.Slice(out, func(a, b int) bool {
		return out[a].Message.Nonce < out[b].Message.Nonce
	})

	return out
}
2021-12-11 21:03:00 +00:00
// New constructs a MessagePool backed by the given chain provider and
// datastore, subscribes it to head changes, and starts background loading of
// locally-persisted messages followed by the republish/prune run loop.
func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
	// cache construction with these fixed sizes cannot fail; errors ignored
	cache, _ := lru.New2Q[cid.Cid, crypto.Signature](build.BlsSignatureCacheSize)
	verifcache, _ := lru.New2Q[string, struct{}](build.VerifSigCacheSize)
	stateNonceCache, _ := lru.New[stateNonceCacheKey, uint64](32768) // 32k * ~200 bytes = 6MB
	keycache, _ := lru.New[address.Address, address.Address](1_000_000)

	cfg, err := loadConfig(ctx, ds)
	if err != nil {
		return nil, xerrors.Errorf("error loading mpool config: %w", err)
	}

	if j == nil {
		j = journal.NilJournal()
	}

	mp := &MessagePool{
		ds:              ds,
		addSema:         make(chan struct{}, 1),
		closer:          make(chan struct{}),
		repubTk:         build.Clock.Ticker(RepublishInterval),
		repubTrigger:    make(chan struct{}, 1),
		localAddrs:      make(map[address.Address]struct{}),
		pending:         make(map[address.Address]*msgSet),
		keyCache:        keycache,
		minGasPrice:     types.NewInt(0),
		getNtwkVersion:  us.GetNtwkVersion,
		pruneTrigger:    make(chan struct{}, 1),
		pruneCooldown:   make(chan struct{}, 1),
		blsSigCache:     cache,
		sigValCache:     verifcache,
		stateNonceCache: stateNonceCache,
		changes:         lps.New(50),
		localMsgs:       namespace.Wrap(ds, datastore.NewKey(localMsgsDs)),
		api:             api,
		netName:         netName,
		cfg:             cfg,
		evtTypes: [...]journal.EventType{
			evtTypeMpoolAdd:    j.RegisterEventType("mpool", "add"),
			evtTypeMpoolRemove: j.RegisterEventType("mpool", "remove"),
			evtTypeMpoolRepub:  j.RegisterEventType("mpool", "repub"),
		},
		journal: j,
	}

	// enable initial prunes
	mp.pruneCooldown <- struct{}{}

	ctx, cancel := context.WithCancel(context.TODO())

	// load the current tipset and subscribe to head changes _before_ loading local messages
	mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error {
		err := mp.HeadChange(ctx, rev, app)
		if err != nil {
			log.Errorf("mpool head notif handler error: %+v", err)
		}
		return err
	})

	// hold both locks while the background goroutine loads local messages, so
	// nothing observes a partially-initialized pool; the goroutine releases
	// them when loading completes
	mp.curTsLk.Lock()
	mp.lk.Lock()

	go func() {
		defer cancel()
		err := mp.loadLocal(ctx)

		mp.lk.Unlock()
		mp.curTsLk.Unlock()

		if err != nil {
			log.Errorf("loading local messages: %+v", err)
		}

		log.Info("mpool ready")

		mp.runLoop(ctx)
	}()

	return mp, nil
}
// ForEachPendingMessage invokes f with the CID of every pending signed
// message and with the CID of its unsigned inner message, stopping at (and
// returning) the first error.
func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error {
	mp.lk.Lock()
	defer mp.lk.Unlock()

	for _, mset := range mp.pending {
		for _, m := range mset.msgs {
			if err := f(m.Cid()); err != nil {
				return err
			}
			if err := f(m.Message.Cid()); err != nil {
				return err
			}
		}
	}

	return nil
}
// resolveToKey maps addr to its key address. Non-ID addresses are already
// key addresses and pass through unchanged.
func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) {
	if addr.Protocol() == address.ID {
		return mp.resolveToKeyFromID(ctx, addr)
	}
	return addr, nil
}
// resolveToKeyFromID resolves an ID address to its key address via chain
// state, memoizing the result in keyCache.
func (mp *MessagePool) resolveToKeyFromID(ctx context.Context, addr address.Address) (address.Address, error) {
	// fast path: previously resolved
	if cached, hit := mp.keyCache.Get(addr); hit {
		return cached, nil
	}

	resolved, err := mp.api.StateDeterministicAddressAtFinality(ctx, addr, mp.curTs)
	if err != nil {
		return address.Undef, err
	}

	// remember the mapping for next time
	mp.keyCache.Add(addr, resolved)
	return resolved, nil
}
// getPendingMset looks up the pending message set for addr, resolving it to
// a key address first. The bool reports whether a set exists.
func (mp *MessagePool) getPendingMset(ctx context.Context, addr address.Address) (*msgSet, bool, error) {
	resolved, err := mp.resolveToKey(ctx, addr)
	if err != nil {
		return nil, false, err
	}

	mset, found := mp.pending[resolved]
	return mset, found, nil
}
// setPendingMset stores ms as the pending message set for addr, resolving
// addr to a key address first.
func (mp *MessagePool) setPendingMset(ctx context.Context, addr address.Address, ms *msgSet) error {
	resolved, err := mp.resolveToKey(ctx, addr)
	if err != nil {
		return err
	}

	mp.pending[resolved] = ms
	return nil
}
// forEachPending invokes f for every (address, msgSet) pair in the pending
// map. This method isn't strictly necessary, since it doesn't resolve any
// addresses, but it's safer to have.
func (mp *MessagePool) forEachPending(f func(address.Address, *msgSet)) {
	for addr, mset := range mp.pending {
		f(addr, mset)
	}
}
// deletePendingMset drops the pending message set for addr, resolving addr
// to a key address first.
func (mp *MessagePool) deletePendingMset(ctx context.Context, addr address.Address) error {
	resolved, err := mp.resolveToKey(ctx, addr)
	if err != nil {
		return err
	}

	delete(mp.pending, resolved)
	return nil
}
// clearPending resets the pending map to empty. This method isn't strictly
// necessary, since it doesn't resolve any addresses, but it's safer to have.
func (mp *MessagePool) clearPending() {
	mp.pending = make(map[address.Address]*msgSet)
}
// isLocal reports whether addr (resolved to a key address) belongs to this
// node's set of local senders.
func (mp *MessagePool) isLocal(ctx context.Context, addr address.Address) (bool, error) {
	resolved, err := mp.resolveToKey(ctx, addr)
	if err != nil {
		return false, err
	}

	_, found := mp.localAddrs[resolved]
	return found, nil
}
// setLocal records addr (resolved to a key address) as a local sender.
func (mp *MessagePool) setLocal(ctx context.Context, addr address.Address) error {
	resolved, err := mp.resolveToKey(ctx, addr)
	if err != nil {
		return err
	}

	mp.localAddrs[resolved] = struct{}{}
	return nil
}
// forEachLocal invokes f for every local sender address. This method isn't
// strictly necessary, since it doesn't resolve any addresses, but it's safer
// to have.
func (mp *MessagePool) forEachLocal(ctx context.Context, f func(context.Context, address.Address)) {
	for addr := range mp.localAddrs {
		f(ctx, addr)
	}
}
// Close signals the background run loop to stop. It never fails.
func (mp *MessagePool) Close() error {
	close(mp.closer)
	return nil
}
2020-08-01 22:54:21 +00:00
func (mp *MessagePool) Prune() {
2020-08-07 16:50:10 +00:00
// this magic incantation of triggering prune thrice is here to make the Prune method
// synchronous:
// so, its a single slot buffered channel. The first send fills the channel,
// the second send goes through when the pruning starts,
// and the third send goes through (and noops) after the pruning finishes
// and goes through the loop again
2020-08-01 22:54:21 +00:00
mp.pruneTrigger <- struct{}{}
mp.pruneTrigger <- struct{}{}
mp.pruneTrigger <- struct{}{}
}
func (mp *MessagePool) runLoop(ctx context.Context) {
for {
select {
case <-mp.repubTk.C:
if err := mp.republishPendingMessages(ctx); err != nil {
2020-08-10 08:07:36 +00:00
log.Errorf("error while republishing messages: %s", err)
}
case <-mp.repubTrigger:
if err := mp.republishPendingMessages(ctx); err != nil {
log.Errorf("error while republishing messages: %s", err)
}
2020-07-16 22:28:35 +00:00
case <-mp.pruneTrigger:
if err := mp.pruneExcessMessages(); err != nil {
log.Errorf("failed to prune excess messages from mempool: %s", err)
}
2020-08-11 11:41:11 +00:00
case <-mp.closer:
mp.repubTk.Stop()
return
}
}
}
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) error {
if err := mp.setLocal(ctx, m.Message.From); err != nil {
return err
}
2020-10-20 16:13:17 +00:00
msgb, err := m.Serialize()
if err != nil {
return xerrors.Errorf("error serializing message: %w", err)
}
2021-12-11 21:03:00 +00:00
if err := mp.localMsgs.Put(ctx, datastore.NewKey(string(m.Cid().Bytes())), msgb); err != nil {
2019-11-23 19:01:56 +00:00
return xerrors.Errorf("persisting local message: %w", err)
}
return nil
}
2021-06-11 11:19:26 +00:00
// verifyMsgBeforeAdd verifies that the message meets the minimum criteria for block inclusion
// and whether the message has enough funds to be included in the next 20 blocks.
// If the message is not valid for block inclusion, it returns an error.
// For local messages, if the message can be included in the next 20 blocks, it returns true to
// signal that it should be immediately published. If the message cannot be included in the next 20
// blocks, it returns false so that the message doesn't immediately get published (and ignored by our
// peers); instead it will be published through the republish loop, once the base fee has fallen
// sufficiently.
// For non local messages, if the message cannot be included in the next 20 blocks it returns
// a (soft) validation error.
func (mp *MessagePool) verifyMsgBeforeAdd(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
	// the message can land no earlier than the epoch after curTs
	epoch := curTs.Height() + 1
	minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())

	if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), mp.api.StateNetworkVersion(ctx, epoch)); err != nil {
		return false, xerrors.Errorf("message will not be included in a block: %w", err)
	}

	// this checks if the GasFeeCap is sufficiently high for inclusion in the next 20 blocks
	// if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely
	// on republish to push it through later, if the baseFee has fallen.
	// this is a defensive check that stops minimum baseFee spam attacks from overloading validation
	// queues.
	// Note that for local messages, we always add them so that they can be accepted and republished
	// automatically.
	publish := local

	var baseFee big.Int
	if len(curTs.Blocks()) > 0 {
		baseFee = curTs.Blocks()[0].ParentBaseFee
	} else {
		// no blocks in the tipset; fall back to computing the base fee from state
		var err error
		baseFee, err = mp.api.ChainComputeBaseFee(context.TODO(), curTs)
		if err != nil {
			return false, xerrors.Errorf("computing basefee: %w", err)
		}
	}

	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactorConservative)
	if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
		if local {
			log.Warnf("local message will not be immediately published because GasFeeCap doesn't meet the lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s)",
				m.Message.GasFeeCap, baseFeeLowerBound)
			publish = false
		} else {
			return false, xerrors.Errorf("GasFeeCap doesn't meet base fee lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s): %w",
				m.Message.GasFeeCap, baseFeeLowerBound, ErrSoftValidationFailure)
		}
	}

	return publish, nil
}
2022-11-15 22:02:08 +00:00
// Push checks the signed message for any violations, adds the message to the message pool and
// publishes the message if the publish flag is set
func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage, publish bool) (cid.Cid, error) {
	done := metrics.Timer(ctx, metrics.MpoolPushDuration)
	defer done()

	err := mp.checkMessage(ctx, m)
	if err != nil {
		return cid.Undef, err
	}

	// serialize push access to reduce lock contention
	mp.addSema <- struct{}{}
	defer func() {
		<-mp.addSema
	}()

	// curTsLk guards mp.curTs; it must be released before publishing, which is
	// why we unlock explicitly on both paths instead of deferring
	mp.curTsLk.Lock()
	ok, err := mp.addTs(ctx, m, mp.curTs, true, false)
	if err != nil {
		mp.curTsLk.Unlock()
		return cid.Undef, err
	}
	mp.curTsLk.Unlock()

	// only broadcast when the message qualified for immediate publication
	// (addTs returned true) and the caller requested it
	if ok && publish {
		msgb, err := m.Serialize()
		if err != nil {
			return cid.Undef, xerrors.Errorf("error serializing message: %w", err)
		}

		err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
		if err != nil {
			return cid.Undef, xerrors.Errorf("error publishing message: %w", err)
		}
	}

	return m.Cid(), nil
}
func (mp *MessagePool) checkMessage(ctx context.Context, m *types.SignedMessage) error {
// big messages are bad, anti DOS
if m.Size() > MaxMessageSize {
return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig)
}
2020-08-26 16:37:27 +00:00
// Perform syntactic validation, minGas=0 as we check the actual mingas before we add it
if err := m.Message.ValidForBlockInclusion(0, mp.api.StateNetworkVersion(ctx, mp.curTs.Height())); err != nil {
return xerrors.Errorf("message not valid for block inclusion: %w", err)
}
if m.Message.To == address.Undef {
return ErrInvalidToAddr
}
if !m.Message.Value.LessThan(types.TotalFilecoinInt) {
return ErrMessageValueTooHigh
}
2020-08-26 18:23:28 +00:00
if m.Message.GasFeeCap.LessThan(minimumBaseFee) {
return ErrGasFeeCapTooLow
}
if err := mp.VerifyMsgSig(m); err != nil {
2023-05-05 09:50:07 +00:00
return xerrors.Errorf("signature verification failed: %s", err)
}
2020-08-18 07:19:46 +00:00
return nil
}
2021-05-18 18:56:42 +00:00
// Add validates m and inserts it into the pool as a non-local, trusted
// message. Unlike Push it never publishes to pubsub.
func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
	done := metrics.Timer(ctx, metrics.MpoolAddDuration)
	defer done()

	err := mp.checkMessage(ctx, m)
	if err != nil {
		return err
	}

	// serialize push access to reduce lock contention
	mp.addSema <- struct{}{}
	defer func() {
		<-mp.addSema
	}()

	mp.curTsLk.RLock()
	tmpCurTs := mp.curTs
	mp.curTsLk.RUnlock()

	//ensures computations are cached without holding lock
	_, _ = mp.api.GetActorAfter(m.Message.From, tmpCurTs)
	_, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)

	mp.curTsLk.Lock()
	if tmpCurTs != mp.curTs {
		//curTs has been updated so we want to cache the new one:
		tmpCurTs = mp.curTs
		//we want to release the lock, cache the computations then grab it again
		mp.curTsLk.Unlock()
		_, _ = mp.api.GetActorAfter(m.Message.From, tmpCurTs)
		_, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)
		mp.curTsLk.Lock()
		//now that we have the lock, we continue, we could do this as a loop forever, but that's bad to loop forever, and this was added as an optimization and it seems once is enough because the computation < block time
	} // else with the lock enabled, mp.curTs is the same Ts as we just had, so we know that our computations are cached

	defer mp.curTsLk.Unlock()

	_, err = mp.addTs(ctx, m, mp.curTs, false, false)
	return err
}
func sigCacheKey(m *types.SignedMessage) (string, error) {
switch m.Signature.Type {
case crypto.SigTypeBLS:
2021-09-08 16:21:36 +00:00
if len(m.Signature.Data) != ffi.SignatureBytes {
return "", fmt.Errorf("bls signature incorrectly sized")
}
2021-09-08 16:21:36 +00:00
hashCache := blake2b.Sum256(append(m.Cid().Bytes(), m.Signature.Data...))
return string(hashCache[:]), nil
case crypto.SigTypeSecp256k1, crypto.SigTypeDelegated:
return string(m.Cid().Bytes()), nil
default:
return "", xerrors.Errorf("unrecognized signature type: %d", m.Signature.Type)
}
}
// VerifyMsgSig checks the message signature, consulting (and updating) the
// signature-validation cache to avoid re-verifying the same message.
func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error {
	key, err := sigCacheKey(m)
	if err != nil {
		return err
	}

	if _, cached := mp.sigValCache.Get(key); cached {
		// already validated, great
		return nil
	}

	if err := consensus.AuthenticateMessage(m, m.Message.From); err != nil {
		return xerrors.Errorf("failed to validate signature: %w", err)
	}

	mp.sigValCache.Add(key, struct{}{})
	return nil
}
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet) error {
2021-06-11 11:19:26 +00:00
balance, err := mp.getStateBalance(ctx, m.Message.From, curTs)
if err != nil {
return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure)
}
requiredFunds := m.Message.RequiredFunds()
if balance.LessThan(requiredFunds) {
return xerrors.Errorf("not enough funds (required: %s, balance: %s): %w", types.FIL(requiredFunds), types.FIL(balance), ErrNotEnoughFunds)
}
// add Value for soft failure check
// requiredFunds = types.BigAdd(requiredFunds, m.Message.Value)
mset, ok, err := mp.getPendingMset(ctx, m.Message.From)
if err != nil {
log.Debugf("mpoolcheckbalance failed to get pending mset: %s", err)
return err
}
if ok {
requiredFunds = types.BigAdd(requiredFunds, mset.getRequiredFunds(m.Message.Nonce))
}
if balance.LessThan(requiredFunds) {
// Note: we fail here for ErrSoftValidationFailure to signal a soft failure because we might
// be out of sync.
return xerrors.Errorf("not enough funds including pending messages (required: %s, balance: %s): %w", types.FIL(requiredFunds), types.FIL(balance), ErrSoftValidationFailure)
}
return nil
}
2021-05-18 18:56:42 +00:00
// addTs validates m against curTs — state nonce, sender actor validity at the
// next epoch's network version, block-inclusion rules and balance — and on
// success adds it to the pool, additionally persisting it when local is set.
// It returns whether the message should be published immediately.
// NOTE(review): callers (Push/Add) hold curTsLk around this call; mp.lk is
// taken here — consistent with the "DO NOT LOCK curTsLk INSIDE lk" ordering.
func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) {
	done := metrics.Timer(ctx, metrics.MpoolAddTsDuration)
	defer done()

	snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
	if err != nil {
		return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
	}

	if snonce > m.Message.Nonce {
		return false, xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
	}

	senderAct, err := mp.api.GetActorAfter(m.Message.From, curTs)
	if err != nil {
		return false, xerrors.Errorf("failed to get sender actor: %w", err)
	}

	// This message can only be included in the _next_ epoch and beyond, hence the +1.
	epoch := curTs.Height() + 1
	nv := mp.api.StateNetworkVersion(ctx, epoch)

	// TODO: I'm not thrilled about depending on filcns here, but I prefer this to duplicating logic
	if !consensus.IsValidForSending(nv, senderAct) {
		return false, xerrors.Errorf("sender actor %s is not a valid top-level sender", m.Message.From)
	}

	mp.lk.Lock()
	defer mp.lk.Unlock()

	publish, err := mp.verifyMsgBeforeAdd(ctx, m, curTs, local)
	if err != nil {
		return false, xerrors.Errorf("verify msg failed: %w", err)
	}

	if err := mp.checkBalance(ctx, m, curTs); err != nil {
		return false, xerrors.Errorf("failed to check balance: %w", err)
	}

	err = mp.addLocked(ctx, m, !local, untrusted)
	if err != nil {
		return false, xerrors.Errorf("failed to add locked: %w", err)
	}

	if local {
		err = mp.addLocal(ctx, m)
		if err != nil {
			return false, xerrors.Errorf("error persisting local message: %w", err)
		}
	}

	return publish, nil
}
2021-05-18 18:56:42 +00:00
// addLoaded re-adds a message restored from the local-messages datastore.
// It runs the usual validation but treats the message as local and never
// publishes it (republish handles that later).
func (mp *MessagePool) addLoaded(ctx context.Context, m *types.SignedMessage) error {
	err := mp.checkMessage(ctx, m)
	if err != nil {
		return err
	}

	curTs := mp.curTs
	if curTs == nil {
		return xerrors.Errorf("current tipset not loaded")
	}

	snonce, err := mp.getStateNonce(ctx, m.Message.From, curTs)
	if err != nil {
		return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
	}

	if snonce > m.Message.Nonce {
		return xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow)
	}

	// local=true: we only care about validity here, not the publish decision
	_, err = mp.verifyMsgBeforeAdd(ctx, m, curTs, true)
	if err != nil {
		return err
	}

	if err := mp.checkBalance(ctx, m, curTs); err != nil {
		return err
	}

	return mp.addLocked(ctx, m, false, false)
}
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) addSkipChecks(ctx context.Context, m *types.SignedMessage) error {
2019-12-07 11:41:30 +00:00
mp.lk.Lock()
defer mp.lk.Unlock()
2021-05-18 18:56:42 +00:00
return mp.addLocked(ctx, m, false, false)
2019-12-07 11:41:30 +00:00
}
2021-05-18 18:56:42 +00:00
// addLocked performs the final insertion of m into the pool: caches BLS
// signatures, persists both the signed and unsigned message via the provider,
// updates (or creates) the per-sender msgSet, triggers pruning when the pool
// exceeds its size limit, and emits change/journal/metric events.
// Callers must hold mp.lk.
func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, strict, untrusted bool) error {
	log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
	if m.Signature.Type == crypto.SigTypeBLS {
		// BLS signatures are stripped from on-chain blocks; cache them so the
		// signed message can be reconstituted later
		mp.blsSigCache.Add(m.Cid(), m.Signature)
	}

	if _, err := mp.api.PutMessage(ctx, m); err != nil {
		return xerrors.Errorf("mpooladd cs.PutMessage failed: %s", err)
	}

	if _, err := mp.api.PutMessage(ctx, &m.Message); err != nil {
		return xerrors.Errorf("mpooladd cs.PutMessage failed: %s", err)
	}

	// Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work
	mset, ok, err := mp.getPendingMset(ctx, m.Message.From)
	if err != nil {
		log.Debug(err)
		return err
	}

	if !ok {
		// first pending message from this sender: seed the set with the
		// on-chain nonce
		nonce, err := mp.getStateNonce(ctx, m.Message.From, mp.curTs)
		if err != nil {
			return xerrors.Errorf("failed to get initial actor nonce: %w", err)
		}

		mset = newMsgSet(nonce)
		if err = mp.setPendingMset(ctx, m.Message.From, mset); err != nil {
			return xerrors.Errorf("failed to set pending mset: %w", err)
		}
	}

	incr, err := mset.add(m, mp, strict, untrusted)
	if err != nil {
		log.Debug(err)
		return err
	}

	if incr {
		mp.currentSize++
		if mp.currentSize > mp.getConfig().SizeLimitHigh {
			// send signal to prune messages if it hasnt already been sent
			select {
			case mp.pruneTrigger <- struct{}{}:
			default:
			}
		}
	}

	mp.changes.Pub(api.MpoolUpdate{
		Type:    api.MpoolAdd,
		Message: m,
	}, localUpdates)

	mp.journal.RecordEvent(mp.evtTypes[evtTypeMpoolAdd], func() interface{} {
		return MessagePoolEvt{
			Action:   "add",
			Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}},
		}
	})

	// Record the current size of the Mpool
	stats.Record(ctx, metrics.MpoolMessageCount.M(int64(mp.currentSize)))

	return nil
}
func (mp *MessagePool) GetNonce(ctx context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) {
mp.curTsLk.RLock()
defer mp.curTsLk.RUnlock()
2019-12-03 19:33:29 +00:00
mp.lk.RLock()
defer mp.lk.RUnlock()
2019-07-17 06:05:11 +00:00
2021-05-18 18:56:42 +00:00
return mp.getNonceLocked(ctx, addr, mp.curTs)
2019-09-16 14:17:08 +00:00
}
// GetActor should not be used. It is only here to satisfy interface mess caused by lite node handling
func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types.TipSetKey) (*types.Actor, error) {
	mp.curTsLk.RLock()
	defer mp.curTsLk.RUnlock()

	return mp.api.GetActorAfter(addr, mp.curTs)
}
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) {
2021-06-11 11:19:26 +00:00
stateNonce, err := mp.getStateNonce(ctx, addr, curTs) // sanity check
2019-11-23 01:26:32 +00:00
if err != nil {
return 0, err
}
mset, ok, err := mp.getPendingMset(ctx, addr)
if err != nil {
log.Debugf("mpoolgetnonce failed to get mset: %s", err)
return 0, err
}
2019-07-17 06:05:11 +00:00
if ok {
2019-11-23 01:26:32 +00:00
if stateNonce > mset.nextNonce {
2019-11-24 16:35:50 +00:00
log.Errorf("state nonce was larger than mset.nextNonce (%d > %d)", stateNonce, mset.nextNonce)
2019-11-23 01:26:32 +00:00
return stateNonce, nil
}
2019-09-20 09:01:49 +00:00
return mset.nextNonce, nil
2019-07-17 06:05:11 +00:00
}
2019-11-23 01:26:32 +00:00
return stateNonce, nil
}
// getStateNonce returns the next nonce for addr as implied by chain state
// at tipset ts: the actor's nonce in the state *before* ts, bumped past
// any messages from addr included in ts itself. Results are cached per
// (tipset key, address).
func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, ts *types.TipSet) (uint64, error) {
	done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration)
	defer done()

	nk := stateNonceCacheKey{
		tsk:  ts.Key(),
		addr: addr,
	}

	// fast path: previously computed for this (tipset, address)
	n, ok := mp.stateNonceCache.Get(nk)
	if ok {
		return n, nil
	}

	// get the nonce from the actor before ts
	actor, err := mp.api.GetActorBefore(addr, ts)
	if err != nil {
		return 0, err
	}
	nextNonce := actor.Nonce

	// resolve addr to a canonical key address so senders can be compared
	// below — NOTE(review): assumes resolveToKey canonicalizes aliases of
	// the same actor; confirm against its definition.
	raddr, err := mp.resolveToKey(ctx, addr)
	if err != nil {
		return 0, err
	}

	// loop over all messages sent by 'addr' and find the highest nonce
	messages, err := mp.api.MessagesForTipset(ctx, ts)
	if err != nil {
		return 0, err
	}
	for _, message := range messages {
		msg := message.VMMessage()

		maddr, err := mp.resolveToKey(ctx, msg.From)
		if err != nil {
			// best effort: skip messages whose sender cannot be resolved
			log.Warnf("failed to resolve message from address: %s", err)
			continue
		}

		if maddr == raddr {
			if n := msg.Nonce + 1; n > nextNonce {
				nextNonce = n
			}
		}
	}

	mp.stateNonceCache.Add(nk, nextNonce)

	return nextNonce, nil
}
2021-06-11 11:19:26 +00:00
// getStateBalance reports the balance of addr in the state after ts,
// recording the lookup duration in the mpool metrics.
func (mp *MessagePool) getStateBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (types.BigInt, error) {
	defer metrics.Timer(ctx, metrics.MpoolGetBalanceDuration)()

	act, err := mp.api.GetActorAfter(addr, ts)
	if err != nil {
		return types.EmptyInt, err
	}

	return act.Balance, nil
}
2020-09-18 06:40:43 +00:00
// this method is provided for the gateway to push messages.
// differences from Push:
2022-08-29 14:25:30 +00:00
// - strict checks are enabled
// - extra strict add checks are used when adding the messages to the msgSet
// that means: no nonce gaps, at most 10 pending messages for the actor
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) PushUntrusted(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
err := mp.checkMessage(ctx, m)
2020-09-18 06:40:43 +00:00
if err != nil {
return cid.Undef, err
}
// serialize push access to reduce lock contention
mp.addSema <- struct{}{}
defer func() {
<-mp.addSema
}()
mp.curTsLk.Lock()
2021-05-18 18:56:42 +00:00
publish, err := mp.addTs(ctx, m, mp.curTs, true, true)
2020-09-18 06:40:43 +00:00
if err != nil {
mp.curTsLk.Unlock()
return cid.Undef, err
}
mp.curTsLk.Unlock()
if publish {
2020-10-20 16:13:17 +00:00
msgb, err := m.Serialize()
if err != nil {
return cid.Undef, xerrors.Errorf("error serializing message: %w", err)
}
2020-09-18 06:40:43 +00:00
err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
2020-10-20 16:13:17 +00:00
if err != nil {
return cid.Undef, xerrors.Errorf("error publishing message: %w", err)
}
2020-09-18 06:40:43 +00:00
}
2020-10-20 16:13:17 +00:00
return m.Cid(), nil
2020-09-18 06:40:43 +00:00
}
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) Remove(ctx context.Context, from address.Address, nonce uint64, applied bool) {
2019-07-05 14:46:21 +00:00
mp.lk.Lock()
defer mp.lk.Unlock()
2021-05-18 18:56:42 +00:00
mp.remove(ctx, from, nonce, applied)
2020-08-01 22:54:21 +00:00
}
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce uint64, applied bool) {
mset, ok, err := mp.getPendingMset(ctx, from)
if err != nil {
log.Debugf("mpoolremove failed to get mset: %s", err)
return
}
2019-07-05 14:46:21 +00:00
if !ok {
return
}
2019-11-19 21:26:25 +00:00
if m, ok := mset.msgs[nonce]; ok {
mp.changes.Pub(api.MpoolUpdate{
Type: api.MpoolRemove,
Message: m,
}, localUpdates)
mp.journal.RecordEvent(mp.evtTypes[evtTypeMpoolRemove], func() interface{} {
return MessagePoolEvt{
Action: "remove",
2020-09-04 09:54:56 +00:00
Messages: []MessagePoolEvtMessage{{Message: m.Message, CID: m.Cid()}}}
})
2020-08-11 11:41:11 +00:00
2020-07-16 22:28:35 +00:00
mp.currentSize--
2019-11-19 21:26:25 +00:00
}
2019-11-17 07:44:06 +00:00
2019-07-05 14:46:21 +00:00
// NB: This deletes any message with the given nonce. This makes sense
// as two messages with the same sender cannot have the same nonce
mset.rm(nonce, applied)
2019-07-05 14:46:21 +00:00
if len(mset.msgs) == 0 {
if err = mp.deletePendingMset(ctx, from); err != nil {
log.Debugf("mpoolremove failed to delete mset: %s", err)
return
}
2019-07-05 14:46:21 +00:00
}
// Record the current size of the Mpool
stats.Record(ctx, metrics.MpoolMessageCount.M(int64(mp.currentSize)))
2019-07-05 14:46:21 +00:00
}
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
mp.curTsLk.RLock()
defer mp.curTsLk.RUnlock()
2019-12-03 19:33:29 +00:00
mp.lk.RLock()
defer mp.lk.RUnlock()
2019-12-03 19:33:29 +00:00
2021-05-18 18:56:42 +00:00
return mp.allPending(ctx)
2020-08-25 10:03:50 +00:00
}
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) allPending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
2019-09-06 22:32:42 +00:00
out := make([]*types.SignedMessage, 0)
mp.forEachPending(func(a address.Address, mset *msgSet) {
out = append(out, mset.toSlice()...)
})
2019-09-20 09:01:49 +00:00
2019-12-03 19:33:29 +00:00
return out, mp.curTs
}
2020-08-25 10:03:50 +00:00
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) PendingFor(ctx context.Context, a address.Address) ([]*types.SignedMessage, *types.TipSet) {
mp.curTsLk.RLock()
defer mp.curTsLk.RUnlock()
mp.lk.RLock()
defer mp.lk.RUnlock()
2021-05-18 18:56:42 +00:00
return mp.pendingFor(ctx, a), mp.curTs
}
2019-09-20 09:01:49 +00:00
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) pendingFor(ctx context.Context, a address.Address) []*types.SignedMessage {
mset, ok, err := mp.getPendingMset(ctx, a)
if err != nil {
log.Debugf("mpoolpendingfor failed to get mset: %s", err)
return nil
}
if mset == nil || !ok || len(mset.msgs) == 0 {
return nil
2019-07-05 14:46:21 +00:00
}
return mset.toSlice()
2019-07-05 14:46:21 +00:00
}
2021-05-18 18:56:42 +00:00
// HeadChange updates the pool for a chain head change: messages from
// reverted tipsets are re-added to the pool, and messages included in
// applied tipsets are removed. A message that is reverted and then
// re-applied within the same head change nets out and is never touched.
// It also fires the republish trigger when an applied block contains a
// message this node had republished.
func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, apply []*types.TipSet) error {
	mp.curTsLk.Lock()
	defer mp.curTsLk.Unlock()

	repubTrigger := false

	// rmsgs buffers reverted messages, keyed by sender then nonce, so
	// revert/apply pairs cancel without going through the pool.
	rmsgs := make(map[address.Address]map[uint64]*types.SignedMessage)
	add := func(m *types.SignedMessage) {
		s, ok := rmsgs[m.Message.From]
		if !ok {
			s = make(map[uint64]*types.SignedMessage)
			rmsgs[m.Message.From] = s
		}
		s[m.Message.Nonce] = m
	}
	rm := func(from address.Address, nonce uint64) {
		s, ok := rmsgs[from]
		if !ok {
			mp.Remove(ctx, from, nonce, true)
			return
		}

		if _, ok := s[nonce]; ok {
			// reverted then re-applied: drop from the buffer instead
			delete(s, nonce)
			return
		}

		mp.Remove(ctx, from, nonce, true)
	}

	maybeRepub := func(cid cid.Cid) {
		if !repubTrigger {
			mp.lk.RLock()
			_, republished := mp.republished[cid]
			mp.lk.RUnlock()
			if republished {
				repubTrigger = true
			}
		}
	}

	// errors below are accumulated, not fatal: process as much of the
	// head change as possible.
	var merr error

	for _, ts := range revert {
		pts, err := mp.api.LoadTipSet(ctx, ts.Parents())
		if err != nil {
			log.Errorf("error loading reverted tipset parent: %s", err)
			merr = multierror.Append(merr, err)
			continue
		}

		mp.curTs = pts

		msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks())
		if err != nil {
			log.Errorf("error retrieving messages for reverted block: %s", err)
			merr = multierror.Append(merr, err)
			continue
		}

		for _, msg := range msgs {
			add(msg)
		}
	}

	for _, ts := range apply {
		mp.curTs = ts

		for _, b := range ts.Blocks() {
			bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b)
			if err != nil {
				xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err)
				log.Errorf("error retrieving messages for block: %s", xerr)
				merr = multierror.Append(merr, xerr)
				continue
			}

			for _, msg := range smsgs {
				rm(msg.Message.From, msg.Message.Nonce)
				maybeRepub(msg.Cid())
			}

			for _, msg := range bmsgs {
				rm(msg.From, msg.Nonce)
				maybeRepub(msg.Cid())
			}
		}
	}

	if repubTrigger {
		// non-blocking send: a trigger may already be pending
		select {
		case mp.repubTrigger <- struct{}{}:
		default:
		}
	}

	// re-add the surviving reverted messages, skipping validation since
	// they were previously accepted on-chain
	for _, s := range rmsgs {
		for _, msg := range s {
			if err := mp.addSkipChecks(ctx, msg); err != nil {
				log.Errorf("Failed to readd message from reorg to mpool: %s", err)
			}
		}
	}

	// debug-only nonce-gap report after a reorg (futureDebug is off by default)
	if len(revert) > 0 && futureDebug {
		mp.lk.RLock()
		msgs, ts := mp.allPending(ctx)
		mp.lk.RUnlock()

		buckets := map[address.Address]*statBucket{}

		for _, v := range msgs {
			bkt, ok := buckets[v.Message.From]
			if !ok {
				bkt = &statBucket{
					msgs: map[uint64]*types.SignedMessage{},
				}
				buckets[v.Message.From] = bkt
			}

			bkt.msgs[v.Message.Nonce] = v
		}

		for a, bkt := range buckets {
			// TODO that might not be correct with GatActorAfter but it is only debug code
			act, err := mp.api.GetActorAfter(a, ts)
			if err != nil {
				log.Debugf("%s, err: %s\n", a, err)
				continue
			}

			var cmsg *types.SignedMessage
			var ok bool

			// walk the contiguous run of nonces starting at the actor nonce
			cur := act.Nonce
			for {
				cmsg, ok = bkt.msgs[cur]
				if !ok {
					break
				}
				cur++
			}

			// ff is the smallest pending nonce beyond the gap, if any
			ff := uint64(math.MaxUint64)
			for k := range bkt.msgs {
				if k > cur && k < ff {
					ff = k
				}
			}

			if ff != math.MaxUint64 {
				m := bkt.msgs[ff]

				// cmsg can be nil if no messages from the current nonce are in the mpool
				ccid := "nil"
				if cmsg != nil {
					ccid = cmsg.Cid().String()
				}

				log.Debugw("Nonce gap",
					"actor", a,
					"future_cid", m.Cid(),
					"future_nonce", ff,
					"current_cid", ccid,
					"current_nonce", cur,
					"revert_tipset", revert[0].Key(),
					"new_head", ts.Key(),
				)
			}
		}
	}

	return merr
}
2021-12-11 21:03:00 +00:00
// runHeadChange mutates rmsgs (messages keyed by sender then nonce) to
// reflect a head change from `from` to `to`: messages from reverted
// tipsets are added to rmsgs, messages included in applied tipsets are
// deleted from it. Unlike HeadChange it never touches the pool itself.
func (mp *MessagePool) runHeadChange(ctx context.Context, from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error {
	add := func(m *types.SignedMessage) {
		s, ok := rmsgs[m.Message.From]
		if !ok {
			s = make(map[uint64]*types.SignedMessage)
			rmsgs[m.Message.From] = s
		}
		s[m.Message.Nonce] = m
	}
	rm := func(from address.Address, nonce uint64) {
		s, ok := rmsgs[from]
		if !ok {
			return
		}

		if _, ok := s[nonce]; ok {
			delete(s, nonce)
			return
		}
	}

	revert, apply, err := store.ReorgOps(ctx, mp.api.LoadTipSet, from, to)
	if err != nil {
		return xerrors.Errorf("failed to compute reorg ops for mpool pending messages: %w", err)
	}

	// errors below are accumulated so as much of the reorg as possible is
	// processed
	var merr error

	for _, ts := range revert {
		msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks())
		if err != nil {
			log.Errorf("error retrieving messages for reverted block: %s", err)
			merr = multierror.Append(merr, err)
			continue
		}

		for _, msg := range msgs {
			add(msg)
		}
	}

	for _, ts := range apply {
		for _, b := range ts.Blocks() {
			bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b)
			if err != nil {
				xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err)
				log.Errorf("error retrieving messages for block: %s", xerr)
				merr = multierror.Append(merr, xerr)
				continue
			}

			for _, msg := range smsgs {
				rm(msg.Message.From, msg.Message.Nonce)
			}

			for _, msg := range bmsgs {
				rm(msg.From, msg.Nonce)
			}
		}
	}

	return merr
}
// statBucket groups one sender's pending messages by nonce; used only by
// the nonce-gap debug reporting in HeadChange.
type statBucket struct {
	msgs map[uint64]*types.SignedMessage
}
2021-12-17 09:42:09 +00:00
func (mp *MessagePool) MessagesForBlocks(ctx context.Context, blks []*types.BlockHeader) ([]*types.SignedMessage, error) {
2019-12-03 19:33:29 +00:00
out := make([]*types.SignedMessage, 0)
for _, b := range blks {
2021-12-17 09:42:09 +00:00
bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b)
2019-12-03 19:33:29 +00:00
if err != nil {
return nil, xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err)
}
out = append(out, smsgs...)
2019-12-03 19:33:29 +00:00
for _, msg := range bmsgs {
smsg := mp.RecoverSig(msg)
if smsg != nil {
out = append(out, smsg)
} else {
2020-11-03 12:28:31 +00:00
log.Debugf("could not recover signature for bls message %s", msg.Cid())
2019-12-03 19:33:29 +00:00
}
}
}
return out, nil
}
func (mp *MessagePool) RecoverSig(msg *types.Message) *types.SignedMessage {
sig, ok := mp.blsSigCache.Get(msg.Cid())
if !ok {
return nil
}
return &types.SignedMessage{
Message: *msg,
2019-11-12 11:42:19 +00:00
Signature: sig,
}
}
2019-11-17 07:44:06 +00:00
// Updates subscribes to mpool add/remove events. The forwarding
// goroutine exits (unsubscribing and closing the returned channel) when
// ctx is cancelled or the pool's closer fires; while forwarding, a slow
// consumer blocks the goroutine once the 20-slot buffer fills.
func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, error) {
	out := make(chan api.MpoolUpdate, 20)
	sub := mp.changes.Sub(localUpdates)

	go func() {
		defer mp.changes.Unsub(sub)
		defer close(out)

		for {
			select {
			case u := <-sub:
				// forward, but stay responsive to shutdown
				select {
				case out <- u.(api.MpoolUpdate):
				case <-ctx.Done():
					return
				case <-mp.closer:
					return
				}
			case <-ctx.Done():
				return
			case <-mp.closer:
				return
			}
		}
	}()

	return out, nil
}
2019-11-23 19:01:56 +00:00
2021-05-18 18:56:42 +00:00
func (mp *MessagePool) loadLocal(ctx context.Context) error {
2021-12-11 21:03:00 +00:00
res, err := mp.localMsgs.Query(ctx, query.Query{})
2019-11-23 19:01:56 +00:00
if err != nil {
return xerrors.Errorf("query local messages: %w", err)
}
for r := range res.Next() {
if r.Error != nil {
return xerrors.Errorf("r.Error: %w", r.Error)
}
var sm types.SignedMessage
if err := sm.UnmarshalCBOR(bytes.NewReader(r.Value)); err != nil {
return xerrors.Errorf("unmarshaling local message: %w", err)
}
2021-05-18 18:56:42 +00:00
if err := mp.addLoaded(ctx, &sm); err != nil {
2019-11-23 19:01:56 +00:00
if xerrors.Is(err, ErrNonceTooLow) {
continue // todo: drop the message from local cache (if above certain confidence threshold)
}
log.Errorf("adding local message: %+v", err)
2019-11-23 19:01:56 +00:00
}
2020-08-07 22:41:57 +00:00
if err = mp.setLocal(ctx, sm.Message.From); err != nil {
log.Debugf("mpoolloadLocal errored: %s", err)
return err
}
2019-11-23 19:01:56 +00:00
}
return nil
}
2020-08-21 17:28:45 +00:00
func (mp *MessagePool) Clear(ctx context.Context, local bool) {
2020-08-21 17:28:45 +00:00
mp.lk.Lock()
defer mp.lk.Unlock()
// remove everything if local is true, including removing local messages from
// the datastore
if local {
mp.forEachLocal(ctx, func(ctx context.Context, la address.Address) {
mset, ok, err := mp.getPendingMset(ctx, la)
if err != nil {
log.Warnf("errored while getting pending mset: %w", err)
return
}
if ok {
for _, m := range mset.msgs {
2021-12-11 21:03:00 +00:00
err := mp.localMsgs.Delete(ctx, datastore.NewKey(string(m.Cid().Bytes())))
if err != nil {
log.Warnf("error deleting local message: %s", err)
}
}
}
})
2020-08-21 17:59:40 +00:00
mp.clearPending()
mp.republished = nil
return
2020-08-21 17:59:40 +00:00
}
mp.forEachPending(func(a address.Address, ms *msgSet) {
isLocal, err := mp.isLocal(ctx, a)
if err != nil {
log.Warnf("errored while determining isLocal: %w", err)
return
}
if isLocal {
return
}
if err = mp.deletePendingMset(ctx, a); err != nil {
log.Warnf("errored while deleting mset: %w", err)
return
}
})
2020-08-21 17:28:45 +00:00
}
func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {
2020-09-14 19:05:24 +00:00
baseFeeLowerBound := types.BigDiv(baseFee, factor)
if baseFeeLowerBound.LessThan(minimumBaseFee) {
baseFeeLowerBound = minimumBaseFee
}
return baseFeeLowerBound
}
// MpoolNonceAPI abstracts the nonce and actor lookups the message pool
// needs — NOTE(review): presumably split out for lite-node handling (see
// the GetActor comment); confirm against the node module wiring.
type MpoolNonceAPI interface {
	GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error)
	GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
}