lotus/chain/messagepool/repub.go

package messagepool

import (
	"context"
	"sort"
	"time"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
	"github.com/filecoin-project/lotus/chain/types"
)
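
// repubMsgLimit caps how many messages are republished per pass; the selection
// loop below may momentarily exceed it when a picked chain spans multiple
// messages, but the batch is truncated back to this limit before publishing.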
const repubMsgLimit = 30
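
// RepublishBatchDelay is the pause between successive pubsub publishes in a
// republish batch, giving the validation pipeline a chance to process the
// messages serially and avoid spurious nonce gaps.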
var RepublishBatchDelay = 100 * time.Millisecond
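
// republishPendingMessages re-broadcasts pending messages from locally tracked
// actors on the pubsub messages topic. It greedily selects message chains that
// fit within a single block's gas limit, drops any chain containing a message
// whose GasFeeCap is below the base fee lower bound, and records the
// republished set so that head changes can trigger an early republish.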
func (mp *MessagePool) republishPendingMessages(ctx context.Context) error {
	mp.curTsLk.Lock()
	ts := mp.curTs

	baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
	if err != nil {
		mp.curTsLk.Unlock()
		return xerrors.Errorf("computing basefee: %w", err)
	}
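
	// The lower bound scales the current base fee down by baseFeeLowerBoundFactor,
	// approximating roughly how far the base fee could fall over the next ~20 epochs.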
	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)
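
	// Snapshot the pending message sets of all local actors while holding the
	// mpool lock, so the selection below works on a consistent copy.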
	pending := make(map[address.Address]map[uint64]*types.SignedMessage)
	mp.lk.Lock()
	mp.republished = nil // clear this to avoid races triggering an early republish
	mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) {
		mset, ok, err := mp.getPendingMset(ctx, actor)
		if err != nil {
			log.Debugf("failed to get mset: %w", err)
			return
		}

		if !ok {
			return
		}

		if len(mset.msgs) == 0 {
			return
		}

		// we need to copy this while holding the lock to avoid races with concurrent modification
		pend := make(map[uint64]*types.SignedMessage, len(mset.msgs))
		for nonce, m := range mset.msgs {
			pend[nonce] = m
		}
		pending[actor] = pend
	})
2020-08-10 08:07:36 +00:00

	mp.lk.Unlock()
	mp.curTsLk.Unlock()

	if len(pending) == 0 {
		return nil
	}
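
	// Build nonce-ordered message chains for each local actor and sort them so
	// the highest-priority chains are considered first.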
	var chains []*msgChain
	for actor, mset := range pending {
		// We use the baseFee lower bound for createMessageChains so that we optimistically include
		// chains that might become profitable in the next 20 blocks.
		// We still check the lowerBound condition for individual messages so that we don't send
		// messages that will be rejected by the mpool spam protector, so this is safe to do.
		next := mp.createMessageChains(actor, mset, baseFeeLowerBound, ts)
		chains = append(chains, next...)
	}

	if len(chains) == 0 {
		return nil
	}

	sort.Slice(chains, func(i, j int) bool {
		return chains[i].Before(chains[j])
	})
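
	// Greedily pack whole chains into one block's worth of gas, best chain
	// first; a chain that does not fit is trimmed to the remaining gas and
	// pushed back down the sorted list instead of being dropped outright.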
	gasLimit := build.BlockGasLimit
	minGas := int64(gasguess.MinGas)
	var msgs []*types.SignedMessage
loop:
	for i := 0; i < len(chains); {
		chain := chains[i]

		// we can exceed this if we have picked (some) longer chain already
		if len(msgs) > repubMsgLimit {
			break
		}

		// there is not enough gas for any message
		if gasLimit <= minGas {
			break
		}

		// has the chain been invalidated?
		if !chain.valid {
			i++
			continue
		}

		// does it fit in a block?
		if chain.gasLimit <= gasLimit {
			// check the baseFee lower bound -- only republish messages that can be included in the chain
			// within the next 20 blocks.
			for _, m := range chain.msgs {
				if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
					chain.Invalidate()
					continue loop
				}
				gasLimit -= m.Message.GasLimit
				msgs = append(msgs, m)
			}

			// we processed the whole chain, advance
			i++
			continue
		}

		// we can't fit the current chain but there is gas to spare
		// trim it and push it down
		chain.Trim(gasLimit, repubMsgLimit, mp, baseFee)
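		// restore the sort invariant: the trimmed chain is now smaller, so bubble
		// it down past any chains that should be considered before it.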
		for j := i; j < len(chains)-1; j++ {
			if chains[j].Before(chains[j+1]) {
				break
			}
			chains[j], chains[j+1] = chains[j+1], chains[j]
		}
	}

	count := 0
	if len(msgs) > repubMsgLimit {
		msgs = msgs[:repubMsgLimit]
	}
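
	// Publish the selected messages on the network's messages topic, pausing
	// RepublishBatchDelay between sends so downstream validation sees them
	// roughly in order.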
	log.Infof("republishing %d messages", len(msgs))
	for _, m := range msgs {
		mb, err := m.Serialize()
		if err != nil {
			return xerrors.Errorf("cannot serialize message: %w", err)
		}

		err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), mb)
		if err != nil {
			return xerrors.Errorf("cannot publish: %w", err)
		}

		count++

		if count < len(msgs) {
			// this delay is here to encourage the pubsub subsystem to process the messages serially
			// and avoid creating nonce gaps because of concurrent validation.
			time.Sleep(RepublishBatchDelay)
		}
	}
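
	// Record a "repub" event in the journal with the messages and CIDs that
	// were just broadcast.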
	if len(msgs) > 0 {
		mp.journal.RecordEvent(mp.evtTypes[evtTypeMpoolRepub], func() interface{} {
			msgsEv := make([]MessagePoolEvtMessage, 0, len(msgs))
			for _, m := range msgs {
				msgsEv = append(msgsEv, MessagePoolEvtMessage{Message: m.Message, CID: m.Cid()})
			}
			return MessagePoolEvt{
				Action:   "repub",
				Messages: msgsEv,
			}
		})
	}

	// track most recently republished messages
	republished := make(map[cid.Cid]struct{})
	for _, m := range msgs[:count] {
		republished[m.Cid()] = struct{}{}
	}

	mp.lk.Lock()
	// update the republished set so that we can trigger early republish from head changes
	mp.republished = republished
	mp.lk.Unlock()

	return nil
}