Merge pull request #7255 from filecoin-project/cleanup/consensus

chain: Cleanup consensus logic
Łukasz Magiera 2021-09-02 19:48:37 +02:00 committed by GitHub
commit 31efe8a690
64 changed files with 1872 additions and 1515 deletions


@ -177,6 +177,11 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
// TODO handle genesis better
return nil
}
if curr.Round != prev.Round+1 {
return xerrors.Errorf("invalid beacon entry: cur (%d) != prev (%d) + 1", curr.Round, prev.Round)
}
if be := db.getCachedValue(curr.Round); be != nil {
if !bytes.Equal(curr.Data, be.Data) {
return xerrors.New("invalid beacon value, does not match cached good value")


@ -0,0 +1,297 @@
package filcns
import (
"context"
"sync/atomic"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/cron"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/metrics"
)
func NewActorRegistry() *vm.ActorRegistry {
inv := vm.NewActorRegistry()
// TODO: define all these properties on the actors themselves, in specs-actors.
inv.Register(vm.ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...)
inv.Register(vm.ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...)
inv.Register(vm.ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...)
inv.Register(vm.ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...)
inv.Register(vm.ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...)
return inv
}
type TipSetExecutor struct{}
func NewTipSetExecutor() *TipSetExecutor {
return &TipSetExecutor{}
}
func (t *TipSetExecutor) NewActorRegistry() *vm.ActorRegistry {
return NewActorRegistry()
}
type FilecoinBlockMessages struct {
store.BlockMessages
WinCount int64
}
func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []FilecoinBlockMessages, epoch abi.ChainEpoch, r vm.Rand, em stmgr.ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
defer done()
partDone := metrics.Timer(ctx, metrics.VMApplyEarly)
defer func() {
partDone()
}()
makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) {
vmopt := &vm.VMOpts{
StateBase: base,
Epoch: epoch,
Rand: r,
Bstore: sm.ChainStore().StateBlockstore(),
Actors: NewActorRegistry(),
Syscalls: sm.Syscalls,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: baseFee,
LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts),
}
return sm.VMConstructor()(ctx, vmopt)
}
vmi, err := makeVmWithBaseState(pstate)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
}
runCron := func(epoch abi.ChainEpoch) error {
cronMsg := &types.Message{
To: cron.Address,
From: builtin.SystemActorAddr,
Nonce: uint64(epoch),
Value: types.NewInt(0),
GasFeeCap: types.NewInt(0),
GasPremium: types.NewInt(0),
GasLimit: build.BlockGasLimit * 10000, // Make super sure this is never too little
Method: cron.Methods.EpochTick,
Params: nil,
}
ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg)
if err != nil {
return err
}
if em != nil {
if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
return xerrors.Errorf("callback failed on cron message: %w", err)
}
}
if ret.ExitCode != 0 {
return xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode)
}
return nil
}
for i := parentEpoch; i < epoch; i++ {
if i > parentEpoch {
// run cron for null rounds if any
if err := runCron(i); err != nil {
return cid.Undef, cid.Undef, err
}
pstate, err = vmi.Flush(ctx)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err)
}
}
// handle state forks
// XXX: The state tree
newState, err := sm.HandleStateForks(ctx, pstate, i, em, ts)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
}
if pstate != newState {
vmi, err = makeVmWithBaseState(newState)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
}
}
vmi.SetBlockHeight(i + 1)
pstate = newState
}
partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyMessages)
var receipts []cbg.CBORMarshaler
processedMsgs := make(map[cid.Cid]struct{})
for _, b := range bms {
penalty := types.NewInt(0)
gasReward := big.Zero()
for _, cm := range append(b.BlsMessages, b.SecpkMessages...) {
m := cm.VMMessage()
if _, found := processedMsgs[m.Cid()]; found {
continue
}
r, err := vmi.ApplyMessage(ctx, cm)
if err != nil {
return cid.Undef, cid.Undef, err
}
receipts = append(receipts, &r.MessageReceipt)
gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
if em != nil {
if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil {
return cid.Undef, cid.Undef, err
}
}
processedMsgs[m.Cid()] = struct{}{}
}
params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{
Miner: b.Miner,
Penalty: penalty,
GasReward: gasReward,
WinCount: b.WinCount,
})
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err)
}
rwMsg := &types.Message{
From: builtin.SystemActorAddr,
To: reward.Address,
Nonce: uint64(epoch),
Value: types.NewInt(0),
GasFeeCap: types.NewInt(0),
GasPremium: types.NewInt(0),
GasLimit: 1 << 30,
Method: reward.Methods.AwardBlockReward,
Params: params,
}
ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg)
if actErr != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
}
if em != nil {
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err)
}
}
if ret.ExitCode != 0 {
return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr)
}
}
partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyCron)
if err := runCron(epoch); err != nil {
return cid.Cid{}, cid.Cid{}, err
}
partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyFlush)
rectarr := blockadt.MakeEmptyArray(sm.ChainStore().ActorStore(ctx))
for i, receipt := range receipts {
if err := rectarr.Set(uint64(i), receipt); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
}
}
rectroot, err := rectarr.Root()
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
}
st, err := vmi.Flush(ctx)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err)
}
stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))),
metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied))))
return st, rectroot, nil
}
func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet, em stmgr.ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) {
ctx, span := trace.StartSpan(ctx, "computeTipSetState")
defer span.End()
blks := ts.Blocks()
for i := 0; i < len(blks); i++ {
for j := i + 1; j < len(blks); j++ {
if blks[i].Miner == blks[j].Miner {
return cid.Undef, cid.Undef,
xerrors.Errorf("duplicate miner in a tipset (%s %s)",
blks[i].Miner, blks[j].Miner)
}
}
}
var parentEpoch abi.ChainEpoch
pstate := blks[0].ParentStateRoot
if blks[0].Height > 0 {
parent, err := sm.ChainStore().GetBlock(blks[0].Parents[0])
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err)
}
parentEpoch = parent.Height
}
r := store.NewChainRand(sm.ChainStore(), ts.Cids())
blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ts)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err)
}
fbmsgs := make([]FilecoinBlockMessages, len(blkmsgs))
for i := range fbmsgs {
fbmsgs[i].BlockMessages = blkmsgs[i]
fbmsgs[i].WinCount = ts.Blocks()[i].ElectionProof.WinCount
}
baseFee := blks[0].ParentBaseFee
return t.ApplyBlocks(ctx, sm, parentEpoch, pstate, fbmsgs, blks[0].Height, r, em, baseFee, ts)
}
var _ stmgr.Executor = &TipSetExecutor{}
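The new TipSetExecutor is what replays a tipset's messages, plus the implicit reward and cron messages, on top of the parent state. A minimal sketch of how a caller might drive it (not part of this PR; the filcns import path and the surrounding setup are assumptions):

// Illustrative sketch only. Assumes an initialized *stmgr.StateManager and
// *types.TipSet, and that the filcns package lives under chain/consensus.
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/chain/consensus/filcns" // assumed path
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
)

func computeTipSetState(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet) error {
	exec := filcns.NewTipSetExecutor()
	// ExecuteTipSet applies all block messages plus the implicit reward and
	// cron messages (including cron for null rounds) and returns the new
	// state root and receipts root. A nil ExecMonitor disables trace callbacks.
	st, rects, err := exec.ExecuteTipSet(ctx, sm, ts, nil)
	if err != nil {
		return err
	}
	fmt.Println("state root:", st, "receipts root:", rects)
	return nil
}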


@ -0,0 +1,847 @@
package filcns
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"strings"
"time"
"github.com/hashicorp/go-multierror"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/lib/async"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/metrics"
)
var log = logging.Logger("fil-consensus")
type FilecoinEC struct {
// The interface for accessing and putting tipsets into local storage
store *store.ChainStore
// handle to the random beacon for verification
beacon beacon.Schedule
// the state manager handles making state queries
sm *stmgr.StateManager
verifier ffiwrapper.Verifier
genesis *types.TipSet
}
// Blocks that are more than MaxHeightDrift epochs above
// the theoretical max height based on systime are quickly rejected
const MaxHeightDrift = 5
func NewFilecoinExpectedConsensus(sm *stmgr.StateManager, beacon beacon.Schedule, verifier ffiwrapper.Verifier, genesis chain.Genesis) consensus.Consensus {
if build.InsecurePoStValidation {
log.Warn("*********************************************************************************************")
log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! ")
log.Warn("*********************************************************************************************")
}
return &FilecoinEC{
store: sm.ChainStore(),
beacon: beacon,
sm: sm,
verifier: verifier,
genesis: genesis,
}
}
func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) {
if err := blockSanityChecks(b.Header); err != nil {
return xerrors.Errorf("incoming header failed basic sanity checks: %w", err)
}
h := b.Header
baseTs, err := filec.store.LoadTipSet(types.NewTipSetKey(h.Parents...))
if err != nil {
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
}
winPoStNv := filec.sm.GetNtwkVersion(ctx, baseTs.Height())
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, baseTs, h.Height)
if err != nil {
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
}
prevBeacon, err := filec.store.GetLatestBeaconEntry(baseTs)
if err != nil {
return xerrors.Errorf("failed to get latest beacon entry: %w", err)
}
// fast checks first
if h.Height <= baseTs.Height() {
return xerrors.Errorf("block height not greater than parent height: %d != %d", h.Height, baseTs.Height())
}
nulls := h.Height - (baseTs.Height() + 1)
if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs {
return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs)
}
now := uint64(build.Clock.Now().Unix())
if h.Timestamp > now+build.AllowableClockDriftSecs {
return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, consensus.ErrTemporal)
}
if h.Timestamp > now {
log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
}
msgsCheck := async.Err(func() error {
if b.Cid() == build.WhitelistedBlock {
return nil
}
if err := filec.checkBlockMessages(ctx, b, baseTs); err != nil {
return xerrors.Errorf("block had invalid messages: %w", err)
}
return nil
})
minerCheck := async.Err(func() error {
if err := filec.minerIsValid(ctx, h.Miner, baseTs); err != nil {
return xerrors.Errorf("minerIsValid failed: %w", err)
}
return nil
})
baseFeeCheck := async.Err(func() error {
baseFee, err := filec.store.ComputeBaseFee(ctx, baseTs)
if err != nil {
return xerrors.Errorf("computing base fee: %w", err)
}
if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
b.Header.ParentBaseFee, baseFee)
}
return nil
})
pweight, err := filec.store.Weight(ctx, baseTs)
if err != nil {
return xerrors.Errorf("getting parent weight: %w", err)
}
if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
b.Header.ParentWeight, pweight)
}
stateRootCheck := async.Err(func() error {
stateroot, precp, err := filec.sm.TipSetState(ctx, baseTs)
if err != nil {
return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
}
if stateroot != h.ParentStateRoot {
msgs, err := filec.store.MessagesForTipset(baseTs)
if err != nil {
log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
} else {
log.Warn("Messages for tipset with mismatching state:")
for i, m := range msgs {
mm := m.VMMessage()
log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
}
}
return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
}
if precp != h.ParentMessageReceipts {
return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
}
return nil
})
// Stuff that needs worker address
waddr, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
}
winnerCheck := async.Err(func() error {
if h.ElectionProof.WinCount < 1 {
return xerrors.Errorf("block is not claiming to be a winner")
}
eligible, err := stmgr.MinerEligibleToMine(ctx, filec.sm, h.Miner, baseTs, lbts)
if err != nil {
return xerrors.Errorf("determining if miner has min power failed: %w", err)
}
if !eligible {
return xerrors.New("block's miner is ineligible to mine")
}
rBeacon := *prevBeacon
if len(h.BeaconEntries) != 0 {
rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1]
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("could not draw randomness: %w", err)
}
if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
return xerrors.Errorf("validating block election proof failed: %w", err)
}
slashed, err := stmgr.GetMinerSlashed(ctx, filec.sm, baseTs, h.Miner)
if err != nil {
return xerrors.Errorf("failed to check if block miner was slashed: %w", err)
}
if slashed {
return xerrors.Errorf("received block was from slashed or invalid miner")
}
mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, filec.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("failed getting power: %w", err)
}
j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower)
if h.ElectionProof.WinCount != j {
return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j)
}
return nil
})
blockSigCheck := async.Err(func() error {
if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
return xerrors.Errorf("check block signature failed: %w", err)
}
return nil
})
beaconValuesCheck := async.Err(func() error {
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
return nil
}
if err := beacon.ValidateBlockValues(filec.beacon, h, baseTs.Height(), *prevBeacon); err != nil {
return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
}
return nil
})
tktsCheck := async.Err(func() error {
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
if h.Height > build.UpgradeSmokeHeight {
buf.Write(baseTs.MinTicket().VRFProof)
}
beaconBase := *prevBeacon
if len(h.BeaconEntries) != 0 {
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
vrfBase, err := store.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
}
err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
if err != nil {
return xerrors.Errorf("validating block tickets failed: %w", err)
}
return nil
})
wproofCheck := async.Err(func() error {
if err := filec.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
return xerrors.Errorf("invalid election post: %w", err)
}
return nil
})
await := []async.ErrorFuture{
minerCheck,
tktsCheck,
blockSigCheck,
beaconValuesCheck,
wproofCheck,
winnerCheck,
msgsCheck,
baseFeeCheck,
stateRootCheck,
}
var merr error
for _, fut := range await {
if err := fut.AwaitContext(ctx); err != nil {
merr = multierror.Append(merr, err)
}
}
if merr != nil {
mulErr := merr.(*multierror.Error)
mulErr.ErrorFormat = func(es []error) string {
if len(es) == 1 {
return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
}
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %+v", err)
}
return fmt.Sprintf(
"%d errors occurred:\n\t%s\n\n",
len(es), strings.Join(points, "\n\t"))
}
return mulErr
}
return nil
}
func blockSanityChecks(h *types.BlockHeader) error {
if h.ElectionProof == nil {
return xerrors.Errorf("block cannot have nil election proof")
}
if h.Ticket == nil {
return xerrors.Errorf("block cannot have nil ticket")
}
if h.BlockSig == nil {
return xerrors.Errorf("block had nil signature")
}
if h.BLSAggregate == nil {
return xerrors.Errorf("block had nil bls aggregate signature")
}
if h.Miner.Protocol() != address.ID {
return xerrors.Errorf("block had non-ID miner address")
}
return nil
}
func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
if build.InsecurePoStValidation {
if len(h.WinPoStProof) == 0 {
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
}
if string(h.WinPoStProof[0].ProofBytes) == "valid proof" {
return nil
}
return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid")
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address: %w", err)
}
rbase := prevBeacon
if len(h.BeaconEntries) > 0 {
rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
}
mid, err := address.IDFromAddress(h.Miner)
if err != nil {
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
Randomness: rand,
Proofs: h.WinPoStProof,
ChallengedSectors: sectors,
Prover: abi.ActorID(mid),
})
if err != nil {
return xerrors.Errorf("failed to verify election post: %w", err)
}
if !ok {
log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors)
return xerrors.Errorf("winning post was invalid")
}
return nil
}
// TODO: We should extract this somewhere else and make the message pool and miner use the same logic
func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error {
{
var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
var pubks [][]byte
for _, m := range b.BlsMessages {
sigCids = append(sigCids, m.Cid())
pubk, err := filec.sm.GetBlsPublicKey(ctx, m.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to load bls public to validate block: %w", err)
}
pubks = append(pubks, pubk)
}
if err := consensus.VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
}
}
nonces := make(map[address.Address]uint64)
stateroot, _, err := filec.sm.TipSetState(ctx, baseTs)
if err != nil {
return err
}
st, err := state.LoadStateTree(filec.store.ActorStore(ctx), stateroot)
if err != nil {
return xerrors.Errorf("failed to load base state tree: %w", err)
}
nv := filec.sm.GetNtwkVersion(ctx, b.Header.Height)
pl := vm.PricelistByEpoch(baseTs.Height())
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
return err
}
// ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
// So below is overflow safe
sumGasLimit += m.GasLimit
if sumGasLimit > build.BlockGasLimit {
return xerrors.Errorf("block gas limit exceeded")
}
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
var sender address.Address
if filec.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 {
sender, err = st.LookupID(m.From)
if err != nil {
return err
}
} else {
sender = m.From
}
if _, ok := nonces[sender]; !ok {
// `GetActor` does not validate that this is an account actor.
act, err := st.GetActor(sender)
if err != nil {
return xerrors.Errorf("failed to get actor: %w", err)
}
if !builtin.IsAccountActor(act.Code) {
return xerrors.New("Sender must be an account actor")
}
nonces[sender] = act.Nonce
}
if nonces[sender] != m.Nonce {
return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce)
}
nonces[sender]++
return nil
}
// Validate message arrays in a temporary blockstore.
tmpbs := bstore.NewMemory()
tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))
bmArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.BlsMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
}
c, err := store.PutMessage(tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := bmArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
}
}
smArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.SecpkMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
}
// `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call
// in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`).
kaddr, err := filec.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to resolve key addr: %w", err)
}
if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil {
return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err)
}
c, err := store.PutMessage(tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := smArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
}
}
bmroot, err := bmArr.Root()
if err != nil {
return err
}
smroot, err := smArr.Root()
if err != nil {
return err
}
mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return err
}
if b.Header.Messages != mrcid {
return fmt.Errorf("messages didnt match message root in header")
}
// Finally, flush.
return vm.Copy(ctx, tmpbs, filec.store.ChainBlockstore(), mrcid)
}
func (filec *FilecoinEC) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
if filec.genesis == nil {
return false
}
now := uint64(build.Clock.Now().Unix())
return epoch > (abi.ChainEpoch((now-filec.genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
}
func (filec *FilecoinEC) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
act, err := filec.sm.LoadActor(ctx, power.Address, baseTs)
if err != nil {
return xerrors.Errorf("failed to load power actor: %w", err)
}
powState, err := power.Load(filec.store.ActorStore(ctx), act)
if err != nil {
return xerrors.Errorf("failed to load power actor state: %w", err)
}
_, exist, err := powState.MinerPower(maddr)
if err != nil {
return xerrors.Errorf("failed to look up miner's claim: %w", err)
}
if !exist {
return xerrors.New("miner isn't valid")
}
return nil
}
func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
return VerifyVRF(ctx, worker, rand, evrf)
}
func VerifyVRF(ctx context.Context, worker address.Address, vrfBase, vrfproof []byte) error {
_, span := trace.StartSpan(ctx, "VerifyVRF")
defer span.End()
sig := &crypto.Signature{
Type: crypto.SigTypeBLS,
Data: vrfproof,
}
if err := sigs.Verify(sig, worker, vrfBase); err != nil {
return xerrors.Errorf("vrf was invalid: %w", err)
}
return nil
}
var ErrSoftFailure = errors.New("soft validation failure")
var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power")
func (filec *FilecoinEC) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) {
if self {
return filec.validateLocalBlock(ctx, msg)
}
// track validation time
begin := build.Clock.Now()
defer func() {
log.Debugf("block validation time: %s", build.Clock.Since(begin))
}()
stats.Record(ctx, metrics.BlockReceived.M(1))
recordFailureFlagPeer := func(what string) {
// bv.Validate will flag the peer in that case
panic(what)
}
blk, what, err := filec.decodeAndCheckBlock(msg)
if err != nil {
log.Error("got invalid block over pubsub: ", err)
recordFailureFlagPeer(what)
return pubsub.ValidationReject, what
}
// validate the block meta: the Message CID in the header must match the included messages
err = filec.validateMsgMeta(ctx, blk)
if err != nil {
log.Warnf("error validating message metadata: %s", err)
recordFailureFlagPeer("invalid_block_meta")
return pubsub.ValidationReject, "invalid_block_meta"
}
reject, err := filec.validateBlockHeader(ctx, blk.Header)
if err != nil {
if reject == "" {
log.Warn("ignoring block msg: ", err)
return pubsub.ValidationIgnore, reject
}
recordFailureFlagPeer(reject)
return pubsub.ValidationReject, reject
}
// all good, accept the block
msg.ValidatorData = blk
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
return pubsub.ValidationAccept, ""
}
func (filec *FilecoinEC) validateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) {
stats.Record(ctx, metrics.BlockPublished.M(1))
if size := msg.Size(); size > 1<<20-1<<15 {
log.Errorf("ignoring oversize block (%dB)", size)
return pubsub.ValidationIgnore, "oversize_block"
}
blk, what, err := filec.decodeAndCheckBlock(msg)
if err != nil {
log.Errorf("got invalid local block: %s", err)
return pubsub.ValidationIgnore, what
}
msg.ValidatorData = blk
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
return pubsub.ValidationAccept, ""
}
func (filec *FilecoinEC) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) {
blk, err := types.DecodeBlockMsg(msg.GetData())
if err != nil {
return nil, "invalid", xerrors.Errorf("error decoding block: %w", err)
}
if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit {
return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count)
}
// make sure we have a signature
if blk.Header.BlockSig == nil {
return nil, "missing_signature", fmt.Errorf("block without a signature")
}
return blk, "", nil
}
func (filec *FilecoinEC) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
// TODO there has to be a simpler way to do this without the blockstore dance
// block headers use adt0
store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory()))
bmArr := blockadt.MakeEmptyArray(store)
smArr := blockadt.MakeEmptyArray(store)
for i, m := range msg.BlsMessages {
c := cbg.CborCid(m)
if err := bmArr.Set(uint64(i), &c); err != nil {
return err
}
}
for i, m := range msg.SecpkMessages {
c := cbg.CborCid(m)
if err := smArr.Set(uint64(i), &c); err != nil {
return err
}
}
bmroot, err := bmArr.Root()
if err != nil {
return err
}
smroot, err := smArr.Root()
if err != nil {
return err
}
mrcid, err := store.Put(store.Context(), &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return err
}
if msg.Header.Messages != mrcid {
return fmt.Errorf("messages didn't match root cid in header")
}
return nil
}
func (filec *FilecoinEC) validateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) {
// we want to ensure that it is a block from a known miner; we reject blocks from unknown miners
// to prevent spam attacks.
// the logic works as follows: we lookup the miner in the chain for its key.
// if we can find it then it's a known miner and we can validate the signature.
// if we can't find it, we check whether we are (near) synced in the chain.
// if we are not synced we cannot validate the block and we must ignore it.
// if we are synced and the miner is unknown, then the block is rejected.
key, err := filec.checkPowerAndGetWorkerKey(ctx, b)
if err != nil {
if err != ErrSoftFailure && filec.isChainNearSynced() {
log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
return "unknown_miner", err
}
log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain: %s", b.Cid())
return "", err // ignore
}
if b.ElectionProof.WinCount < 1 {
log.Errorf("block is not claiming to be winning")
return "not_winning", xerrors.Errorf("block not winning")
}
err = sigs.CheckBlockSignature(ctx, b, key)
if err != nil {
log.Errorf("block signature verification failed: %s", err)
return "signature_verification_failed", err
}
return "", nil
}
func (filec *FilecoinEC) checkPowerAndGetWorkerKey(ctx context.Context, bh *types.BlockHeader) (address.Address, error) {
// we check that the miner met the minimum power at the lookback tipset
baseTs := filec.store.GetHeaviestTipSet()
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, baseTs, bh.Height)
if err != nil {
log.Warnf("failed to load lookback tipset for incoming block: %s", err)
return address.Undef, ErrSoftFailure
}
key, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, bh.Miner)
if err != nil {
log.Warnf("failed to resolve worker key for miner %s: %s", bh.Miner, err)
return address.Undef, ErrSoftFailure
}
// NOTE: we check to see if the miner was eligible in the lookback
// tipset - 1 for historical reasons. DO NOT use the lookback state
// returned by GetLookbackTipSetForRound.
eligible, err := stmgr.MinerEligibleToMine(ctx, filec.sm, bh.Miner, baseTs, lbts)
if err != nil {
log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err)
return address.Undef, ErrSoftFailure
}
if !eligible {
log.Warnf("incoming block's miner is ineligible")
return address.Undef, ErrInsufficientPower
}
return key, nil
}
func (filec *FilecoinEC) isChainNearSynced() bool {
ts := filec.store.GetHeaviestTipSet()
timestamp := ts.MinTimestamp()
timestampTime := time.Unix(int64(timestamp), 0)
return build.Clock.Since(timestampTime) < 6*time.Hour
}
var _ consensus.Consensus = &FilecoinEC{}
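ValidateBlockPubsub returns both a pubsub.ValidationResult and a short reject reason used for metrics, which makes it straightforward to plug into gossipsub. A hedged sketch (not from this PR; the topic name, peer handling and wiring are illustrative assumptions) of how it could be registered as a topic validator:

package example

import (
	"context"

	"github.com/libp2p/go-libp2p-core/peer"
	pubsub "github.com/libp2p/go-libp2p-pubsub"

	"github.com/filecoin-project/lotus/chain/consensus"
)

// registerBlockValidator wires a Consensus implementation (such as FilecoinEC)
// into a gossipsub blocks topic. Only the ValidationResult matters to pubsub;
// the reject reason is dropped here for brevity.
func registerBlockValidator(ps *pubsub.PubSub, self peer.ID, topic string, cns consensus.Consensus) error {
	return ps.RegisterTopicValidator(topic, func(ctx context.Context, from peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
		res, _ := cns.ValidateBlockPubsub(ctx, from == self, msg)
		return res
	})
}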


@ -1,38 +1,36 @@
package gen
package filcns
import (
"context"
"github.com/filecoin-project/go-state-types/crypto"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
)
func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
pts, err := sm.ChainStore().LoadTipSet(bt.Parents)
func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) {
pts, err := filec.sm.ChainStore().LoadTipSet(bt.Parents)
if err != nil {
return nil, xerrors.Errorf("failed to load parent tipset: %w", err)
}
st, recpts, err := sm.TipSetState(ctx, pts)
st, recpts, err := filec.sm.TipSetState(ctx, pts)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset state: %w", err)
}
_, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, sm, pts, bt.Epoch)
_, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, pts, bt.Epoch)
if err != nil {
return nil, xerrors.Errorf("getting lookback miner actor state: %w", err)
}
worker, err := stmgr.GetMinerWorkerRaw(ctx, sm, lbst, bt.Miner)
worker, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, bt.Miner)
if err != nil {
return nil, xerrors.Errorf("failed to get miner worker: %w", err)
}
@ -61,14 +59,14 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
blsSigs = append(blsSigs, msg.Signature)
blsMessages = append(blsMessages, &msg.Message)
c, err := sm.ChainStore().PutMessage(&msg.Message)
c, err := filec.sm.ChainStore().PutMessage(&msg.Message)
if err != nil {
return nil, err
}
blsMsgCids = append(blsMsgCids, c)
} else {
c, err := sm.ChainStore().PutMessage(msg)
c, err := filec.sm.ChainStore().PutMessage(msg)
if err != nil {
return nil, err
}
@ -79,12 +77,12 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
}
}
store := sm.ChainStore().ActorStore(ctx)
blsmsgroot, err := toArray(store, blsMsgCids)
store := filec.sm.ChainStore().ActorStore(ctx)
blsmsgroot, err := consensus.ToMessagesArray(store, blsMsgCids)
if err != nil {
return nil, xerrors.Errorf("building bls amt: %w", err)
}
secpkmsgroot, err := toArray(store, secpkMsgCids)
secpkmsgroot, err := consensus.ToMessagesArray(store, secpkMsgCids)
if err != nil {
return nil, xerrors.Errorf("building secpk amt: %w", err)
}
@ -98,19 +96,19 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
}
next.Messages = mmcid
aggSig, err := aggregateSignatures(blsSigs)
aggSig, err := consensus.AggregateSignatures(blsSigs)
if err != nil {
return nil, err
}
next.BLSAggregate = aggSig
pweight, err := sm.ChainStore().Weight(ctx, pts)
pweight, err := filec.sm.ChainStore().Weight(ctx, pts)
if err != nil {
return nil, err
}
next.ParentWeight = pweight
baseFee, err := sm.ChainStore().ComputeBaseFee(ctx, pts)
baseFee, err := filec.sm.ChainStore().ComputeBaseFee(ctx, pts)
if err != nil {
return nil, xerrors.Errorf("computing base fee: %w", err)
}
@ -138,41 +136,3 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.Wallet,
return fullBlock, nil
}
func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
sigsS := make([]ffi.Signature, len(sigs))
for i := 0; i < len(sigs); i++ {
copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes])
}
aggSig := ffi.Aggregate(sigsS)
if aggSig == nil {
if len(sigs) > 0 {
return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
}
zeroSig := ffi.CreateZeroSignature()
// Note: for blst this condition should not happen - nil should not
// be returned
return &crypto.Signature{
Type: crypto.SigTypeBLS,
Data: zeroSig[:],
}, nil
}
return &crypto.Signature{
Type: crypto.SigTypeBLS,
Data: aggSig[:],
}, nil
}
func toArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) {
arr := blockadt.MakeEmptyArray(store)
for i, c := range cids {
oc := cbg.CborCid(c)
if err := arr.Set(uint64(i), &oc); err != nil {
return cid.Undef, err
}
}
return arr.Root()
}


@ -1,4 +1,4 @@
package stmgr
package filcns
import (
"context"
@ -13,6 +13,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-state-types/rt"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
@ -31,15 +32,16 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
)
func DefaultUpgradeSchedule() UpgradeSchedule {
var us UpgradeSchedule
func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
var us stmgr.UpgradeSchedule
updates := []Upgrade{{
updates := []stmgr.Upgrade{{
Height: build.UpgradeBreezeHeight,
Network: network.Version1,
Migration: UpgradeFaucetBurnRecovery,
@ -88,7 +90,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Height: build.UpgradeTrustHeight,
Network: network.Version10,
Migration: UpgradeActorsV3,
PreMigrations: []PreMigration{{
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV3,
StartWithin: 120,
DontStartWithin: 60,
@ -108,7 +110,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Height: build.UpgradeTurboHeight,
Network: network.Version12,
Migration: UpgradeActorsV4,
PreMigrations: []PreMigration{{
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV4,
StartWithin: 120,
DontStartWithin: 60,
@ -124,7 +126,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Height: build.UpgradeHyperdriveHeight,
Network: network.Version13,
Migration: UpgradeActorsV5,
PreMigrations: []PreMigration{{
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV5,
StartWithin: 120,
DontStartWithin: 60,
@ -147,7 +149,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
return us
}
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, em ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, em stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Some initial parameters
FundsForMiners := types.FromFil(1_000_000)
LookbackEpoch := abi.ChainEpoch(32000)
@ -249,7 +251,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
// Execute transfers from previous step
for _, t := range transfers {
if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
if err := stmgr.DoTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
}
}
@ -352,7 +354,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
}
for _, t := range transfersBack {
if err := doTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
if err := stmgr.DoTransfer(tree, t.From, t.To, t.Amt, transferCb); err != nil {
return cid.Undef, xerrors.Errorf("transfer %s %s->%s failed: %w", t.Amt, t.From, t.To, err)
}
}
@ -362,7 +364,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load burnt funds actor: %w", err)
}
if err := doTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil {
if err := stmgr.DoTransfer(tree, builtin0.BurntFundsActorAddr, builtin.ReserveAddress, burntAct.Balance, transferCb); err != nil {
return cid.Undef, xerrors.Errorf("failed to unburn funds: %w", err)
}
@ -378,7 +380,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
}
difference := types.BigSub(DesiredReimbursementBalance, reimb.Balance)
if err := doTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil {
if err := stmgr.DoTransfer(tree, builtin.ReserveAddress, reimbAddr, difference, transferCb); err != nil {
return cid.Undef, xerrors.Errorf("failed to top up reimbursement account: %w", err)
}
@ -400,14 +402,14 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
if em != nil {
// record the transfer in execution traces
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
fakeMsg := stmgr.MakeFakeMsg(builtin.SystemActorAddr, builtin.SystemActorAddr, big.Zero(), uint64(epoch))
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *makeFakeRct(),
MessageReceipt: *stmgr.MakeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: fakeMsg,
MsgRct: makeFakeRct(),
MsgRct: stmgr.MakeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
@ -423,8 +425,8 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ Migratio
return tree.Flush(ctx)
}
func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
store := sm.cs.ActorStore(ctx)
func UpgradeIgnition(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
store := sm.ChainStore().ActorStore(ctx)
if build.UpgradeLiftoffHeight <= epoch {
return cid.Undef, xerrors.Errorf("liftoff height must be beyond ignition height")
@ -440,7 +442,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
}
err = setNetworkName(ctx, store, tree, "ignition")
err = stmgr.SetNetworkName(ctx, store, tree, "ignition")
if err != nil {
return cid.Undef, xerrors.Errorf("setting network name: %w", err)
}
@ -478,7 +480,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb
return tree.Flush(ctx)
}
func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
func splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr address.Address, store adt0.Store, tree *state.StateTree, portions uint64, epoch abi.ChainEpoch, ts *types.TipSet) error {
if portions < 1 {
return xerrors.Errorf("cannot split into 0 portions")
}
@ -553,7 +555,7 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
}
for i < portions {
keyAddr, err := makeKeyAddr(addr, i)
keyAddr, err := stmgr.MakeKeyAddr(addr, i)
if err != nil {
return xerrors.Errorf("creating key address: %w", err)
}
@ -568,7 +570,7 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
return xerrors.Errorf("setting new msig actor state: %w", err)
}
if err := doTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil {
if err := stmgr.DoTransfer(tree, addr, idAddr, newIbal, transferCb); err != nil {
return xerrors.Errorf("transferring split msig balance: %w", err)
}
@ -578,14 +580,14 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
if em != nil {
// record the transfer in execution traces
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
fakeMsg := stmgr.MakeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *makeFakeRct(),
MessageReceipt: *stmgr.MakeFakeRct(),
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{
Msg: fakeMsg,
MsgRct: makeFakeRct(),
MsgRct: stmgr.MakeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
@ -602,8 +604,8 @@ func splitGenesisMultisig0(ctx context.Context, em ExecMonitor, addr address.Add
}
// TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting
func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
gb, err := sm.cs.GetGenesis()
func resetGenesisMsigs0(ctx context.Context, sm *stmgr.StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error {
gb, err := sm.ChainStore().GetGenesis()
if err != nil {
return xerrors.Errorf("getting genesis block: %w", err)
}
@ -613,7 +615,7 @@ func resetGenesisMsigs0(ctx context.Context, sm *StateManager, store adt0.Store,
return xerrors.Errorf("getting genesis tipset: %w", err)
}
cst := cbor.NewCborStore(sm.cs.StateBlockstore())
cst := cbor.NewCborStore(sm.ChainStore().StateBlockstore())
genesisTree, err := state.LoadStateTree(cst, gts.ParentState())
if err != nil {
return xerrors.Errorf("loading state tree: %w", err)
@ -683,9 +685,9 @@ func resetMultisigVesting0(ctx context.Context, store adt0.Store, tree *state.St
return nil
}
func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeRefuel(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
store := sm.cs.ActorStore(ctx)
store := sm.ChainStore().ActorStore(ctx)
tree, err := sm.StateTree(root)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
@ -709,8 +711,8 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
return tree.Flush(ctx)
}
func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
func UpgradeActorsV2(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
info, err := store.Put(ctx, new(types.StateInfo0))
@ -755,13 +757,13 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb
return newRoot, nil
}
func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeLiftoff(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
tree, err := sm.StateTree(root)
if err != nil {
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
}
err = setNetworkName(ctx, sm.cs.ActorStore(ctx), tree, "mainnet")
err = stmgr.SetNetworkName(ctx, sm.ChainStore().ActorStore(ctx), tree, "mainnet")
if err != nil {
return cid.Undef, xerrors.Errorf("setting network name: %w", err)
}
@ -769,12 +771,12 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb
return tree.Flush(ctx)
}
func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeCalico(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
if build.BuildType != build.BuildMainnet {
return root, nil
}
store := sm.cs.ActorStore(ctx)
store := sm.ChainStore().ActorStore(ctx)
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
@ -815,7 +817,7 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb E
return newRoot, nil
}
func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeActorsV3(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := runtime.NumCPU() - 3
if workerCount <= 0 {
@ -839,7 +841,7 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache
}
if build.BuildType == build.BuildMainnet {
err := terminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
err := stmgr.TerminateActor(ctx, tree, build.ZeroAddress, cb, epoch, ts)
if err != nil && !xerrors.Is(err, types.ErrActorNotFound) {
return cid.Undef, xerrors.Errorf("deleting zero bls actor: %w", err)
}
@ -853,7 +855,7 @@ func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache
return newRoot, nil
}
func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
func PreUpgradeActorsV3(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := runtime.NumCPU()
if workerCount <= 4 {
@ -867,11 +869,11 @@ func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCa
}
func upgradeActorsV3Common(
ctx context.Context, sm *StateManager, cache MigrationCache,
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv10.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// Load the state root.
@ -917,7 +919,7 @@ func upgradeActorsV3Common(
return newRoot, nil
}
func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeActorsV4(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := runtime.NumCPU() - 3
if workerCount <= 0 {
@ -939,7 +941,7 @@ func UpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache
return newRoot, nil
}
func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
func PreUpgradeActorsV4(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := runtime.NumCPU()
if workerCount <= 4 {
@ -953,11 +955,11 @@ func PreUpgradeActorsV4(ctx context.Context, sm *StateManager, cache MigrationCa
}
func upgradeActorsV4Common(
ctx context.Context, sm *StateManager, cache MigrationCache,
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv12.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// Load the state root.
@ -1003,7 +1005,7 @@ func upgradeActorsV4Common(
return newRoot, nil
}
func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
func UpgradeActorsV5(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := runtime.NumCPU() - 3
if workerCount <= 0 {
@ -1025,7 +1027,7 @@ func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache
return newRoot, nil
}
func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
func PreUpgradeActorsV5(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := runtime.NumCPU()
if workerCount <= 4 {
@ -1039,11 +1041,11 @@ func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCa
}
func upgradeActorsV5Common(
ctx context.Context, sm *StateManager, cache MigrationCache,
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv13.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// Load the state root.
@ -1088,3 +1090,18 @@ func upgradeActorsV5Common(
return newRoot, nil
}
type migrationLogger struct{}
func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) {
switch level {
case rt.DEBUG:
log.Debugf(msg, args...)
case rt.INFO:
log.Infof(msg, args...)
case rt.WARN:
log.Warnf(msg, args...)
case rt.ERROR:
log.Errorf(msg, args...)
}
}


@ -1,22 +1,25 @@
package store
package filcns
import (
"context"
"math/big"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
big2 "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
big2 "github.com/filecoin-project/go-state-types/big"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
var zero = types.NewInt(0)
func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigInt, error) {
func Weight(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error) {
if ts == nil {
return types.NewInt(0), nil
}
@ -28,7 +31,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
tpow := big2.Zero()
{
cst := cbor.NewCborStore(cs.StateBlockstore())
cst := cbor.NewCborStore(stateBs)
state, err := state.LoadStateTree(cst, ts.ParentState())
if err != nil {
return types.NewInt(0), xerrors.Errorf("load state tree: %w", err)
@ -39,7 +42,7 @@ func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigIn
return types.NewInt(0), xerrors.Errorf("get power actor: %w", err)
}
powState, err := power.Load(cs.ActorStore(ctx), act)
powState, err := power.Load(store.ActorStore(ctx, stateBs), act)
if err != nil {
return types.NewInt(0), xerrors.Errorf("failed to load power actor state: %w", err)
}

19 chain/consensus/iface.go Normal file
View File

@ -0,0 +1,19 @@
package consensus
import (
"context"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
)
type Consensus interface {
ValidateBlock(ctx context.Context, b *types.FullBlock) (err error)
ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string)
IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool
CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error)
}
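A minimal sketch of a caller consuming the new Consensus interface, assuming only the methods declared above; validateIncoming and its logging are illustrative, not part of the change:

package sketch

import (
	"context"
	"fmt"

	pubsub "github.com/libp2p/go-libp2p-pubsub"

	"github.com/filecoin-project/lotus/chain/consensus"
)

// validateIncoming delegates block validation to whatever Consensus
// implementation it was given and surfaces the rejection reason string,
// which is what the block validator records as a failure metric.
func validateIncoming(ctx context.Context, cns consensus.Consensus, self bool, msg *pubsub.Message) pubsub.ValidationResult {
	res, reason := cns.ValidateBlockPubsub(ctx, self, msg)
	if res != pubsub.ValidationAccept && reason != "" {
		fmt.Printf("block not accepted: %s\n", reason)
	}
	return res
}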

83 chain/consensus/utils.go Normal file
View File

@ -0,0 +1,83 @@
package consensus
import (
"context"
"errors"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/crypto"
)
var ErrTemporal = errors.New("temporal error")
func VerifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
_, span := trace.StartSpan(ctx, "syncer.VerifyBlsAggregate")
defer span.End()
span.AddAttributes(
trace.Int64Attribute("msgCount", int64(len(msgs))),
)
msgsS := make([]ffi.Message, len(msgs))
pubksS := make([]ffi.PublicKey, len(msgs))
for i := 0; i < len(msgs); i++ {
msgsS[i] = msgs[i].Bytes()
copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
}
sigS := new(ffi.Signature)
copy(sigS[:], sig.Data[:ffi.SignatureBytes])
if len(msgs) == 0 {
return nil
}
valid := ffi.HashVerify(sigS, msgsS, pubksS)
if !valid {
return xerrors.New("bls aggregate signature failed to verify")
}
return nil
}
func AggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
sigsS := make([]ffi.Signature, len(sigs))
for i := 0; i < len(sigs); i++ {
copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes])
}
aggSig := ffi.Aggregate(sigsS)
if aggSig == nil {
if len(sigs) > 0 {
return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
}
zeroSig := ffi.CreateZeroSignature()
// Note: for blst this condition should not happen - nil should not
// be returned
return &crypto.Signature{
Type: crypto.SigTypeBLS,
Data: zeroSig[:],
}, nil
}
return &crypto.Signature{
Type: crypto.SigTypeBLS,
Data: aggSig[:],
}, nil
}
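A hedged sketch of how AggregateSignatures might be fed in practice: collect the BLS signatures from a batch of signed messages and fold them into the single aggregate a block carries for its BLS message set (aggregateBlockSigs is illustrative):

package sketch

import (
	"github.com/filecoin-project/go-state-types/crypto"

	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/types"
)

// aggregateBlockSigs picks out the BLS signatures from the given messages
// and aggregates them with the helper above.
func aggregateBlockSigs(msgs []*types.SignedMessage) (*crypto.Signature, error) {
	sigs := make([]crypto.Signature, 0, len(msgs))
	for _, m := range msgs {
		if m.Signature.Type == crypto.SigTypeBLS {
			sigs = append(sigs, m.Signature)
		}
	}
	return consensus.AggregateSignatures(sigs)
}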
func ToMessagesArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) {
arr := blockadt.MakeEmptyArray(store)
for i, c := range cids {
oc := cbg.CborCid(c)
if err := arr.Set(uint64(i), &oc); err != nil {
return cid.Undef, err
}
}
return arr.Root()
}
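And a sketch of building the messages AMT with ToMessagesArray, reusing the in-memory adt store pattern that appears later in this diff (messagesRoot is illustrative):

package sketch

import (
	"context"

	blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/consensus"
)

// messagesRoot wraps an in-memory blockstore as a block-header adt store and
// asks ToMessagesArray for the AMT root of the given message CIDs.
func messagesRoot(ctx context.Context, msgCids []cid.Cid) (cid.Cid, error) {
	store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewMemory()))
	return consensus.ToMessagesArray(store, msgCids)
}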

View File

@ -151,12 +151,20 @@ func (c *client) doRequest(
// errors. Peer penalization should happen here then, before returning, so
// we can apply the correct penalties depending on the cause of the error.
// FIXME: Add the `peer` as argument once we implement penalties.
func (c *client) processResponse(req *Request, res *Response, tipsets []*types.TipSet) (*validatedResponse, error) {
err := res.statusToError()
func (c *client) processResponse(req *Request, res *Response, tipsets []*types.TipSet) (r *validatedResponse, err error) {
err = res.statusToError()
if err != nil {
return nil, xerrors.Errorf("status error: %s", err)
}
defer func() {
if rerr := recover(); rerr != nil {
log.Errorf("process response error: %s", rerr)
err = xerrors.Errorf("process response error: %s", rerr)
return
}
}()
options := parseOptions(req.Options)
if options.noOptionsSet() {
// Safety check: this shouldn't have been sent, and even if it did
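The change above turns panics inside processResponse into returned errors by having a deferred recover write to the named return value. The same pattern in isolation, with an invented function for illustration:

package sketch

import (
	"golang.org/x/xerrors"
)

// firstByte mirrors the pattern: a deferred recover() assigns to the named
// return value, so a panic inside the function surfaces as an ordinary error.
func firstByte(data []byte) (b byte, err error) {
	defer func() {
		if rerr := recover(); rerr != nil {
			err = xerrors.Errorf("first byte: %s", rerr)
		}
	}()
	b = data[0] // panics on empty input instead of returning an error
	return b, nil
}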

View File

@ -23,7 +23,6 @@ import (
logging "github.com/ipfs/go-log/v2"
"github.com/ipfs/go-merkledag"
"github.com/ipld/go-car"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
@ -33,6 +32,7 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -43,7 +43,6 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/genesis"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/node/repo"
)
@ -233,7 +232,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
return nil, xerrors.Errorf("make genesis block failed: %w", err)
}
cs := store.NewChainStore(bs, bs, ds, j)
cs := store.NewChainStore(bs, bs, ds, filcns.Weight, j)
genfb := &types.FullBlock{Header: genb.Genesis}
gents := store.NewFullTipSet([]*types.FullBlock{genfb})
@ -247,7 +246,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{}
}
sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, sys, us)
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), sys, us)
if err != nil {
return nil, xerrors.Errorf("initing stmgr: %w", err)
}
@ -289,7 +288,7 @@ func NewGenerator() (*ChainGen, error) {
}
func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return NewGeneratorWithSectorsAndUpgradeSchedule(numSectors, stmgr.DefaultUpgradeSchedule())
return NewGeneratorWithSectorsAndUpgradeSchedule(numSectors, filcns.DefaultUpgradeSchedule())
}
func NewGeneratorWithUpgradeSchedule(us stmgr.UpgradeSchedule) (*ChainGen, error) {
@ -487,7 +486,7 @@ func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticke
ts = parents.MinTimestamp() + uint64(height-parents.Height())*build.BlockDelaySecs
}
fblk, err := MinerCreateBlock(context.TODO(), cg.sm, cg.w, &api.BlockTemplate{
fblk, err := filcns.NewFilecoinExpectedConsensus(cg.sm, nil, nil, nil).CreateBlock(context.TODO(), cg.w, &api.BlockTemplate{
Miner: m,
Parents: parents.Key(),
Ticket: vrfticket,
@ -667,22 +666,6 @@ func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
type SignFunc func(context.Context, address.Address, []byte) (*crypto.Signature, error)
func VerifyVRF(ctx context.Context, worker address.Address, vrfBase, vrfproof []byte) error {
_, span := trace.StartSpan(ctx, "VerifyVRF")
defer span.End()
sig := &crypto.Signature{
Type: crypto.SigTypeBLS,
Data: vrfproof,
}
if err := sigs.Verify(sig, worker, vrfBase); err != nil {
return xerrors.Errorf("vrf was invalid: %w", err)
}
return nil
}
func ComputeVRF(ctx context.Context, sign SignFunc, worker address.Address, sigInput []byte) ([]byte, error) {
sig, err := sign(ctx, worker, sigInput)
if err != nil {

View File

@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
@ -222,7 +223,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, xerrors.Errorf("set verified registry actor: %w", err)
}
bact, err := makeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero())
bact, err := MakeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero())
if err != nil {
return nil, nil, xerrors.Errorf("setup burnt funds actor state: %w", err)
}
@ -235,7 +236,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
switch info.Type {
case genesis.TAccount:
if err := createAccountActor(ctx, cst, state, info, keyIDs, av); err != nil {
if err := CreateAccountActor(ctx, cst, state, info, keyIDs, av); err != nil {
return nil, nil, xerrors.Errorf("failed to create account actor: %w", err)
}
@ -247,7 +248,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
}
idStart++
if err := createMultisigAccount(ctx, cst, state, ida, info, keyIDs, av); err != nil {
if err := CreateMultisigAccount(ctx, cst, state, ida, info, keyIDs, av); err != nil {
return nil, nil, err
}
default:
@ -268,7 +269,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, fmt.Errorf("rootkey account has already been declared, cannot be assigned 80: %s", ainfo.Owner)
}
vact, err := makeAccountActor(ctx, cst, av, ainfo.Owner, template.VerifregRootKey.Balance)
vact, err := MakeAccountActor(ctx, cst, av, ainfo.Owner, template.VerifregRootKey.Balance)
if err != nil {
return nil, nil, xerrors.Errorf("setup verifreg rootkey account state: %w", err)
}
@ -276,7 +277,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, xerrors.Errorf("set verifreg rootkey account actor: %w", err)
}
case genesis.TMultisig:
if err = createMultisigAccount(ctx, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs, av); err != nil {
if err = CreateMultisigAccount(ctx, cst, state, builtin.RootVerifierAddress, template.VerifregRootKey, keyIDs, av); err != nil {
return nil, nil, xerrors.Errorf("failed to set up verified registry signer: %w", err)
}
default:
@ -305,7 +306,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return nil, nil, err
}
verifierAct, err := makeAccountActor(ctx, cst, av, verifierAd, big.Zero())
verifierAct, err := MakeAccountActor(ctx, cst, av, verifierAd, big.Zero())
if err != nil {
return nil, nil, xerrors.Errorf("setup first verifier state: %w", err)
}
@ -348,13 +349,13 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
}
keyIDs[ainfo.Owner] = builtin.ReserveAddress
err = createAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs, av)
err = CreateAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs, av)
if err != nil {
return nil, nil, xerrors.Errorf("creating remainder acct: %w", err)
}
case genesis.TMultisig:
if err = createMultisigAccount(ctx, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs, av); err != nil {
if err = CreateMultisigAccount(ctx, cst, state, builtin.ReserveAddress, template.RemainderAccount, keyIDs, av); err != nil {
return nil, nil, xerrors.Errorf("failed to set up remainder: %w", err)
}
default:
@ -364,7 +365,7 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
return state, keyIDs, nil
}
func makeAccountActor(ctx context.Context, cst cbor.IpldStore, av actors.Version, addr address.Address, bal types.BigInt) (*types.Actor, error) {
func MakeAccountActor(ctx context.Context, cst cbor.IpldStore, av actors.Version, addr address.Address, bal types.BigInt) (*types.Actor, error) {
ast, err := account.MakeState(adt.WrapStore(ctx, cst), av, addr)
if err != nil {
return nil, err
@ -389,13 +390,13 @@ func makeAccountActor(ctx context.Context, cst cbor.IpldStore, av actors.Version
return act, nil
}
func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
func CreateAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
var ainfo genesis.AccountMeta
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
return xerrors.Errorf("unmarshaling account meta: %w", err)
}
aa, err := makeAccountActor(ctx, cst, av, ainfo.Owner, info.Balance)
aa, err := MakeAccountActor(ctx, cst, av, ainfo.Owner, info.Balance)
if err != nil {
return err
}
@ -412,9 +413,9 @@ func createAccountActor(ctx context.Context, cst cbor.IpldStore, state *state.St
return nil
}
func createMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
func CreateMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *state.StateTree, ida address.Address, info genesis.Actor, keyIDs map[address.Address]address.Address, av actors.Version) error {
if info.Type != genesis.TMultisig {
return fmt.Errorf("can only call createMultisigAccount with multisig Actor info")
return fmt.Errorf("can only call CreateMultisigAccount with multisig Actor info")
}
var ainfo genesis.MultisigMeta
if err := json.Unmarshal(info.Meta, &ainfo); err != nil {
@ -436,7 +437,7 @@ func createMultisigAccount(ctx context.Context, cst cbor.IpldStore, state *state
continue
}
aa, err := makeAccountActor(ctx, cst, av, e, big.Zero())
aa, err := MakeAccountActor(ctx, cst, av, e, big.Zero())
if err != nil {
return err
}
@ -483,6 +484,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca
Epoch: 0,
Rand: &fakeRand{},
Bstore: cs.StateBlockstore(),
Actors: filcns.NewActorRegistry(),
Syscalls: mkFakedSigSyscalls(sys),
CircSupplyCalc: nil,
NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version {
@ -562,7 +564,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto
}
// temp chainstore
cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), j)
cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil, j)
// Verify PreSealed Data
stateroot, err = VerifyPreSealedData(ctx, cs, sys, stateroot, template, keyIDs, template.NetworkVersion)

View File

@ -37,6 +37,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -87,6 +88,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
Epoch: 0,
Rand: &fakeRand{},
Bstore: cs.StateBlockstore(),
Actors: filcns.NewActorRegistry(),
Syscalls: mkFakedSigSyscalls(sys),
CircSupplyCalc: csc,
NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version {

View File

@ -354,7 +354,7 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
return set
}
func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
func New(api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
@ -366,7 +366,6 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
if j == nil {
j = journal.NilJournal()
}
us := stmgr.DefaultUpgradeSchedule()
mp := &MessagePool{
ds: ds,

View File

@ -11,17 +11,18 @@ import (
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/assert"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet"
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
"github.com/stretchr/testify/assert"
)
func init() {
@ -232,7 +233,7 @@ func TestMessagePool(t *testing.T) {
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
@ -276,7 +277,7 @@ func TestCheckMessageBig(t *testing.T) {
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
assert.NoError(t, err)
to := mock.Address(1001)
@ -339,7 +340,7 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) {
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
@ -388,7 +389,7 @@ func TestRevertMessages(t *testing.T) {
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
@ -451,7 +452,7 @@ func TestPruningSimple(t *testing.T) {
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
@ -495,7 +496,7 @@ func TestLoadLocal(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
@ -538,7 +539,7 @@ func TestLoadLocal(t *testing.T) {
t.Fatal(err)
}
mp, err = New(tma, ds, "mptest", nil)
mp, err = New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
@ -567,7 +568,7 @@ func TestClearAll(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
@ -621,7 +622,7 @@ func TestClearNonLocal(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}
@ -682,7 +683,7 @@ func TestUpdates(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}

View File

@ -9,6 +9,7 @@ import (
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet"
@ -24,7 +25,7 @@ func TestRepubMessages(t *testing.T) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "mptest", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil)
if err != nil {
t.Fatal(err)
}

View File

@ -20,8 +20,6 @@ import (
var bigBlockGasLimit = big.NewInt(build.BlockGasLimit)
var MaxBlockMessages = 16000
const MaxBlocks = 15
type msgChain struct {
@ -58,8 +56,8 @@ func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq
return nil, err
}
if len(msgs) > MaxBlockMessages {
msgs = msgs[:MaxBlockMessages]
if len(msgs) > build.BlockMessageLimit {
msgs = msgs[:build.BlockMessageLimit]
}
return msgs, nil

View File

@ -21,6 +21,7 @@ import (
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/messagepool/gasguess"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
@ -60,7 +61,7 @@ func makeTestMessage(w *wallet.LocalWallet, from, to address.Address, nonce uint
func makeTestMpool() (*MessagePool, *testMpoolAPI) {
tma := newTestMpoolAPI()
ds := datastore.NewMapDatastore()
mp, err := New(tma, ds, "test", nil)
mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "test", nil)
if err != nil {
panic(err)
}

View File

@ -66,7 +66,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
bstate := ts.ParentState()
// Run the (not expensive) migration.
bstate, err := sm.handleStateForks(ctx, bstate, pheight, nil, ts)
bstate, err := sm.HandleStateForks(ctx, bstate, pheight, nil, ts)
if err != nil {
return nil, fmt.Errorf("failed to handle fork: %w", err)
}
@ -76,7 +76,8 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
Epoch: pheight + 1,
Rand: store.NewChainRand(sm.cs, ts.Cids()),
Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.syscalls,
Actors: sm.tsExec.NewActorRegistry(),
Syscalls: sm.Syscalls,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: types.NewInt(0),
@ -179,7 +180,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
}
// Technically, the tipset we're passing in here should be ts+1, but that may not exist.
state, err = sm.handleStateForks(ctx, state, ts.Height(), nil, ts)
state, err = sm.HandleStateForks(ctx, state, ts.Height(), nil, ts)
if err != nil {
return nil, fmt.Errorf("failed to handle fork: %w", err)
}
@ -199,7 +200,8 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
Epoch: ts.Height() + 1,
Rand: r,
Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.syscalls,
Actors: sm.tsExec.NewActorRegistry(),
Syscalls: sm.Syscalls,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: ts.Blocks()[0].ParentBaseFee,
@ -272,7 +274,7 @@ func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.C
// message to find
finder.mcid = mcid
_, _, err := sm.computeTipSetState(ctx, ts, &finder)
_, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, &finder)
if err != nil && !xerrors.Is(err, errHaltExecution) {
return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err)
}

View File

@ -3,217 +3,14 @@ package stmgr
import (
"context"
"fmt"
"sync/atomic"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/cron"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/metrics"
)
func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
defer done()
partDone := metrics.Timer(ctx, metrics.VMApplyEarly)
defer func() {
partDone()
}()
makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) {
vmopt := &vm.VMOpts{
StateBase: base,
Epoch: epoch,
Rand: r,
Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.syscalls,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: baseFee,
LookbackState: LookbackStateGetterForTipset(sm, ts),
}
return sm.newVM(ctx, vmopt)
}
vmi, err := makeVmWithBaseState(pstate)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
}
runCron := func(epoch abi.ChainEpoch) error {
cronMsg := &types.Message{
To: cron.Address,
From: builtin.SystemActorAddr,
Nonce: uint64(epoch),
Value: types.NewInt(0),
GasFeeCap: types.NewInt(0),
GasPremium: types.NewInt(0),
GasLimit: build.BlockGasLimit * 10000, // Make super sure this is never too little
Method: cron.Methods.EpochTick,
Params: nil,
}
ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg)
if err != nil {
return err
}
if em != nil {
if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
return xerrors.Errorf("callback failed on cron message: %w", err)
}
}
if ret.ExitCode != 0 {
return xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode)
}
return nil
}
for i := parentEpoch; i < epoch; i++ {
if i > parentEpoch {
// run cron for null rounds if any
if err := runCron(i); err != nil {
return cid.Undef, cid.Undef, err
}
pstate, err = vmi.Flush(ctx)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err)
}
}
// handle state forks
newState, err := sm.handleStateForks(ctx, pstate, i, em, ts)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
}
if pstate != newState {
vmi, err = makeVmWithBaseState(newState)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
}
}
vmi.SetBlockHeight(i + 1)
pstate = newState
}
partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyMessages)
var receipts []cbg.CBORMarshaler
processedMsgs := make(map[cid.Cid]struct{})
for _, b := range bms {
penalty := types.NewInt(0)
gasReward := big.Zero()
for _, cm := range append(b.BlsMessages, b.SecpkMessages...) {
m := cm.VMMessage()
if _, found := processedMsgs[m.Cid()]; found {
continue
}
r, err := vmi.ApplyMessage(ctx, cm)
if err != nil {
return cid.Undef, cid.Undef, err
}
receipts = append(receipts, &r.MessageReceipt)
gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
if em != nil {
if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil {
return cid.Undef, cid.Undef, err
}
}
processedMsgs[m.Cid()] = struct{}{}
}
params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{
Miner: b.Miner,
Penalty: penalty,
GasReward: gasReward,
WinCount: b.WinCount,
})
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err)
}
rwMsg := &types.Message{
From: builtin.SystemActorAddr,
To: reward.Address,
Nonce: uint64(epoch),
Value: types.NewInt(0),
GasFeeCap: types.NewInt(0),
GasPremium: types.NewInt(0),
GasLimit: 1 << 30,
Method: reward.Methods.AwardBlockReward,
Params: params,
}
ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg)
if actErr != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr)
}
if em != nil {
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err)
}
}
if ret.ExitCode != 0 {
return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr)
}
}
partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyCron)
if err := runCron(epoch); err != nil {
return cid.Cid{}, cid.Cid{}, err
}
partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyFlush)
rectarr := blockadt.MakeEmptyArray(sm.cs.ActorStore(ctx))
for i, receipt := range receipts {
if err := rectarr.Set(uint64(i), receipt); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
}
}
rectroot, err := rectarr.Root()
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
}
st, err := vmi.Flush(ctx)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err)
}
stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))),
metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied))))
return st, rectroot, nil
}
func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
ctx, span := trace.StartSpan(ctx, "tipSetState")
defer span.End()
@ -263,7 +60,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
}
st, rec, err = sm.computeTipSetState(ctx, ts, sm.tsExecMonitor)
st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor)
if err != nil {
return cid.Undef, cid.Undef, err
}
@ -272,7 +69,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
}
func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) {
st, _, err := sm.computeTipSetState(ctx, ts, em)
st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em)
return st, err
}
@ -284,42 +81,3 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
}
return st, invocTrace, nil
}
func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, cid.Cid, error) {
ctx, span := trace.StartSpan(ctx, "computeTipSetState")
defer span.End()
blks := ts.Blocks()
for i := 0; i < len(blks); i++ {
for j := i + 1; j < len(blks); j++ {
if blks[i].Miner == blks[j].Miner {
return cid.Undef, cid.Undef,
xerrors.Errorf("duplicate miner in a tipset (%s %s)",
blks[i].Miner, blks[j].Miner)
}
}
}
var parentEpoch abi.ChainEpoch
pstate := blks[0].ParentStateRoot
if blks[0].Height > 0 {
parent, err := sm.cs.GetBlock(blks[0].Parents[0])
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err)
}
parentEpoch = parent.Height
}
r := store.NewChainRand(sm.cs, ts.Cids())
blkmsgs, err := sm.cs.BlockMsgsForTipset(ts)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err)
}
baseFee := blks[0].ParentBaseFee
return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts)
}

View File

@ -15,8 +15,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-state-types/rt"
"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
"github.com/filecoin-project/lotus/chain/actors/adt"
@ -98,21 +96,6 @@ type Upgrade struct {
type UpgradeSchedule []Upgrade
type migrationLogger struct{}
func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) {
switch level {
case rt.DEBUG:
log.Debugf(msg, args...)
case rt.INFO:
log.Infof(msg, args...)
case rt.WARN:
log.Warnf(msg, args...)
case rt.ERROR:
log.Errorf(msg, args...)
}
}
func (us UpgradeSchedule) Validate() error {
// Make sure each upgrade is valid.
for _, u := range us {
@ -181,7 +164,7 @@ func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, err
return network.Version0, xerrors.Errorf("Epoch %d has no defined network version", e)
}
func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
retCid := root
var err error
u := sm.stateMigrations[height]
@ -331,7 +314,7 @@ func (sm *StateManager) preMigrationWorker(ctx context.Context) {
}
}
func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error {
func DoTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error {
fromAct, err := tree.GetActor(from)
if err != nil {
return xerrors.Errorf("failed to get 'from' actor for transfer: %w", err)
@ -361,8 +344,8 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
// record the transfer in execution traces
cb(types.ExecutionTrace{
Msg: makeFakeMsg(from, to, amt, 0),
MsgRct: makeFakeRct(),
Msg: MakeFakeMsg(from, to, amt, 0),
MsgRct: MakeFakeRct(),
Error: "",
Duration: 0,
GasCharges: nil,
@ -373,7 +356,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
return nil
}
func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error {
func TerminateActor(ctx context.Context, tree *state.StateTree, addr address.Address, em ExecMonitor, epoch abi.ChainEpoch, ts *types.TipSet) error {
a, err := tree.GetActor(addr)
if xerrors.Is(err, types.ErrActorNotFound) {
return types.ErrActorNotFound
@ -382,7 +365,7 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
}
var trace types.ExecutionTrace
if err := doTransfer(tree, addr, builtin.BurntFundsActorAddr, a.Balance, func(t types.ExecutionTrace) {
if err := DoTransfer(tree, addr, builtin.BurntFundsActorAddr, a.Balance, func(t types.ExecutionTrace) {
trace = t
}); err != nil {
return xerrors.Errorf("transferring terminated actor's balance: %w", err)
@ -391,10 +374,10 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
if em != nil {
// record the transfer in execution traces
fakeMsg := makeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
fakeMsg := MakeFakeMsg(builtin.SystemActorAddr, addr, big.Zero(), uint64(epoch))
if err := em.MessageApplied(ctx, ts, fakeMsg.Cid(), fakeMsg, &vm.ApplyRet{
MessageReceipt: *makeFakeRct(),
MessageReceipt: *MakeFakeRct(),
ActorErr: nil,
ExecutionTrace: trace,
Duration: 0,
@ -433,7 +416,7 @@ func terminateActor(ctx context.Context, tree *state.StateTree, addr address.Add
return tree.SetActor(init_.Address, ia)
}
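With DoTransfer, TerminateActor and the other fork helpers in this file exported, code outside stmgr (for example, upgrade logic in the consensus package) can reuse them. A hedged sketch of such a caller; burnBalance and its arguments are invented:

package sketch

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
)

// burnBalance moves amt from the given actor to the burnt-funds actor and
// captures the synthetic execution trace that DoTransfer hands to its callback.
func burnBalance(tree types.StateTree, from address.Address, amt abi.TokenAmount) (types.ExecutionTrace, error) {
	var trace types.ExecutionTrace
	err := stmgr.DoTransfer(tree, from, builtin.BurntFundsActorAddr, amt, func(t types.ExecutionTrace) {
		trace = t
	})
	return trace, err
}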
func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
func SetNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
ia, err := tree.GetActor(init_.Address)
if err != nil {
return xerrors.Errorf("getting init actor: %w", err)
@ -460,7 +443,7 @@ func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree,
return nil
}
func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) {
func MakeKeyAddr(splitAddr address.Address, count uint64) (address.Address, error) {
var b bytes.Buffer
if err := splitAddr.MarshalCBOR(&b); err != nil {
return address.Undef, xerrors.Errorf("marshalling split address: %w", err)
@ -482,7 +465,7 @@ func makeKeyAddr(splitAddr address.Address, count uint64) (address.Address, erro
return addr, nil
}
func makeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount, nonce uint64) *types.Message {
func MakeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount, nonce uint64) *types.Message {
return &types.Message{
From: from,
To: to,
@ -491,7 +474,7 @@ func makeFakeMsg(from address.Address, to address.Address, amt abi.TokenAmount,
}
}
func makeFakeRct() *types.MessageReceipt {
func MakeFakeRct() *types.MessageReceipt {
return &types.MessageReceipt{
ExitCode: 0,
Return: nil,

View File

@ -28,6 +28,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/aerrors"
_init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
. "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
@ -120,8 +121,8 @@ func TestForkHeightTriggers(t *testing.T) {
t.Fatal(err)
}
sm, err := NewStateManagerWithUpgradeSchedule(
cg.ChainStore(), cg.StateManager().VMSys(), UpgradeSchedule{{
sm, err := NewStateManager(
cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{
Network: network.Version1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
@ -162,7 +163,7 @@ func TestForkHeightTriggers(t *testing.T) {
t.Fatal(err)
}
inv := vm.NewActorRegistry()
inv := filcns.NewActorRegistry()
inv.Register(nil, testActor{})
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
@ -263,8 +264,8 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
}
var migrationCount int
sm, err := NewStateManagerWithUpgradeSchedule(
cg.ChainStore(), cg.StateManager().VMSys(), UpgradeSchedule{{
sm, err := NewStateManager(
cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{
Network: network.Version1,
Expensive: true,
Height: testForkHeight,
@ -277,7 +278,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
t.Fatal(err)
}
inv := vm.NewActorRegistry()
inv := filcns.NewActorRegistry()
inv.Register(nil, testActor{})
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
@ -398,8 +399,8 @@ func TestForkPreMigration(t *testing.T) {
counter := make(chan struct{}, 10)
sm, err := NewStateManagerWithUpgradeSchedule(
cg.ChainStore(), cg.StateManager().VMSys(), UpgradeSchedule{{
sm, err := NewStateManager(
cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{
Network: network.Version1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
@ -496,7 +497,7 @@ func TestForkPreMigration(t *testing.T) {
require.NoError(t, sm.Stop(context.Background()))
}()
inv := vm.NewActorRegistry()
inv := filcns.NewActorRegistry()
inv.Register(nil, testActor{})
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {

View File

@ -2,7 +2,6 @@ package stmgr
import (
"context"
"fmt"
"sync"
"github.com/ipfs/go-cid"
@ -52,6 +51,11 @@ type migration struct {
cache *nv10.MemMigrationCache
}
type Executor interface {
NewActorRegistry() *vm.ActorRegistry
ExecuteTipSet(ctx context.Context, sm *StateManager, ts *types.TipSet, em ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error)
}
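A minimal stand-in that satisfies the Executor interface above, purely to show its shape; nullExecutor is illustrative and performs no real execution (the filcns tipset executor is the production implementation):

package sketch

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)

// nullExecutor satisfies stmgr.Executor but performs no execution: it echoes
// the parent state and receipts roots of the tipset's first block.
type nullExecutor struct{}

func (nullExecutor) NewActorRegistry() *vm.ActorRegistry {
	return vm.NewActorRegistry()
}

func (nullExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet, em stmgr.ExecMonitor) (cid.Cid, cid.Cid, error) {
	return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
}

var _ stmgr.Executor = nullExecutor{}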
type StateManager struct {
cs *store.ChainStore
@ -75,7 +79,7 @@ type StateManager struct {
stlk sync.Mutex
genesisMsigLk sync.Mutex
newVM func(context.Context, *vm.VMOpts) (*vm.VM, error)
syscalls vm.SyscallBuilder
Syscalls vm.SyscallBuilder
preIgnitionVesting []msig0.State
postIgnitionVesting []msig0.State
postCalicoVesting []msig0.State
@ -83,6 +87,7 @@ type StateManager struct {
genesisPledge abi.TokenAmount
genesisMarketFunds abi.TokenAmount
tsExec Executor
tsExecMonitor ExecMonitor
}
@ -92,15 +97,7 @@ type treeCache struct {
tree *state.StateTree
}
func NewStateManager(cs *store.ChainStore, sys vm.SyscallBuilder) *StateManager {
sm, err := NewStateManagerWithUpgradeSchedule(cs, sys, DefaultUpgradeSchedule())
if err != nil {
panic(fmt.Sprintf("default upgrade schedule is invalid: %s", err))
}
return sm
}
func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, sys vm.SyscallBuilder, us UpgradeSchedule) (*StateManager, error) {
func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule) (*StateManager, error) {
// If we have upgrades, make sure they're in-order and make sense.
if err := us.Validate(); err != nil {
return nil, err
@ -142,8 +139,9 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, sys vm.SyscallBuil
stateMigrations: stateMigrations,
expensiveUpgrades: expensiveUpgrades,
newVM: vm.NewVM,
syscalls: sys,
Syscalls: sys,
cs: cs,
tsExec: exec,
stCache: make(map[string][]cid.Cid),
tCache: treeCache{
root: cid.Undef,
@ -153,8 +151,8 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, sys vm.SyscallBuil
}, nil
}
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, sys vm.SyscallBuilder, us UpgradeSchedule, em ExecMonitor) (*StateManager, error) {
sm, err := NewStateManagerWithUpgradeSchedule(cs, sys, us)
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, em ExecMonitor) (*StateManager, error) {
sm, err := NewStateManager(cs, exec, sys, us)
if err != nil {
return nil, err
}
@ -344,6 +342,12 @@ func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (
sm.newVM = nvm
}
func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (*vm.VM, error) {
return func(ctx context.Context, opts *vm.VMOpts) (*vm.VM, error) {
return sm.newVM(ctx, opts)
}
}
func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoch) network.Version {
// The epochs here are the _last_ epoch for every version, or -1 if the
// version is disabled.
@ -356,5 +360,5 @@ func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoc
}
func (sm *StateManager) VMSys() vm.SyscallBuilder {
return sm.syscalls
return sm.Syscalls
}

View File

@ -4,8 +4,6 @@ import (
"context"
"fmt"
"reflect"
"runtime"
"strings"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
@ -14,16 +12,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/rt"
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/state"
@ -33,86 +22,21 @@ import (
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
type MethodMeta struct {
Name string
Params reflect.Type
Ret reflect.Type
}
var MethodsMap = map[cid.Cid]map[abi.MethodNum]MethodMeta{}
func init() {
// TODO: combine with the runtime actor registry.
var actors []rt.VMActor
actors = append(actors, exported0.BuiltinActors()...)
actors = append(actors, exported2.BuiltinActors()...)
actors = append(actors, exported3.BuiltinActors()...)
actors = append(actors, exported4.BuiltinActors()...)
actors = append(actors, exported5.BuiltinActors()...)
for _, actor := range actors {
exports := actor.Exports()
methods := make(map[abi.MethodNum]MethodMeta, len(exports))
// Explicitly add send, it's special.
methods[builtin.MethodSend] = MethodMeta{
Name: "Send",
Params: reflect.TypeOf(new(abi.EmptyValue)),
Ret: reflect.TypeOf(new(abi.EmptyValue)),
}
// Iterate over exported methods. Some of these _may_ be nil and
// must be skipped.
for number, export := range exports {
if export == nil {
continue
}
ev := reflect.ValueOf(export)
et := ev.Type()
// Extract the method names using reflection. These
// method names always match the field names in the
// `builtin.Method*` structs (tested in the specs-actors
// tests).
fnName := runtime.FuncForPC(ev.Pointer()).Name()
fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm")
switch abi.MethodNum(number) {
case builtin.MethodSend:
panic("method 0 is reserved for Send")
case builtin.MethodConstructor:
if fnName != "Constructor" {
panic("method 1 is reserved for Constructor")
}
}
methods[abi.MethodNum(number)] = MethodMeta{
Name: fnName,
Params: et.In(1),
Ret: et.Out(0),
}
}
MethodsMap[actor.Code()] = methods
}
}
func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, method abi.MethodNum, ts *types.TipSet) (cbg.CBORUnmarshaler, error) {
act, err := sm.LoadActor(ctx, to, ts)
if err != nil {
return nil, xerrors.Errorf("(get sset) failed to load miner actor: %w", err)
}
m, found := MethodsMap[act.Code][method]
m, found := sm.tsExec.NewActorRegistry().Methods[act.Code][method]
if !found {
return nil, fmt.Errorf("unknown method %d for actor %s", method, act.Code)
}
return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil
}
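With the package-level MethodsMap removed, method metadata is resolved through an ActorRegistry, as GetReturnType above and GetParamType just below now do. A hedged sketch of an external caller; decodeParams and its use of the filcns registry are illustrative:

package sketch

import (
	"bytes"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/stmgr"
)

// decodeParams resolves the parameter type for (actor code, method) from an
// explicit registry and decodes raw CBOR bytes into it.
func decodeParams(actCode cid.Cid, method abi.MethodNum, raw []byte) (interface{}, error) {
	p, err := stmgr.GetParamType(filcns.NewActorRegistry(), actCode, method)
	if err != nil {
		return nil, err
	}
	if err := p.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
		return nil, xerrors.Errorf("decoding params: %w", err)
	}
	return p, nil
}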
func GetParamType(actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) {
m, found := MethodsMap[actCode][method]
func GetParamType(ar *vm.ActorRegistry, actCode cid.Cid, method abi.MethodNum) (cbg.CBORUnmarshaler, error) {
m, found := ar.Methods[actCode][method]
if !found {
return nil, fmt.Errorf("unknown method %d for actor %s", method, actCode)
}
@ -144,7 +68,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
for i := ts.Height(); i < height; i++ {
// Technically, the tipset we're passing in here should be ts+1, but that may not exist.
base, err = sm.handleStateForks(ctx, base, i, &InvocationTracer{trace: &trace}, ts)
base, err = sm.HandleStateForks(ctx, base, i, &InvocationTracer{trace: &trace}, ts)
if err != nil {
return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
}
@ -159,7 +83,8 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
Epoch: height,
Rand: r,
Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.syscalls,
Actors: sm.tsExec.NewActorRegistry(),
Syscalls: sm.Syscalls,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: ts.Blocks()[0].ParentBaseFee,

View File

@ -7,6 +7,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types/mock"
@ -31,7 +32,7 @@ func TestIndexSeeks(t *testing.T) {
ctx := context.TODO()
nbs := blockstore.NewMemorySync()
cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil)
cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
_, err = cs.Import(bytes.NewReader(gencar))

View File

@ -101,10 +101,11 @@ type BlockMessages struct {
Miner address.Address
BlsMessages []types.ChainMsg
SecpkMessages []types.ChainMsg
WinCount int64
}
func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
// returned BlockMessages match block order in tipset
applied := make(map[address.Address]uint64)
cst := cbor.NewCborStore(cs.stateBlockstore)
@ -150,7 +151,6 @@ func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, err
Miner: b.Miner,
BlsMessages: make([]types.ChainMsg, 0, len(bms)),
SecpkMessages: make([]types.ChainMsg, 0, len(sms)),
WinCount: b.ElectionProof.WinCount,
}
for _, bmsg := range bms {

View File

@ -87,6 +87,8 @@ type HeadChangeEvt struct {
ApplyCount int
}
type WeightFunc func(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error)
// ChainStore is the main point of access to chain data.
//
// Raw chain data is stored in the Blockstore, with relevant markers (genesis,
@ -101,6 +103,8 @@ type ChainStore struct {
stateBlockstore bstore.Blockstore
metadataDs dstore.Batching
weight WeightFunc
chainLocalBlockstore bstore.Blockstore
heaviestLk sync.RWMutex
@ -128,7 +132,7 @@ type ChainStore struct {
wg sync.WaitGroup
}
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, j journal.Journal) *ChainStore {
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
if j == nil {
@ -143,6 +147,7 @@ func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dsto
chainBlockstore: chainBs,
stateBlockstore: stateBs,
chainLocalBlockstore: localbs,
weight: weight,
metadataDs: ds,
bestTips: pubsub.New(64),
tipsets: make(map[abi.ChainEpoch][]cid.Cid),
@ -410,11 +415,11 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
}
defer cs.heaviestLk.Unlock()
w, err := cs.Weight(ctx, ts)
w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
if err != nil {
return err
}
heaviestW, err := cs.Weight(ctx, cs.heaviest)
heaviestW, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
if err != nil {
return err
}
@ -1156,3 +1161,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t
return cs.LoadTipSet(lbts.Parents())
}
func (cs *ChainStore) Weight(ctx context.Context, hts *types.TipSet) (types.BigInt, error) { // todo remove
return cs.weight(ctx, cs.StateBlockstore(), hts)
}
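WeightFunc is now a constructor dependency of the ChainStore rather than a method on it, with filcns.Weight as the production implementation. A sketch of the new plumbing under that assumption; heightWeight and newPlumbing are illustrative, mirroring the in-memory setup the tests in this diff use:

package sketch

import (
	"context"

	"github.com/ipfs/go-datastore"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

// heightWeight is a toy store.WeightFunc, shown only to illustrate the shape
// the ChainStore now expects; real nodes pass filcns.Weight instead.
func heightWeight(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error) {
	if ts == nil {
		return types.NewInt(0), nil
	}
	return types.NewInt(uint64(ts.Height())), nil
}

// newPlumbing wires the two constructors changed by this PR together with
// in-memory stores and a nil syscall builder.
func newPlumbing() (*store.ChainStore, *stmgr.StateManager, error) {
	bs := bstore.NewMemorySync()
	cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), filcns.Weight, nil)

	sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, filcns.DefaultUpgradeSchedule())
	if err != nil {
		return nil, nil, err
	}
	return cs, sm, nil
}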

View File

@ -13,6 +13,7 @@ import (
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -70,7 +71,7 @@ func BenchmarkGetRandomness(b *testing.B) {
b.Fatal(err)
}
cs := store.NewChainStore(bs, bs, mds, nil)
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
b.ResetTimer()
@ -105,7 +106,7 @@ func TestChainExportImport(t *testing.T) {
}
nbs := blockstore.NewMemory()
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil)
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
root, err := cs.Import(buf)
@ -140,7 +141,7 @@ func TestChainExportImportFull(t *testing.T) {
}
nbs := blockstore.NewMemory()
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), nil)
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
root, err := cs.Import(buf)
@ -157,7 +158,11 @@ func TestChainExportImportFull(t *testing.T) {
t.Fatal("imported chain differed from exported chain")
}
sm := stmgr.NewStateManager(cs, nil)
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, filcns.DefaultUpgradeSchedule())
if err != nil {
t.Fatal(err)
}
for i := 0; i < 100; i++ {
ts, err := cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(i), nil, false)
if err != nil {

View File

@ -2,32 +2,26 @@ package sub
import (
"context"
"errors"
"fmt"
"time"
address "github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/impl/client"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
lru "github.com/hashicorp/golang-lru"
blocks "github.com/ipfs/go-block-format"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
connmgr "github.com/libp2p/go-libp2p-core/connmgr"
"github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
"golang.org/x/xerrors"
@ -35,9 +29,6 @@ import (
var log = logging.Logger("sub")
var ErrSoftFailure = errors.New("soft validation failure")
var ErrInsufficientPower = errors.New("incoming block's miner does not have minimum power")
var msgCidPrefix = cid.Prefix{
Version: 1,
Codec: cid.DagCBOR,
@ -225,11 +216,11 @@ type BlockValidator struct {
blacklist func(peer.ID)
// necessary for block validation
chain *store.ChainStore
stmgr *stmgr.StateManager
chain *store.ChainStore
consensus consensus.Consensus
}
func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.StateManager, blacklist func(peer.ID)) *BlockValidator {
func NewBlockValidator(self peer.ID, chain *store.ChainStore, cns consensus.Consensus, blacklist func(peer.ID)) *BlockValidator {
p, _ := lru.New2Q(4096)
return &BlockValidator{
self: self,
@ -238,7 +229,7 @@ func NewBlockValidator(self peer.ID, chain *store.ChainStore, stmgr *stmgr.State
blacklist: blacklist,
recvBlocks: newBlockReceiptCache(),
chain: chain,
stmgr: stmgr,
consensus: cns,
}
}
@ -260,214 +251,35 @@ func (bv *BlockValidator) flagPeer(p peer.ID) {
bv.peers.Add(p, v.(int)+1)
}
func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
if pid == bv.self {
return bv.validateLocalBlock(ctx, msg)
}
// track validation time
begin := build.Clock.Now()
func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (res pubsub.ValidationResult) {
defer func() {
log.Debugf("block validation time: %s", build.Clock.Since(begin))
if rerr := recover(); rerr != nil {
err := xerrors.Errorf("validate block: %s", rerr)
recordFailure(ctx, metrics.BlockValidationFailure, err.Error())
bv.flagPeer(pid)
res = pubsub.ValidationReject
return
}
}()
stats.Record(ctx, metrics.BlockReceived.M(1))
var what string
res, what = bv.consensus.ValidateBlockPubsub(ctx, pid == bv.self, msg)
if res == pubsub.ValidationAccept {
// it's a good block! make sure we've only seen it once
if count := bv.recvBlocks.add(msg.ValidatorData.(*types.BlockMsg).Cid()); count > 0 {
if pid == bv.self {
log.Warnf("local block has been seen %d times; ignoring", count)
}
recordFailureFlagPeer := func(what string) {
// TODO: once these changes propagate to the network, we can consider
// dropping peers who send us the same block multiple times
return pubsub.ValidationIgnore
}
} else {
recordFailure(ctx, metrics.BlockValidationFailure, what)
bv.flagPeer(pid)
}
blk, what, err := bv.decodeAndCheckBlock(msg)
if err != nil {
log.Error("got invalid block over pubsub: ", err)
recordFailureFlagPeer(what)
return pubsub.ValidationReject
}
// validate the block meta: the Message CID in the header must match the included messages
err = bv.validateMsgMeta(ctx, blk)
if err != nil {
log.Warnf("error validating message metadata: %s", err)
recordFailureFlagPeer("invalid_block_meta")
return pubsub.ValidationReject
}
// we want to ensure that it is a block from a known miner; we reject blocks from unknown miners
// to prevent spam attacks.
// the logic works as follows: we look up the miner in the chain for its key.
// if we can find it then it's a known miner and we can validate the signature.
// if we can't find it, we check whether we are (near) synced in the chain.
// if we are not synced we cannot validate the block and we must ignore it.
// if we are synced and the miner is unknown, then the block is rejected.
key, err := bv.checkPowerAndGetWorkerKey(ctx, blk.Header)
if err != nil {
if err != ErrSoftFailure && bv.isChainNearSynced() {
log.Warnf("received block from unknown miner or miner that doesn't meet min power over pubsub; rejecting message")
recordFailureFlagPeer("unknown_miner")
return pubsub.ValidationReject
}
log.Warnf("cannot validate block message; unknown miner or miner that doesn't meet min power in unsynced chain: %s", blk.Header.Cid())
return pubsub.ValidationIgnore
}
err = sigs.CheckBlockSignature(ctx, blk.Header, key)
if err != nil {
log.Errorf("block signature verification failed: %s", err)
recordFailureFlagPeer("signature_verification_failed")
return pubsub.ValidationReject
}
if blk.Header.ElectionProof.WinCount < 1 {
log.Errorf("block is not claiming to be winning")
recordFailureFlagPeer("not_winning")
return pubsub.ValidationReject
}
// it's a good block! make sure we've only seen it once
if bv.recvBlocks.add(blk.Header.Cid()) > 0 {
// TODO: once these changes propagate to the network, we can consider
// dropping peers who send us the same block multiple times
return pubsub.ValidationIgnore
}
// all good, accept the block
msg.ValidatorData = blk
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
return pubsub.ValidationAccept
}
func (bv *BlockValidator) validateLocalBlock(ctx context.Context, msg *pubsub.Message) pubsub.ValidationResult {
stats.Record(ctx, metrics.BlockPublished.M(1))
if size := msg.Size(); size > 1<<20-1<<15 {
log.Errorf("ignoring oversize block (%dB)", size)
recordFailure(ctx, metrics.BlockValidationFailure, "oversize_block")
return pubsub.ValidationIgnore
}
blk, what, err := bv.decodeAndCheckBlock(msg)
if err != nil {
log.Errorf("got invalid local block: %s", err)
recordFailure(ctx, metrics.BlockValidationFailure, what)
return pubsub.ValidationIgnore
}
if count := bv.recvBlocks.add(blk.Header.Cid()); count > 0 {
log.Warnf("local block has been seen %d times; ignoring", count)
return pubsub.ValidationIgnore
}
msg.ValidatorData = blk
stats.Record(ctx, metrics.BlockValidationSuccess.M(1))
return pubsub.ValidationAccept
}
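For clarity, the size bound in validateLocalBlock works out as follows; the constant name below is illustrative and does not appear in the diff, and the rationale (headroom under a ~1 MiB pubsub message limit) is an assumption.

// 1<<20 = 1048576 bytes (1 MiB), 1<<15 = 32768 bytes (32 KiB): locally
// published blocks must stay under 1015808 bytes, presumably leaving margin
// below a 1 MiB transport/pubsub message limit.
const maxLocalBlockSize = 1<<20 - 1<<15 // 1015808 bytes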
func (bv *BlockValidator) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) {
blk, err := types.DecodeBlockMsg(msg.GetData())
if err != nil {
return nil, "invalid", xerrors.Errorf("error decoding block: %w", err)
}
if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit {
return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count)
}
// make sure we have a signature
if blk.Header.BlockSig == nil {
return nil, "missing_signature", fmt.Errorf("block without a signature")
}
return blk, "", nil
}
func (bv *BlockValidator) isChainNearSynced() bool {
ts := bv.chain.GetHeaviestTipSet()
timestamp := ts.MinTimestamp()
timestampTime := time.Unix(int64(timestamp), 0)
return build.Clock.Since(timestampTime) < 6*time.Hour
}
func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error {
// TODO there has to be a simpler way to do this without the blockstore dance
// block headers use adt0
store := blockadt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewMemory()))
bmArr := blockadt.MakeEmptyArray(store)
smArr := blockadt.MakeEmptyArray(store)
for i, m := range msg.BlsMessages {
c := cbg.CborCid(m)
if err := bmArr.Set(uint64(i), &c); err != nil {
return err
}
}
for i, m := range msg.SecpkMessages {
c := cbg.CborCid(m)
if err := smArr.Set(uint64(i), &c); err != nil {
return err
}
}
bmroot, err := bmArr.Root()
if err != nil {
return err
}
smroot, err := smArr.Root()
if err != nil {
return err
}
mrcid, err := store.Put(store.Context(), &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return err
}
if msg.Header.Messages != mrcid {
return fmt.Errorf("messages didn't match root cid in header")
}
return nil
}
func (bv *BlockValidator) checkPowerAndGetWorkerKey(ctx context.Context, bh *types.BlockHeader) (address.Address, error) {
// we check that the miner met the minimum power at the lookback tipset
baseTs := bv.chain.GetHeaviestTipSet()
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, bv.stmgr, baseTs, bh.Height)
if err != nil {
log.Warnf("failed to load lookback tipset for incoming block: %s", err)
return address.Undef, ErrSoftFailure
}
key, err := stmgr.GetMinerWorkerRaw(ctx, bv.stmgr, lbst, bh.Miner)
if err != nil {
log.Warnf("failed to resolve worker key for miner %s: %s", bh.Miner, err)
return address.Undef, ErrSoftFailure
}
// NOTE: we check to see if the miner was eligible in the lookback
// tipset - 1 for historical reasons. DO NOT use the lookback state
// returned by GetLookbackTipSetForRound.
eligible, err := stmgr.MinerEligibleToMine(ctx, bv.stmgr, bh.Miner, baseTs, lbts)
if err != nil {
log.Warnf("failed to determine if incoming block's miner has minimum power: %s", err)
return address.Undef, ErrSoftFailure
}
if !eligible {
log.Warnf("incoming block's miner is ineligible")
return address.Undef, ErrInsufficientPower
}
return key, nil
}
type blockReceiptCache struct {

View File

@ -5,13 +5,11 @@ import (
"context"
"errors"
"fmt"
"os"
"sort"
"strings"
"sync"
"time"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/node/modules/dtypes"
@ -29,40 +27,23 @@ import (
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
ffi "github.com/filecoin-project/filecoin-ffi"
// named msgarray here to make it clear that these are the types used by
// messages, regardless of specs-actors version.
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/metrics"
)
// Blocks that are more than MaxHeightDrift epochs above
// the theoretical max height based on systime are quickly rejected
const MaxHeightDrift = 5
var (
// LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic
// where the Syncer publishes candidate chain heads to be synced.
@ -108,6 +89,8 @@ type Syncer struct {
// the state manager handles making state queries
sm *stmgr.StateManager
consensus consensus.Consensus
// The known Genesis tipset
Genesis *types.TipSet
@ -127,8 +110,6 @@ type Syncer struct {
receiptTracker *blockReceiptTracker
verifier ffiwrapper.Verifier
tickerCtxCancel context.CancelFunc
ds dtypes.MetadataDS
@ -136,40 +117,44 @@ type Syncer struct {
type SyncManagerCtor func(syncFn SyncFunc) SyncManager
// NewSyncer creates a new Syncer object.
func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
type Genesis *types.TipSet
func LoadGenesis(sm *stmgr.StateManager) (Genesis, error) {
gen, err := sm.ChainStore().GetGenesis()
if err != nil {
return nil, xerrors.Errorf("getting genesis block: %w", err)
}
gent, err := types.NewTipSet([]*types.BlockHeader{gen})
if err != nil {
return nil, err
}
return gent, nil
}
// NewSyncer creates a new Syncer object.
func NewSyncer(ds dtypes.MetadataDS,
sm *stmgr.StateManager,
exchange exchange.Client,
syncMgrCtor SyncManagerCtor,
connmgr connmgr.ConnManager,
self peer.ID,
beacon beacon.Schedule,
gent Genesis,
consensus consensus.Consensus) (*Syncer, error) {
s := &Syncer{
ds: ds,
beacon: beacon,
bad: NewBadBlockCache(),
Genesis: gent,
consensus: consensus,
Exchange: exchange,
store: sm.ChainStore(),
sm: sm,
self: self,
receiptTracker: newBlockReceiptTracker(),
connmgr: connmgr,
verifier: verifier,
incoming: pubsub.New(50),
}
if build.InsecurePoStValidation {
log.Warn("*********************************************************************************************")
log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! ")
log.Warn("*********************************************************************************************")
}
s.syncmgr = syncMgrCtor(s.Sync)
return s, nil
}
@ -212,7 +197,7 @@ func (syncer *Syncer) Stop() {
func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
defer func() {
if err := recover(); err != nil {
log.Errorf("panic in InformNewHead: ", err)
log.Errorf("panic in InformNewHead: %s", err)
}
}()
@ -222,7 +207,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool {
return false
}
if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
if syncer.consensus.IsEpochBeyondCurrMax(fts.TipSet().Height()) {
log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height())
return false
}
@ -399,33 +384,21 @@ func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types
return nil, fmt.Errorf("msgincl length didnt match tipset size")
}
if err := checkMsgMeta(ts, allbmsgs, allsmsgs, bmi, smi); err != nil {
return nil, err
}
fts := &store.FullTipSet{}
for bi, b := range ts.Blocks() {
if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit {
return nil, fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc)
}
var smsgs []*types.SignedMessage
var smsgCids []cid.Cid
for _, m := range smi[bi] {
smsgs = append(smsgs, allsmsgs[m])
smsgCids = append(smsgCids, allsmsgs[m].Cid())
}
var bmsgs []*types.Message
var bmsgCids []cid.Cid
for _, m := range bmi[bi] {
bmsgs = append(bmsgs, allbmsgs[m])
bmsgCids = append(bmsgCids, allbmsgs[m].Cid())
}
mrcid, err := computeMsgMeta(bs, bmsgCids, smsgCids)
if err != nil {
return nil, err
}
if b.Messages != mrcid {
return nil, fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key())
}
fb := &types.FullBlock{
@ -584,7 +557,7 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
}
func isPermanent(err error) bool {
return !errors.Is(err, ErrTemporal)
return !errors.Is(err, consensus.ErrTemporal)
}
func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, useCache bool) error {
@ -624,55 +597,6 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet,
return nil
}
func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error {
act, err := syncer.sm.LoadActor(ctx, power.Address, baseTs)
if err != nil {
return xerrors.Errorf("failed to load power actor: %w", err)
}
powState, err := power.Load(syncer.store.ActorStore(ctx), act)
if err != nil {
return xerrors.Errorf("failed to load power actor state: %w", err)
}
_, exist, err := powState.MinerPower(maddr)
if err != nil {
return xerrors.Errorf("failed to look up miner's claim: %w", err)
}
if !exist {
return xerrors.New("miner isn't valid")
}
return nil
}
var ErrTemporal = errors.New("temporal error")
func blockSanityChecks(h *types.BlockHeader) error {
if h.ElectionProof == nil {
return xerrors.Errorf("block cannot have nil election proof")
}
if h.Ticket == nil {
return xerrors.Errorf("block cannot have nil ticket")
}
if h.BlockSig == nil {
return xerrors.Errorf("block had nil signature")
}
if h.BLSAggregate == nil {
return xerrors.Errorf("block had nil bls aggregate signature")
}
if h.Miner.Protocol() != address.ID {
return xerrors.Errorf("block had non-ID miner address")
}
return nil
}
// ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, useCache bool) (err error) {
defer func() {
@ -703,262 +627,8 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
ctx, span := trace.StartSpan(ctx, "validateBlock")
defer span.End()
if err := blockSanityChecks(b.Header); err != nil {
return xerrors.Errorf("incoming header failed basic sanity checks: %w", err)
}
h := b.Header
baseTs, err := syncer.store.LoadTipSet(types.NewTipSetKey(h.Parents...))
if err != nil {
return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err)
}
winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height())
lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height)
if err != nil {
return xerrors.Errorf("failed to get lookback tipset for block: %w", err)
}
prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs)
if err != nil {
return xerrors.Errorf("failed to get latest beacon entry: %w", err)
}
// fast checks first
if h.Height <= baseTs.Height() {
return xerrors.Errorf("block height not greater than parent height: %d != %d", h.Height, baseTs.Height())
}
nulls := h.Height - (baseTs.Height() + 1)
if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs {
return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs)
}
now := uint64(build.Clock.Now().Unix())
if h.Timestamp > now+build.AllowableClockDriftSecs {
return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal)
}
if h.Timestamp > now {
log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
}
msgsCheck := async.Err(func() error {
if b.Cid() == build.WhitelistedBlock {
return nil
}
if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil {
return xerrors.Errorf("block had invalid messages: %w", err)
}
return nil
})
minerCheck := async.Err(func() error {
if err := syncer.minerIsValid(ctx, h.Miner, baseTs); err != nil {
return xerrors.Errorf("minerIsValid failed: %w", err)
}
return nil
})
baseFeeCheck := async.Err(func() error {
baseFee, err := syncer.store.ComputeBaseFee(ctx, baseTs)
if err != nil {
return xerrors.Errorf("computing base fee: %w", err)
}
if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 {
return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)",
b.Header.ParentBaseFee, baseFee)
}
return nil
})
pweight, err := syncer.store.Weight(ctx, baseTs)
if err != nil {
return xerrors.Errorf("getting parent weight: %w", err)
}
if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
b.Header.ParentWeight, pweight)
}
stateRootCheck := async.Err(func() error {
stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err)
}
if stateroot != h.ParentStateRoot {
msgs, err := syncer.store.MessagesForTipset(baseTs)
if err != nil {
log.Error("failed to load messages for tipset during tipset state mismatch error: ", err)
} else {
log.Warn("Messages for tipset with mismatching state:")
for i, m := range msgs {
mm := m.VMMessage()
log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params)
}
}
return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot)
}
if precp != h.ParentMessageReceipts {
return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts)
}
return nil
})
// Stuff that needs worker address
waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err)
}
winnerCheck := async.Err(func() error {
if h.ElectionProof.WinCount < 1 {
return xerrors.Errorf("block is not claiming to be a winner")
}
eligible, err := stmgr.MinerEligibleToMine(ctx, syncer.sm, h.Miner, baseTs, lbts)
if err != nil {
return xerrors.Errorf("determining if miner has min power failed: %w", err)
}
if !eligible {
return xerrors.New("block's miner is ineligible to mine")
}
rBeacon := *prevBeacon
if len(h.BeaconEntries) != 0 {
rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1]
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("could not draw randomness: %w", err)
}
if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
return xerrors.Errorf("validating block election proof failed: %w", err)
}
slashed, err := stmgr.GetMinerSlashed(ctx, syncer.sm, baseTs, h.Miner)
if err != nil {
return xerrors.Errorf("failed to check if block miner was slashed: %w", err)
}
if slashed {
return xerrors.Errorf("received block was from slashed or invalid miner")
}
mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner)
if err != nil {
return xerrors.Errorf("failed getting power: %w", err)
}
j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower)
if h.ElectionProof.WinCount != j {
return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j)
}
return nil
})
blockSigCheck := async.Err(func() error {
if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil {
return xerrors.Errorf("check block signature failed: %w", err)
}
return nil
})
beaconValuesCheck := async.Err(func() error {
if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" {
return nil
}
if err := beacon.ValidateBlockValues(syncer.beacon, h, baseTs.Height(), *prevBeacon); err != nil {
return xerrors.Errorf("failed to validate blocks random beacon values: %w", err)
}
return nil
})
tktsCheck := async.Err(func() error {
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
if h.Height > build.UpgradeSmokeHeight {
buf.Write(baseTs.MinTicket().VRFProof)
}
beaconBase := *prevBeacon
if len(h.BeaconEntries) != 0 {
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
vrfBase, err := store.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
}
err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
if err != nil {
return xerrors.Errorf("validating block tickets failed: %w", err)
}
return nil
})
wproofCheck := async.Err(func() error {
if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil {
return xerrors.Errorf("invalid election post: %w", err)
}
return nil
})
await := []async.ErrorFuture{
minerCheck,
tktsCheck,
blockSigCheck,
beaconValuesCheck,
wproofCheck,
winnerCheck,
msgsCheck,
baseFeeCheck,
stateRootCheck,
}
var merr error
for _, fut := range await {
if err := fut.AwaitContext(ctx); err != nil {
merr = multierror.Append(merr, err)
}
}
if merr != nil {
mulErr := merr.(*multierror.Error)
mulErr.ErrorFormat = func(es []error) string {
if len(es) == 1 {
return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0])
}
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %+v", err)
}
return fmt.Sprintf(
"%d errors occurred:\n\t%s\n\n",
len(es), strings.Join(points, "\n\t"))
}
return mulErr
}
if err := syncer.consensus.ValidateBlock(ctx, b); err != nil {
return err
}
if useCache {
@ -970,249 +640,6 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, use
return nil
}
func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
if build.InsecurePoStValidation {
if len(h.WinPoStProof) == 0 {
return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given")
}
if string(h.WinPoStProof[0].ProofBytes) == "valid proof" {
return nil
}
return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid")
}
buf := new(bytes.Buffer)
if err := h.Miner.MarshalCBOR(buf); err != nil {
return xerrors.Errorf("failed to marshal miner address: %w", err)
}
rbase := prevBeacon
if len(h.BeaconEntries) > 0 {
rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
}
mid, err := address.IDFromAddress(h.Miner)
if err != nil {
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err)
}
sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand)
if err != nil {
return xerrors.Errorf("getting winning post sector set: %w", err)
}
ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{
Randomness: rand,
Proofs: h.WinPoStProof,
ChallengedSectors: sectors,
Prover: abi.ActorID(mid),
})
if err != nil {
return xerrors.Errorf("failed to verify election post: %w", err)
}
if !ok {
log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors)
return xerrors.Errorf("winning post was invalid")
}
return nil
}
// TODO: We should extract this somewhere else and make the message pool and miner use the same logic
func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error {
{
var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type
var pubks [][]byte
for _, m := range b.BlsMessages {
sigCids = append(sigCids, m.Cid())
pubk, err := syncer.sm.GetBlsPublicKey(ctx, m.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to load bls public to validate block: %w", err)
}
pubks = append(pubks, pubk)
}
if err := syncer.verifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil {
return xerrors.Errorf("bls aggregate signature was invalid: %w", err)
}
}
nonces := make(map[address.Address]uint64)
stateroot, _, err := syncer.sm.TipSetState(ctx, baseTs)
if err != nil {
return err
}
st, err := state.LoadStateTree(syncer.store.ActorStore(ctx), stateroot)
if err != nil {
return xerrors.Errorf("failed to load base state tree: %w", err)
}
nv := syncer.sm.GetNtwkVersion(ctx, b.Header.Height)
pl := vm.PricelistByEpoch(baseTs.Height())
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
return err
}
// ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit
// So below is overflow safe
sumGasLimit += m.GasLimit
if sumGasLimit > build.BlockGasLimit {
return xerrors.Errorf("block gas limit exceeded")
}
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
var sender address.Address
if nv >= network.Version13 {
sender, err = st.LookupID(m.From)
if err != nil {
return err
}
} else {
sender = m.From
}
if _, ok := nonces[sender]; !ok {
// `GetActor` does not validate that this is an account actor.
act, err := st.GetActor(sender)
if err != nil {
return xerrors.Errorf("failed to get actor: %w", err)
}
if !builtin.IsAccountActor(act.Code) {
return xerrors.New("Sender must be an account actor")
}
nonces[sender] = act.Nonce
}
if nonces[sender] != m.Nonce {
return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce)
}
nonces[sender]++
return nil
}
// Validate message arrays in a temporary blockstore.
tmpbs := bstore.NewMemory()
tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs))
bmArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.BlsMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err)
}
c, err := store.PutMessage(tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := bmArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put bls message at index %d: %w", i, err)
}
}
smArr := blockadt.MakeEmptyArray(tmpstore)
for i, m := range b.SecpkMessages {
if err := checkMsg(m); err != nil {
return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err)
}
// `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call
// in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`).
kaddr, err := syncer.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs)
if err != nil {
return xerrors.Errorf("failed to resolve key addr: %w", err)
}
if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil {
return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err)
}
c, err := store.PutMessage(tmpbs, m)
if err != nil {
return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err)
}
k := cbg.CborCid(c)
if err := smArr.Set(uint64(i), &k); err != nil {
return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err)
}
}
bmroot, err := bmArr.Root()
if err != nil {
return err
}
smroot, err := smArr.Root()
if err != nil {
return err
}
mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{
BlsMessages: bmroot,
SecpkMessages: smroot,
})
if err != nil {
return err
}
if b.Header.Messages != mrcid {
return fmt.Errorf("messages didnt match message root in header")
}
// Finally, flush.
return vm.Copy(ctx, tmpbs, syncer.store.ChainBlockstore(), mrcid)
}
func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error {
_, span := trace.StartSpan(ctx, "syncer.verifyBlsAggregate")
defer span.End()
span.AddAttributes(
trace.Int64Attribute("msgCount", int64(len(msgs))),
)
msgsS := make([]ffi.Message, len(msgs))
pubksS := make([]ffi.PublicKey, len(msgs))
for i := 0; i < len(msgs); i++ {
msgsS[i] = msgs[i].Bytes()
copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
}
sigS := new(ffi.Signature)
copy(sigS[:], sig.Data[:ffi.SignatureBytes])
if len(msgs) == 0 {
return nil
}
valid := ffi.HashVerify(sigS, msgsS, pubksS)
if !valid {
return xerrors.New("bls aggregate signature failed to verify")
}
return nil
}
type syncStateKey struct{}
func extractSyncState(ctx context.Context) *SyncerState {
@ -1374,7 +801,7 @@ loop:
return nil, xerrors.Errorf("retrieved segments of the chain are not connected at heights %d/%d",
blockSet[len(blockSet)-1].Height(), blks[0].Height())
// A successful `GetBlocks()` call is guaranteed to fetch at least
// one tipset so the acess `blks[0]` is safe.
// one tipset so the access `blks[0]` is safe.
}
for _, b := range blks {
@ -1598,6 +1025,35 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS
return nil
}
func checkMsgMeta(ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) error {
for bi, b := range ts.Blocks() {
if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit {
return fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc)
}
var smsgCids []cid.Cid
for _, m := range smi[bi] {
smsgCids = append(smsgCids, allsmsgs[m].Cid())
}
var bmsgCids []cid.Cid
for _, m := range bmi[bi] {
bmsgCids = append(bmsgCids, allbmsgs[m].Cid())
}
mrcid, err := computeMsgMeta(cbor.NewCborStore(bstore.NewMemory()), bmsgCids, smsgCids)
if err != nil {
return err
}
if b.Messages != mrcid {
return fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key())
}
}
return nil
}
func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet, startOffset int) ([]*exchange.CompactedMessages, error) {
batchSize := len(headers)
batch := make([]*exchange.CompactedMessages, batchSize)
@ -1636,7 +1092,19 @@ func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet
if err != nil {
requestErr = multierror.Append(requestErr, err)
} else {
requestResult = result
isGood := true
for index, ts := range headers[nextI:lastI] {
cm := result[index]
if err := checkMsgMeta(ts, cm.Bls, cm.Secpk, cm.BlsIncludes, cm.SecpkIncludes); err != nil {
log.Errorf("fetched messages not as expected: %s", err)
isGood = false
break
}
}
if isGood {
requestResult = result
}
}
}
@ -1754,10 +1222,6 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *t
return nil
}
func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
return gen.VerifyVRF(ctx, worker, rand, evrf)
}
func (syncer *Syncer) State() []SyncerStateSnapshot {
return syncer.syncmgr.State()
}
@ -1802,12 +1266,3 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet)
return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets")
}
func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool {
if syncer.Genesis == nil {
return false
}
now := uint64(build.Clock.Now().Unix())
return epoch > (abi.ChainEpoch((now-syncer.Genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift)
}
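The bound above is plain arithmetic over wall-clock time. A small illustrative helper follows, assuming mainnet's 30-second block delay; the function name and values are local to this sketch.

// maxAllowedEpoch mirrors IsEpochBeyondCurrMax: with genesis 3000s in the past,
// 30s blocks and MaxHeightDrift = 5, (3000/30)+5 = 105 is the largest epoch
// accepted right now; anything higher is rejected as impossibly far ahead.
func maxAllowedEpoch(nowUnix, genesisUnix, blockDelaySecs, maxHeightDrift uint64) uint64 {
	return (nowUnix-genesisUnix)/blockDelaySecs + maxHeightDrift
}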

View File

@ -28,6 +28,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/chain/store"
@ -105,7 +106,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
mn: mocknet.New(ctx),
g: g,
us: stmgr.DefaultUpgradeSchedule(),
us: filcns.DefaultUpgradeSchedule(),
}
tu.addSourceNode(h)
@ -125,19 +126,19 @@ func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syn
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
Migration: filcns.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
Migration: filcns.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
Migration: filcns.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: v5height,
Migration: stmgr.UpgradeActorsV5,
Migration: filcns.UpgradeActorsV5,
}}
g, err := gen.NewGeneratorWithUpgradeSchedule(sched)

View File

@ -47,7 +47,8 @@ func NewBeaconEntry(round uint64, data []byte) BeaconEntry {
}
type BlockHeader struct {
Miner address.Address // 0 unique per block/miner
Miner address.Address // 0 unique per block/miner
Ticket *Ticket // 1 unique per block/miner: should be a valid VRF
ElectionProof *ElectionProof // 2 unique per block/miner: should be a valid VRF
BeaconEntries []BeaconEntry // 3 identical for all blocks in same tipset

View File

@ -5,6 +5,8 @@ import (
"encoding/hex"
"fmt"
"reflect"
"runtime"
"strings"
"github.com/filecoin-project/go-state-types/network"
@ -14,11 +16,6 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime"
"github.com/filecoin-project/go-state-types/abi"
@ -30,8 +27,17 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
type MethodMeta struct {
Name string
Params reflect.Type
Ret reflect.Type
}
type ActorRegistry struct {
actors map[cid.Cid]*actorInfo
Methods map[cid.Cid]map[abi.MethodNum]MethodMeta
}
// An ActorPredicate returns an error if the given actor is not valid for the given runtime environment (e.g., chain height, version, etc.).
@ -61,18 +67,10 @@ type actorInfo struct {
}
func NewActorRegistry() *ActorRegistry {
inv := &ActorRegistry{actors: make(map[cid.Cid]*actorInfo)}
// TODO: define all these properties on the actors themselves, in specs-actors.
// add builtInCode using: register(cid, singleton)
inv.Register(ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...)
inv.Register(ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...)
inv.Register(ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...)
inv.Register(ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...)
inv.Register(ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...)
return inv
return &ActorRegistry{
actors: make(map[cid.Cid]*actorInfo),
Methods: map[cid.Cid]map[abi.MethodNum]MethodMeta{},
}
}
func (ar *ActorRegistry) Invoke(codeCid cid.Cid, rt vmr.Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) {
@ -96,6 +94,7 @@ func (ar *ActorRegistry) Register(pred ActorPredicate, actors ...rtt.VMActor) {
pred = func(vmr.Runtime, rtt.VMActor) error { return nil }
}
for _, a := range actors {
// register in the `actors` map (for the invoker)
code, err := ar.transform(a)
if err != nil {
panic(xerrors.Errorf("%s: %w", string(a.Code().Hash()), err))
@ -105,6 +104,51 @@ func (ar *ActorRegistry) Register(pred ActorPredicate, actors ...rtt.VMActor) {
vmActor: a,
predicate: pred,
}
// register in the `Methods` map (used by statemanager utils)
exports := a.Exports()
methods := make(map[abi.MethodNum]MethodMeta, len(exports))
// Explicitly add send, it's special.
methods[builtin.MethodSend] = MethodMeta{
Name: "Send",
Params: reflect.TypeOf(new(abi.EmptyValue)),
Ret: reflect.TypeOf(new(abi.EmptyValue)),
}
// Iterate over exported methods. Some of these _may_ be nil and
// must be skipped.
for number, export := range exports {
if export == nil {
continue
}
ev := reflect.ValueOf(export)
et := ev.Type()
// Extract the method names using reflection. These
// method names always match the field names in the
// `builtin.Method*` structs (tested in the specs-actors
// tests).
fnName := runtime.FuncForPC(ev.Pointer()).Name()
fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm")
switch abi.MethodNum(number) {
case builtin.MethodSend:
panic("method 0 is reserved for Send")
case builtin.MethodConstructor:
if fnName != "Constructor" {
panic("method 1 is reserved for Constructor")
}
}
methods[abi.MethodNum(number)] = MethodMeta{
Name: fnName,
Params: et.In(1),
Ret: et.Out(0),
}
}
ar.Methods[a.Code()] = methods
}
}
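Later hunks in this diff replace lookups in stmgr.MethodsMap with this registry (e.g. filcns.NewActorRegistry().Methods[code][method].Name). As a hedged illustration of the new map, written from outside the vm package and assuming the usual fmt/cid/abi imports, a lookup helper could look like this; the helper name and fallback string are not part of the change.

// methodName resolves a display name for an (actor code, method number) pair
// from the registry's Methods map, falling back when the pair is unknown.
func methodName(ar *vm.ActorRegistry, code cid.Cid, method abi.MethodNum) string {
	if mm, ok := ar.Methods[code][method]; ok {
		return mm.Name
	}
	return fmt.Sprintf("method %d on unknown actor %s", method, code)
}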
@ -226,13 +270,11 @@ func DecodeParams(b []byte, out interface{}) error {
return um.UnmarshalCBOR(bytes.NewReader(b))
}
func DumpActorState(act *types.Actor, b []byte) (interface{}, error) {
func DumpActorState(i *ActorRegistry, act *types.Actor, b []byte) (interface{}, error) {
if builtin.IsAccountActor(act.Code) { // Account code special case
return nil, nil
}
i := NewActorRegistry()
actInfo, ok := i.actors[act.Code]
if !ok {
return nil, xerrors.Errorf("state type for actor %s not found", act.Code)

View File

@ -223,6 +223,7 @@ type VMOpts struct {
Epoch abi.ChainEpoch
Rand Rand
Bstore blockstore.Blockstore
Actors *ActorRegistry
Syscalls SyscallBuilder
CircSupplyCalc CircSupplyCalculator
NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? It seems like even when creating new networks we want to use the 'global'/build-default version getter
@ -244,7 +245,7 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
cst: cst,
buf: buf,
blockHeight: opts.Epoch,
areg: NewActorRegistry(),
areg: opts.Actors,
rand: opts.Rand, // TODO: Probably should be a syscall
circSupplyCalc: opts.CircSupplyCalc,
ntwkVersion: opts.NtwkVersion,

View File

@ -35,7 +35,7 @@ import (
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
types "github.com/filecoin-project/lotus/chain/types"
)
@ -476,7 +476,7 @@ var ChainInspectUsage = &cli.Command{
return err
}
mm := stmgr.MethodsMap[code][m.Message.Method]
mm := filcns.NewActorRegistry().Methods[code][m.Message.Method] // TODO: use remote map
byMethod[mm.Name] += m.Message.GasLimit
byMethodC[mm.Name]++

View File

@ -10,10 +10,6 @@ import (
"strconv"
"text/tabwriter"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/stmgr"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-state-types/big"
@ -31,8 +27,11 @@ import (
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/types"
)
@ -325,7 +324,7 @@ var msigInspectCmd = &cli.Command{
fmt.Fprintf(w, "%d\t%s\t%d\t%s\t%s\t%s(%d)\t%s\n", txid, "pending", len(tx.Approved), target, types.FIL(tx.Value), "new account, unknown method", tx.Method, paramStr)
}
} else {
method := stmgr.MethodsMap[targAct.Code][tx.Method]
method := filcns.NewActorRegistry().Methods[targAct.Code][tx.Method] // TODO: use remote map
if decParams && tx.Method != 0 {
ptyp := reflect.New(method.Params.Elem()).Interface().(cbg.CBORUnmarshaler)

View File

@ -12,7 +12,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
types "github.com/filecoin-project/lotus/chain/types"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
@ -86,7 +86,7 @@ func (s *ServicesImpl) DecodeTypedParamsFromJSON(ctx context.Context, to address
return nil, err
}
methodMeta, found := stmgr.MethodsMap[act.Code][method]
methodMeta, found := filcns.NewActorRegistry().Methods[act.Code][method] // TODO: use remote map
if !found {
return nil, fmt.Errorf("method %d not found on actor %s", method, act.Code)
}

View File

@ -18,6 +18,7 @@ import (
"time"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/api/v0api"
@ -1366,7 +1367,7 @@ func codeStr(c cid.Cid) string {
}
func getMethod(code cid.Cid, method abi.MethodNum) string {
return stmgr.MethodsMap[code][method].Name
return filcns.NewActorRegistry().Methods[code][method].Name // todo: use remote
}
func toFil(f types.BigInt) types.FIL {
@ -1397,7 +1398,7 @@ func sumGas(changes []*types.GasTrace) types.GasTrace {
}
func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
p, err := stmgr.GetParamType(code, method)
p, err := stmgr.GetParamType(filcns.NewActorRegistry(), code, method) // todo use api for correct actor registry
if err != nil {
return "", err
}
@ -1411,7 +1412,7 @@ func JsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, erro
}
func jsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error) {
methodMeta, found := stmgr.MethodsMap[code][method]
methodMeta, found := filcns.NewActorRegistry().Methods[code][method] // TODO: use remote
if !found {
return "", fmt.Errorf("method %d not found on actor %s", method, code)
}

View File

@ -26,6 +26,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/blockstore"
badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -253,10 +254,13 @@ var importBenchCmd = &cli.Command{
}
metadataDs := datastore.NewMapDatastore()
cs := store.NewChainStore(bs, bs, metadataDs, nil)
cs := store.NewChainStore(bs, bs, metadataDs, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
stm := stmgr.NewStateManager(cs, vm.Syscalls(verifier))
stm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule())
if err != nil {
return err
}
var carFile *os.File
// open the CAR file if one is provided.

View File

@ -14,6 +14,7 @@ import (
"time"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen/genesis"
@ -510,13 +511,16 @@ var chainBalanceStateCmd = &cli.Command{
return err
}
cs := store.NewChainStore(bs, bs, mds, nil)
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)
sm := stmgr.NewStateManager(cs, vm.Syscalls(ffiwrapper.ProofVerifier))
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule())
if err != nil {
return err
}
tree, err := state.LoadStateTree(cst, sroot)
if err != nil {
@ -731,14 +735,16 @@ var chainPledgeCmd = &cli.Command{
return err
}
cs := store.NewChainStore(bs, bs, mds, nil)
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)
sm := stmgr.NewStateManager(cs, vm.Syscalls(ffiwrapper.ProofVerifier))
sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule())
if err != nil {
return err
}
state, err := state.LoadStateTree(cst, sroot)
if err != nil {
return err

View File

@ -90,7 +90,7 @@ var exportChainCmd = &cli.Command{
return err
}
cs := store.NewChainStore(bs, bs, mds, nil)
cs := store.NewChainStore(bs, bs, mds, nil, nil)
defer cs.Close() //nolint:errcheck
if err := cs.Load(); err != nil {

View File

@ -9,6 +9,7 @@ import (
_init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/fatih/color"
"github.com/ipfs/go-datastore"
@ -54,7 +55,7 @@ var genesisVerifyCmd = &cli.Command{
}
bs := blockstore.FromDatastore(datastore.NewMapDatastore())
cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), nil)
cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
cf := cctx.Args().Get(0)

View File

@ -7,6 +7,7 @@ import (
"math/big"
big2 "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
@ -74,7 +75,7 @@ var minerTypesCmd = &cli.Command{
return err
}
cs := store.NewChainStore(bs, bs, mds, nil)
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
cst := cbor.NewCborStore(bs)

View File

@ -8,7 +8,6 @@ import (
"fmt"
"github.com/fatih/color"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
@ -16,7 +15,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
"github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
@ -141,7 +140,7 @@ func printMessage(cctx *cli.Context, msg *types.Message) error {
return nil
}
fmt.Println("Method:", stmgr.MethodsMap[toact.Code][msg.Method].Name)
fmt.Println("Method:", filcns.NewActorRegistry().Methods[toact.Code][msg.Method].Name) // todo use remote
p, err := lcli.JsonParams(toact.Code, msg.Method, msg.Params)
if err != nil {
return err

View File

@ -6,6 +6,7 @@ import (
"io"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/ipfs/bbloom"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
@ -167,7 +168,7 @@ var stateTreePruneCmd = &cli.Command{
return nil
}
cs := store.NewChainStore(bs, bs, mds, nil)
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
if err := cs.Load(); err != nil {

View File

@ -4,6 +4,7 @@ import (
"context"
"math"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"go.uber.org/zap"
"golang.org/x/xerrors"
@ -83,6 +84,7 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S
Epoch: parentTs.Height() + 1,
Rand: r,
Bstore: sm.ChainStore().StateBlockstore(),
Actors: filcns.NewActorRegistry(),
Syscalls: sm.VMSys(),
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,

View File

@ -11,6 +11,7 @@ import (
"github.com/ipfs/go-datastore/query"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -61,7 +62,7 @@ func NewNode(ctx context.Context, r repo.Repo) (nd *Node, _err error) {
}
return &Node{
repo: lr,
Chainstore: store.NewChainStore(bs, bs, ds, nil),
Chainstore: store.NewChainStore(bs, bs, ds, filcns.Weight, nil),
MetadataDS: ds,
Blockstore: bs,
}, err
@ -105,7 +106,7 @@ func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) {
if err != nil {
return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err)
}
sim.StateManager, err = stmgr.NewStateManagerWithUpgradeSchedule(nd.Chainstore, vm.Syscalls(mock.Verifier), us)
sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, filcns.NewTipSetExecutor(), vm.Syscalls(mock.Verifier), us)
if err != nil {
return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err)
}
@ -124,10 +125,14 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet)
if err != nil {
return nil, err
}
sm, err := stmgr.NewStateManager(nd.Chainstore, filcns.NewTipSetExecutor(), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule())
if err != nil {
return nil, xerrors.Errorf("creating state manager: %w", err)
}
sim := &Simulation{
name: name,
Node: nd,
StateManager: stmgr.NewStateManager(nd.Chainstore, vm.Syscalls(mock.Verifier)),
StateManager: sm,
stages: stages,
}
if has, err := nd.MetadataDS.Has(sim.key("head")); err != nil {

View File

@ -16,6 +16,7 @@ import (
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
@ -35,7 +36,7 @@ type config struct {
// upgradeSchedule constructs an stmgr.StateManager upgrade schedule, overriding any network upgrade
// epochs as specified in the config.
func (c *config) upgradeSchedule() (stmgr.UpgradeSchedule, error) {
upgradeSchedule := stmgr.DefaultUpgradeSchedule()
upgradeSchedule := filcns.DefaultUpgradeSchedule()
expected := make(map[network.Version]struct{}, len(c.Upgrades))
for nv := range c.Upgrades {
expected[nv] = struct{}{}
@ -200,7 +201,7 @@ func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch
if err != nil {
return err
}
sm, err := stmgr.NewStateManagerWithUpgradeSchedule(sim.Node.Chainstore, vm.Syscalls(mock.Verifier), newUpgradeSchedule)
sm, err := stmgr.NewStateManager(sim.Node.Chainstore, filcns.NewTipSetExecutor(), vm.Syscalls(mock.Verifier), newUpgradeSchedule)
if err != nil {
return err
}

View File

@ -23,7 +23,7 @@ import (
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
@ -103,7 +103,7 @@ func (c *InteractiveWallet) WalletSign(ctx context.Context, k address.Address, m
return xerrors.Errorf("looking up dest actor: %w", err)
}
fmt.Println("Method:", stmgr.MethodsMap[toact.Code][cmsg.Method].Name)
fmt.Println("Method:", filcns.NewActorRegistry().Methods[toact.Code][cmsg.Method].Name)
p, err := lcli.JsonParams(toact.Code, cmsg.Method, cmsg.Params)
if err != nil {
return err
@ -125,7 +125,7 @@ func (c *InteractiveWallet) WalletSign(ctx context.Context, k address.Address, m
return xerrors.Errorf("looking up msig dest actor: %w", err)
}
fmt.Println("\tMultiSig Proposal Method:", stmgr.MethodsMap[toact.Code][mp.Method].Name)
fmt.Println("\tMultiSig Proposal Method:", filcns.NewActorRegistry().Methods[toact.Code][mp.Method].Name) // todo use remote
p, err := lcli.JsonParams(toact.Code, mp.Method, mp.Params)
if err != nil {
return err

View File

@ -31,6 +31,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -483,7 +484,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
return xerrors.Errorf("failed to open journal: %w", err)
}
cst := store.NewChainStore(bs, bs, mds, j)
cst := store.NewChainStore(bs, bs, mds, filcns.Weight, j)
defer cst.Close() //nolint:errcheck
log.Infof("importing chain from %s...", fname)
@ -519,7 +520,10 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
return err
}
stm := stmgr.NewStateManager(cst, vm.Syscalls(ffiwrapper.ProofVerifier))
stm, err := stmgr.NewStateManager(cst, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule())
if err != nil {
return err
}
if !snapshot {
log.Infof("validating imported chain...")

View File

@ -13,12 +13,11 @@ import (
"github.com/fatih/color"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/hashicorp/go-multierror"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/chain/stmgr"
)
var extractManyFlags struct {
@ -158,7 +157,7 @@ func runExtractMany(c *cli.Context) error {
}
// Lookup the method in actor method table.
if m, ok := stmgr.MethodsMap[codeCid]; !ok {
if m, ok := filcns.NewActorRegistry().Methods[codeCid]; !ok {
return fmt.Errorf("unrecognized actor: %s", actorcode)
} else if methodnum >= len(m) {
return fmt.Errorf("unrecognized method number for actor %s: %d", actorcode, methodnum)

View File

@ -6,6 +6,7 @@ import (
"os"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -101,9 +102,13 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
tipset = params.Tipset
syscalls = vm.Syscalls(ffiwrapper.ProofVerifier)
cs = store.NewChainStore(bs, bs, ds, nil)
sm = stmgr.NewStateManager(cs, syscalls)
cs = store.NewChainStore(bs, bs, ds, filcns.Weight, nil)
tse = filcns.NewTipSetExecutor()
sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule())
)
if err != nil {
return nil, err
}
if params.Rand == nil {
params.Rand = NewFixedRand()
@ -115,11 +120,10 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
defer cs.Close() //nolint:errcheck
blocks := make([]store.BlockMessages, 0, len(tipset.Blocks))
blocks := make([]filcns.FilecoinBlockMessages, 0, len(tipset.Blocks))
for _, b := range tipset.Blocks {
sb := store.BlockMessages{
Miner: b.MinerAddr,
WinCount: b.WinCount,
Miner: b.MinerAddr,
}
for _, m := range b.Messages {
msg, err := types.DecodeMessage(m)
@ -138,7 +142,10 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
sb.BlsMessages = append(sb.BlsMessages, msg)
}
}
blocks = append(blocks, sb)
blocks = append(blocks, filcns.FilecoinBlockMessages{
BlockMessages: sb,
WinCount: b.WinCount,
})
}
recordOutputs := &outputRecorder{
@ -146,7 +153,8 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
results: []*vm.ApplyRet{},
}
postcid, receiptsroot, err := sm.ApplyBlocks(context.Background(),
postcid, receiptsroot, err := tse.ApplyBlocks(context.Background(),
sm,
params.ParentEpoch,
params.Preroot,
blocks,
@ -196,7 +204,10 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP
// dummy state manager; only to reference the GetNetworkVersion method,
// which does not depend on state.
sm := stmgr.NewStateManager(nil, nil)
sm, err := stmgr.NewStateManager(nil, filcns.NewTipSetExecutor(), nil, filcns.DefaultUpgradeSchedule())
if err != nil {
return nil, cid.Cid{}, err
}
vmOpts := &vm.VMOpts{
StateBase: params.Preroot,
@ -216,7 +227,7 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP
return nil, cid.Undef, err
}
invoker := vm.NewActorRegistry()
invoker := filcns.NewActorRegistry()
// register the chaos actor if required by the vector.
if chaosOn, ok := d.selector["chaos_actor"]; ok && chaosOn == "true" {

2
go.sum
View File

@ -169,6 +169,7 @@ github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmf
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@ -1513,6 +1514,7 @@ github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=

View File

@ -3,6 +3,8 @@ package kit
import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/stmgr"
)
@ -28,7 +30,7 @@ func SDRUpgradeAt(calico, persian abi.ChainEpoch) EnsembleOpt {
}, stmgr.Upgrade{
Network: network.Version7,
Height: calico,
Migration: stmgr.UpgradeCalico,
Migration: filcns.UpgradeCalico,
}, stmgr.Upgrade{
Network: network.Version8,
Height: persian,
@ -42,7 +44,7 @@ func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt {
}, stmgr.Upgrade{
Network: network.Version13,
Height: upgradeHeight,
Migration: stmgr.UpgradeActorsV5,
Migration: filcns.UpgradeActorsV5,
})
}
@ -53,6 +55,6 @@ func TurboUpgradeAt(upgradeHeight abi.ChainEpoch) EnsembleOpt {
}, stmgr.Upgrade{
Network: network.Version12,
Height: upgradeHeight,
Migration: stmgr.UpgradeActorsV4,
Migration: filcns.UpgradeActorsV4,
})
}

52
lib/async/error.go Normal file
View File

@ -0,0 +1,52 @@
package async
// based on https://github.com/Gurpartap/async
// Apache-2.0 License, see https://github.com/Gurpartap/async/blob/master/License.txt
import (
"context"
"golang.org/x/xerrors"
)
type ErrorFuture interface {
Await() error
AwaitContext(ctx context.Context) error
}
type errorFuture struct {
await func(ctx context.Context) error
}
func (f errorFuture) Await() error {
return f.await(context.Background())
}
func (f errorFuture) AwaitContext(ctx context.Context) error {
return f.await(ctx)
}
func Err(f func() error) ErrorFuture {
var err error
c := make(chan struct{}, 1)
go func() {
defer close(c)
defer func() {
if rerr := recover(); rerr != nil {
err = xerrors.Errorf("async error: %s", rerr)
return
}
}()
err = f()
}()
return errorFuture{
await: func(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-c:
return err
}
},
}
}
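This new helper backs the async.Err/AwaitContext pattern used by the parallel block-validation checks earlier in this diff. A brief usage sketch, with checkA and checkB standing in for real validation closures (they are hypothetical, not from the PR):

// runChecks fans out two independent checks and awaits both, mirroring how
// ValidateBlock uses async.Err for its concurrent validations.
func runChecks(ctx context.Context, checkA, checkB func() error) error {
	futs := []async.ErrorFuture{
		async.Err(checkA), // each check runs in its own goroutine
		async.Err(checkB),
	}
	for _, fut := range futs {
		if err := fut.AwaitContext(ctx); err != nil {
			return err
		}
	}
	return nil
}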

View File

@ -8,7 +8,8 @@ import (
"github.com/multiformats/go-multihash"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
)
func main() {
@ -44,7 +45,7 @@ func main() {
out := map[string][]string{}
for c, methods := range stmgr.MethodsMap {
for c, methods := range filcns.NewActorRegistry().Methods {
cmh, err := multihash.Decode(c.Hash())
if err != nil {
panic(err)

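The method table that used to live in stmgr.MethodsMap is now reached through the actor registry, as the loop above shows. As an illustrative sketch only (assuming vm.MethodMeta keeps a Name field, as the old map entries did), resolving a human-readable method name from an actor code CID and method number looks roughly like:

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/consensus/filcns"
)

// methodName returns a placeholder when either the actor code or the
// method number is not present in the registry.
func methodName(code cid.Cid, method abi.MethodNum) string {
	methods, ok := filcns.NewActorRegistry().Methods[code]
	if !ok {
		return "unknown actor"
	}
	meta, ok := methods[method]
	if !ok {
		return "unknown method"
	}
	return meta.Name
}

func main() {
	fmt.Println(methodName(cid.Undef, 0))
}
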
View File

@ -14,6 +14,8 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/chain/market"
@ -48,7 +50,7 @@ var ChainNode = Options(
// Consensus settings
Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig),
Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()),
Override(new(stmgr.UpgradeSchedule), filcns.DefaultUpgradeSchedule()),
Override(new(dtypes.NetworkName), modules.NetworkName),
Override(new(modules.Genesis), modules.ErrorGenesis),
Override(new(dtypes.AfterGenesisSet), modules.SetGenesis),
@ -67,6 +69,10 @@ var ChainNode = Options(
Override(new(vm.SyscallBuilder), vm.Syscalls),
// Consensus: Chain storage/access
Override(new(chain.Genesis), chain.LoadGenesis),
Override(new(store.WeightFunc), filcns.Weight),
Override(new(stmgr.Executor), filcns.NewTipSetExecutor()),
Override(new(consensus.Consensus), filcns.NewFilecoinExpectedConsensus),
Override(new(*store.ChainStore), modules.ChainStore),
Override(new(*stmgr.StateManager), modules.StateManager),
Override(new(dtypes.ChainBitswap), modules.ChainBitswap),

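With consensus now bound behind interfaces rather than hard-wired, a build embedding lotus can in principle point the same bindings at a different implementation. A hedged sketch reusing the filcns defaults shown above; node.Options and node.Override are the existing DI helpers, everything else here is illustrative:

package main

import (
	"github.com/filecoin-project/lotus/chain/consensus"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/node"
)

// consensusDefaults bundles the three consensus-related bindings so they
// are always swapped together; mixing a weight function from one engine
// with an executor from another would be inconsistent.
func consensusDefaults() node.Option {
	return node.Options(
		node.Override(new(store.WeightFunc), filcns.Weight),
		node.Override(new(stmgr.Executor), filcns.NewTipSetExecutor()),
		node.Override(new(consensus.Consensus), filcns.NewFilecoinExpectedConsensus),
	)
}

func main() {
	_ = consensusDefaults() // would be passed to node.New along with the other options
}
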
View File

@ -7,6 +7,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"golang.org/x/xerrors"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/go-state-types/big"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
@ -15,9 +16,9 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/peermgr"
@ -48,10 +49,11 @@ type Service struct {
cs *store.ChainStore
syncer *chain.Syncer
cons consensus.Consensus
pmgr *peermgr.PeerMgr
}
func NewHelloService(h host.Host, cs *store.ChainStore, syncer *chain.Syncer, pmgr peermgr.MaybePeerMgr) *Service {
func NewHelloService(h host.Host, cs *store.ChainStore, syncer *chain.Syncer, cons consensus.Consensus, pmgr peermgr.MaybePeerMgr) *Service {
if pmgr.Mgr == nil {
log.Warn("running without peer manager")
}
@ -61,6 +63,7 @@ func NewHelloService(h host.Host, cs *store.ChainStore, syncer *chain.Syncer, pm
cs: cs,
syncer: syncer,
cons: cons,
pmgr: pmgr.Mgr,
}
}

View File

@ -11,6 +11,7 @@ import (
"sync"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
"go.uber.org/fx"
"golang.org/x/xerrors"
@ -78,7 +79,8 @@ type ChainAPI struct {
WalletAPI
ChainModuleAPI
Chain *store.ChainStore
Chain *store.ChainStore
TsExec stmgr.Executor
// ExposedBlockstore is the global monolith blockstore that is safe to
// expose externally. In the future, this will be segregated into two
@ -394,7 +396,7 @@ func (s stringKey) Key() string {
// TODO: ActorUpgrade: this entire function is a problem (in theory) as we don't know the HAMT version.
// In practice, hamt v0 should work "just fine" for reading.
func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {
func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {
return func(ctx context.Context, ds ipld.NodeGetter, nd ipld.Node, names []string) (*ipld.Link, []string, error) {
store := adt.WrapStore(ctx, cbor.NewCborStore(bs))
@ -468,7 +470,7 @@ func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.Nod
}, nil, nil
}
return resolveOnce(bs)(ctx, ds, n, names[1:])
return resolveOnce(bs, tse)(ctx, ds, n, names[1:])
}
if strings.HasPrefix(names[0], "@A:") {
@ -517,7 +519,7 @@ func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.Nod
}, nil, nil
}
return resolveOnce(bs)(ctx, ds, n, names[1:])
return resolveOnce(bs, tse)(ctx, ds, n, names[1:])
}
if names[0] == "@state" {
@ -531,7 +533,7 @@ func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.Nod
return nil, nil, xerrors.Errorf("getting actor head for @state: %w", err)
}
m, err := vm.DumpActorState(&act, head.RawData())
m, err := vm.DumpActorState(tse.NewActorRegistry(), &act, head.RawData())
if err != nil {
return nil, nil, err
}
@ -565,7 +567,7 @@ func resolveOnce(bs blockstore.Blockstore) func(ctx context.Context, ds ipld.Nod
}, nil, nil
}
return resolveOnce(bs)(ctx, ds, n, names[1:])
return resolveOnce(bs, tse)(ctx, ds, n, names[1:])
}
return nd.ResolveLink(names)
@ -585,7 +587,7 @@ func (a *ChainAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject,
r := &resolver.Resolver{
DAG: dag,
ResolveOnce: resolveOnce(bs),
ResolveOnce: resolveOnce(bs, a.TsExec),
}
node, err := r.ResolvePath(ctx, ip)

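DumpActorState now takes an explicit actor registry instead of consulting package-level state, which is why resolveOnce threads an stmgr.Executor through. A stand-alone sketch of the relocated call, assuming the actor and its raw head bytes have already been fetched from the state tree and blockstore:

package chainapiexample

import (
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)

// dumpState decodes an actor's head state into a generic value using the
// registry-aware DumpActorState.
func dumpState(act *types.Actor, raw []byte) (interface{}, error) {
	reg := filcns.NewActorRegistry()
	return vm.DumpActorState(reg, act, raw)
}
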
View File

@ -29,7 +29,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -88,6 +88,8 @@ type StateAPI struct {
StateManager *stmgr.StateManager
Chain *store.ChainStore
Beacon beacon.Schedule
Consensus consensus.Consensus
TsExec stmgr.Executor
}
func (a *StateAPI) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) {
@ -469,7 +471,7 @@ func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, ts
return nil, xerrors.Errorf("getting actor head: %w", err)
}
oif, err := vm.DumpActorState(act, blk.RawData())
oif, err := vm.DumpActorState(a.TsExec.NewActorRegistry(), act, blk.RawData())
if err != nil {
return nil, xerrors.Errorf("dumping actor state (a:%s): %w", actor, err)
}
@ -487,7 +489,7 @@ func (a *StateAPI) StateDecodeParams(ctx context.Context, toAddr address.Address
return nil, xerrors.Errorf("getting actor: %w", err)
}
paramType, err := stmgr.GetParamType(act.Code, method)
paramType, err := stmgr.GetParamType(a.TsExec.NewActorRegistry(), act.Code, method)
if err != nil {
return nil, xerrors.Errorf("getting params type: %w", err)
}
@ -500,7 +502,7 @@ func (a *StateAPI) StateDecodeParams(ctx context.Context, toAddr address.Address
}
func (a *StateAPI) StateEncodeParams(ctx context.Context, toActCode cid.Cid, method abi.MethodNum, params json.RawMessage) ([]byte, error) {
paramType, err := stmgr.GetParamType(toActCode, method)
paramType, err := stmgr.GetParamType(a.TsExec.NewActorRegistry(), toActCode, method)
if err != nil {
return nil, xerrors.Errorf("getting params type: %w", err)
}
@ -524,10 +526,13 @@ func (a *StateAPI) MinerGetBaseInfo(ctx context.Context, maddr address.Address,
}
func (a *StateAPI) MinerCreateBlock(ctx context.Context, bt *api.BlockTemplate) (*types.BlockMsg, error) {
fblk, err := gen.MinerCreateBlock(ctx, a.StateManager, a.Wallet, bt)
fblk, err := a.Consensus.CreateBlock(ctx, a.Wallet, bt)
if err != nil {
return nil, err
}
if fblk == nil {
return nil, nil
}
var out types.BlockMsg
out.Header = fblk.Header

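GetParamType likewise takes the registry explicitly now. A hedged sketch of decoding raw message params into their typed form, assuming (as StateDecodeParams above does) that the returned value is a CBOR unmarshaler:

package stateexample

import (
	"bytes"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/stmgr"
)

// decodeParams unmarshals raw CBOR params for a given actor code and
// method number; the caller looks up actCode from the target actor first.
func decodeParams(actCode cid.Cid, method abi.MethodNum, raw []byte) (interface{}, error) {
	p, err := stmgr.GetParamType(filcns.NewActorRegistry(), actCode, method)
	if err != nil {
		return nil, err
	}
	if err := p.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
		return nil, err
	}
	return p, nil
}
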
View File

@ -21,7 +21,7 @@ import (
type SyncAPI struct {
fx.In
SlashFilter *slashfilter.SlashFilter
SlashFilter *slashfilter.SlashFilter `optional:"true"`
Syncer *chain.Syncer
PubSub *pubsub.PubSub
NetName dtypes.NetworkName
@ -56,9 +56,11 @@ func (a *SyncAPI) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) erro
return xerrors.Errorf("loading parent block: %w", err)
}
if err := a.SlashFilter.MinedBlock(blk.Header, parent.Height); err != nil {
log.Errorf("<!!> SLASH FILTER ERROR: %s", err)
return xerrors.Errorf("<!!> SLASH FILTER ERROR: %w", err)
if a.SlashFilter != nil {
if err := a.SlashFilter.MinedBlock(blk.Header, parent.Height); err != nil {
log.Errorf("<!!> SLASH FILTER ERROR: %s", err)
return xerrors.Errorf("<!!> SLASH FILTER ERROR: %w", err)
}
}
// TODO: should we have some sort of fast path to adding a local block?

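The slash filter becomes an optional fx dependency here, which is why the nil check is added before use. A minimal, self-contained illustration of the `optional:"true"` tag (names are illustrative, not from lotus): the field is injected when a provider exists and left nil otherwise, so the consumer must nil-check.

package main

import (
	"fmt"

	"go.uber.org/fx"
)

type Widget struct{ Name string }

type Params struct {
	fx.In

	W *Widget `optional:"true"` // nil if no *Widget provider is registered
}

func report(p Params) {
	if p.W == nil {
		fmt.Println("running without a widget")
		return
	}
	fmt.Println("widget:", p.W.Name)
}

func main() {
	// No *Widget provider is registered, so report sees a nil field.
	app := fx.New(fx.Invoke(report))
	if err := app.Err(); err != nil {
		fmt.Println("fx:", err)
	}
}
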
View File

@ -17,13 +17,13 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/exchange"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/helpers"
@ -58,8 +58,8 @@ func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dty
return blockservice.New(bs, rem)
}
func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) {
mp, err := messagepool.New(mpp, ds, nn, j)
func MessagePool(lc fx.Lifecycle, us stmgr.UpgradeSchedule, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) {
mp, err := messagepool.New(mpp, ds, us, nn, j)
if err != nil {
return nil, xerrors.Errorf("constructing mpool: %w", err)
}
@ -72,8 +72,15 @@ func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS
return mp, nil
}
func ChainStore(lc fx.Lifecycle, cbs dtypes.ChainBlockstore, sbs dtypes.StateBlockstore, ds dtypes.MetadataDS, basebs dtypes.BaseBlockstore, j journal.Journal) *store.ChainStore {
chain := store.NewChainStore(cbs, sbs, ds, j)
func ChainStore(lc fx.Lifecycle,
cbs dtypes.ChainBlockstore,
sbs dtypes.StateBlockstore,
ds dtypes.MetadataDS,
basebs dtypes.BaseBlockstore,
weight store.WeightFunc,
j journal.Journal) *store.ChainStore {
chain := store.NewChainStore(cbs, sbs, ds, weight, j)
if err := chain.Load(); err != nil {
log.Warnf("loading chain state from disk: %s", err)
@ -100,14 +107,20 @@ func ChainStore(lc fx.Lifecycle, cbs dtypes.ChainBlockstore, sbs dtypes.StateBlo
return chain
}
func NetworkName(mctx helpers.MetricsCtx, lc fx.Lifecycle, cs *store.ChainStore, syscalls vm.SyscallBuilder, us stmgr.UpgradeSchedule, _ dtypes.AfterGenesisSet) (dtypes.NetworkName, error) {
func NetworkName(mctx helpers.MetricsCtx,
lc fx.Lifecycle,
cs *store.ChainStore,
tsexec stmgr.Executor,
syscalls vm.SyscallBuilder,
us stmgr.UpgradeSchedule,
_ dtypes.AfterGenesisSet) (dtypes.NetworkName, error) {
if !build.Devnet {
return "testnetnet", nil
}
ctx := helpers.LifecycleCtx(mctx, lc)
sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, syscalls, us)
sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us)
if err != nil {
return "", err
}
@ -126,7 +139,8 @@ type SyncerParams struct {
SyncMgrCtor chain.SyncManagerCtor
Host host.Host
Beacon beacon.Schedule
Verifier ffiwrapper.Verifier
Gent chain.Genesis
Consensus consensus.Consensus
}
func NewSyncer(params SyncerParams) (*chain.Syncer, error) {
@ -138,9 +152,8 @@ func NewSyncer(params SyncerParams) (*chain.Syncer, error) {
smCtor = params.SyncMgrCtor
h = params.Host
b = params.Beacon
v = params.Verifier
)
syncer, err := chain.NewSyncer(ds, sm, ex, smCtor, h.ConnManager(), h.ID(), b, v)
syncer, err := chain.NewSyncer(ds, sm, ex, smCtor, h.ConnManager(), h.ID(), b, params.Gent, params.Consensus)
if err != nil {
return nil, err
}

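Pulling the module changes together, the manual (non-fx) construction order after this cleanup is: weight function into the chain store, then executor and upgrade schedule into the state manager. A sketch under the assumption that blockstores, datastore, syscalls and journal are created elsewhere:

package modexample

import (
	"github.com/ipfs/go-datastore"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/consensus/filcns"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/vm"
	"github.com/filecoin-project/lotus/journal"
)

func newChainState(chainBs, stateBs blockstore.Blockstore, ds datastore.Batching,
	sys vm.SyscallBuilder, j journal.Journal) (*store.ChainStore, *stmgr.StateManager, error) {
	// The weight function is now a constructor argument rather than a
	// default baked into the store package.
	cs := store.NewChainStore(chainBs, stateBs, ds, filcns.Weight, j)

	// The tipset executor and upgrade schedule likewise come from filcns.
	sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), sys, filcns.DefaultUpgradeSchedule())
	if err != nil {
		return nil, nil, err
	}
	return cs, sm, nil
}
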
View File

@ -6,6 +6,7 @@ import (
"strconv"
"time"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
eventbus "github.com/libp2p/go-eventbus"
@ -136,11 +137,19 @@ func waitForSync(stmgr *stmgr.StateManager, epochs int, subscribe func()) {
})
}
func HandleIncomingBlocks(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, s *chain.Syncer, bserv dtypes.ChainBlockService, chain *store.ChainStore, stmgr *stmgr.StateManager, h host.Host, nn dtypes.NetworkName) {
func HandleIncomingBlocks(mctx helpers.MetricsCtx,
lc fx.Lifecycle,
ps *pubsub.PubSub,
s *chain.Syncer,
bserv dtypes.ChainBlockService,
chain *store.ChainStore,
cns consensus.Consensus,
h host.Host,
nn dtypes.NetworkName) {
ctx := helpers.LifecycleCtx(mctx, lc)
v := sub.NewBlockValidator(
h.ID(), chain, stmgr,
h.ID(), chain, cns,
func(p peer.ID) {
ps.BlacklistPeer(p)
h.ConnManager().TagPeer(p, "badblock", -1000)

View File

@ -8,8 +8,8 @@ import (
"github.com/filecoin-project/lotus/chain/store"
)
func StateManager(lc fx.Lifecycle, cs *store.ChainStore, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule) (*stmgr.StateManager, error) {
sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, sys, us)
func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule) (*stmgr.StateManager, error) {
sm, err := stmgr.NewStateManager(cs, exec, sys, us)
if err != nil {
return nil, err
}

View File

@ -110,7 +110,9 @@ func as(in interface{}, as interface{}) interface{} {
panic("outType is not a pointer")
}
if reflect.TypeOf(in).Kind() != reflect.Func {
inType := reflect.TypeOf(in)
if inType.Kind() != reflect.Func || inType.AssignableTo(outType.Elem()) {
ctype := reflect.FuncOf(nil, []reflect.Type{outType.Elem()}, false)
return reflect.MakeFunc(ctype, func(args []reflect.Value) (results []reflect.Value) {
@ -121,8 +123,6 @@ func as(in interface{}, as interface{}) interface{} {
}).Interface()
}
inType := reflect.TypeOf(in)
ins := make([]reflect.Type, inType.NumIn())
outs := make([]reflect.Type, inType.NumOut())
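
The tweak above lets the DI helper accept plain values (or constructors whose result is already assignable to the target interface) and wrap them in a zero-argument constructor. A simplified, stand-alone re-implementation of that idea for illustration only; it is not the lotus code, and the real helper also rewrites constructor signatures:

package main

import (
	"fmt"
	"reflect"
)

// asConstructor wraps a non-function value (or one already assignable to the
// requested interface) in a func() T returning it as that interface, so a DI
// container can treat values and constructors uniformly.
func asConstructor(in interface{}, out interface{}) interface{} {
	outType := reflect.TypeOf(out) // e.g. new(fmt.Stringer) -> *fmt.Stringer
	inType := reflect.TypeOf(in)

	if inType.Kind() != reflect.Func || inType.AssignableTo(outType.Elem()) {
		ctype := reflect.FuncOf(nil, []reflect.Type{outType.Elem()}, false)
		return reflect.MakeFunc(ctype, func([]reflect.Value) []reflect.Value {
			return []reflect.Value{reflect.ValueOf(in).Convert(outType.Elem())}
		}).Interface()
	}
	// A real implementation rewrites the constructor's return type here.
	return in
}

type greeter struct{}

func (greeter) String() string { return "hi" }

func main() {
	ctor := asConstructor(greeter{}, new(fmt.Stringer)).(func() fmt.Stringer)
	fmt.Println(ctor().String()) // prints "hi"
}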