Merge remote-tracking branch 'origin/master' into refactor/net-upgrade

Commit: 70faa36b7f
@@ -254,6 +254,25 @@ jobs:
          path: /tmp/test-reports
      - store_artifacts:
          path: /tmp/test-artifacts/conformance-coverage.html

  build-lotus-soup:
    description: |
      Compile `lotus-soup` Testground test plan using the current version of Lotus.
    parameters:
      <<: *test-params
    executor: << parameters.executor >>
    steps:
      - install-deps
      - prepare
      - run: cd extern/oni && git submodule sync
      - run: cd extern/oni && git submodule update --init
      - run: cd extern/filecoin-ffi && make
      - run:
          name: "replace lotus, filecoin-ffi, blst and fil-blst deps"
          command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst
      - run:
          name: "build lotus-soup testplan"
          command: pushd extern/oni/lotus-soup && go build -tags=testground .
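As an aside, `go mod edit -replace old=new` records a `replace old => new` directive in the module file, so the "replace lotus, filecoin-ffi, blst and fil-blst deps" step above should leave `extern/oni/lotus-soup/go.mod` with roughly the following directives (shown only as an illustration of what the CI step produces):

```
replace github.com/filecoin-project/lotus => ../../../

replace github.com/filecoin-project/filecoin-ffi => ../../filecoin-ffi

replace github.com/supranational/blst => ../../fil-blst/blst

replace github.com/filecoin-project/fil-blst => ../../fil-blst
```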
  build-macos:
    description: build darwin lotus binary

@@ -428,6 +447,7 @@ workflows:
          test-suite-name: conformance-bleeding-edge
          packages: "./conformance"
          vectors-branch: master
      - build-lotus-soup
      - build-debug
      - build-all:
          requires:
2  .gitignore (vendored)

@@ -10,6 +10,8 @@
/lotus-fountain
/lotus-stats
/lotus-bench
/lotus-gateway
/lotus-pcr
/bench.json
/lotuspond/front/node_modules
/lotuspond/front/build
3  .gitmodules (vendored)

@@ -11,3 +11,6 @@
[submodule "extern/fil-blst"]
	path = extern/fil-blst
	url = https://github.com/filecoin-project/fil-blst.git
[submodule "extern/oni"]
	path = extern/oni
	url = https://github.com/filecoin-project/oni
24  CHANGELOG.md

@@ -1,5 +1,29 @@
# Lotus changelog

# 0.7.1 / 2020-09-17

This optional release of Lotus introduces some critical fixes to the window PoSt process. It also upgrades some core dependencies, and introduces many improvements to the mining process, deal-making cycle, and overall user experience.

## Changes

#### Some notable improvements:

- Correctly construct params for `SubmitWindowedPoSt` messages (https://github.com/filecoin-project/lotus/pull/3909)
- Skip sectors correctly for Window PoSt (https://github.com/filecoin-project/lotus/pull/3839)
- Split window PoSt submission into multiple messages (https://github.com/filecoin-project/lotus/pull/3689)
- Improve journal coverage (https://github.com/filecoin-project/lotus/pull/2455)
- Allow retrievals while sealing (https://github.com/filecoin-project/lotus/pull/3778)
- Don't prune locally published messages (https://github.com/filecoin-project/lotus/pull/3772)
- Add get-ask, set-ask retrieval commands (https://github.com/filecoin-project/lotus/pull/3886)
- Consistently name winning and window PoSt in logs (https://github.com/filecoin-project/lotus/pull/3873)
- Add auto flag to mpool replace (https://github.com/filecoin-project/lotus/pull/3752)

#### Dependencies

- Upgrade markets to `v0.6.1` (https://github.com/filecoin-project/lotus/pull/3906)
- Upgrade specs-actors to `v0.9.10` (https://github.com/filecoin-project/lotus/pull/3846)
- Upgrade badger (https://github.com/filecoin-project/lotus/pull/3739)

# 0.7.0 / 2020-09-10

This consensus-breaking release of Lotus is designed to test a network upgrade on the space race testnet. The changes that break consensus are:
6  Makefile

@@ -92,6 +92,12 @@ lotus-shed: $(BUILD_DEPS)
.PHONY: lotus-shed
BINS+=lotus-shed

lotus-gateway: $(BUILD_DEPS)
	rm -f lotus-gateway
	go build $(GOFLAGS) -o lotus-gateway ./cmd/lotus-gateway
.PHONY: lotus-gateway
BINS+=lotus-gateway

build: lotus lotus-miner lotus-worker
	@[[ $$(type -P "lotus") ]] && echo "Caution: you have \
an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
@@ -18,7 +18,7 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more

## Building & Documentation

For instructions on how to build lotus from source, please visit [https://lotu.sh](https://lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).
For instructions on how to build lotus from source, please visit [Lotus build and setup instruction](https://docs.filecoin.io/get-started/lotus/installation/#minimal-requirements) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).

## Reporting a Vulnerability
@@ -353,6 +353,8 @@ type FullNode interface {
	StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
	// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
	StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
	// StateMsgGasCost searches for a message in the chain, and returns details of the messages gas costs, including the penalty and miner tip
	StateMsgGasCost(context.Context, cid.Cid, types.TipSetKey) (*MsgGasCost, error)
	// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
	// message arrives on chain, and gets to the indicated confidence depth.
	StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)

@@ -453,8 +455,8 @@ type FullNode interface {

	PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error)
	PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error)
	PaychAvailableFunds(ch address.Address) (*ChannelAvailableFunds, error)
	PaychAvailableFundsByFromTo(from, to address.Address) (*ChannelAvailableFunds, error)
	PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error)
	PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error)
	PaychList(context.Context) ([]address.Address, error)
	PaychStatus(context.Context, address.Address) (*PaychStatus, error)
	PaychSettle(context.Context, address.Address) (cid.Cid, error)

@@ -523,6 +525,17 @@ type MsgLookup struct {
	Height abi.ChainEpoch
}

type MsgGasCost struct {
	Message            cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
	GasUsed            abi.TokenAmount
	BaseFeeBurn        abi.TokenAmount
	OverEstimationBurn abi.TokenAmount
	MinerPenalty       abi.TokenAmount
	MinerTip           abi.TokenAmount
	Refund             abi.TokenAmount
	TotalCost          abi.TokenAmount
}

type BlockMessages struct {
	BlsMessages   []*types.Message
	SecpkMessages []*types.SignedMessage
@@ -183,6 +183,7 @@ type FullNodeStruct struct {
	StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"`
	StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"`
	StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"`
	StateMsgGasCost func(context.Context, cid.Cid, types.TipSetKey) (*api.MsgGasCost, error) `perm:"read"`
	StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"`
	StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
	StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`

@@ -220,8 +221,8 @@ type FullNodeStruct struct {

	PaychGet func(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
	PaychGetWaitReady func(context.Context, cid.Cid) (address.Address, error) `perm:"sign"`
	PaychAvailableFunds func(address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
	PaychAvailableFundsByFromTo func(address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
	PaychAvailableFunds func(context.Context, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
	PaychAvailableFundsByFromTo func(context.Context, address.Address, address.Address) (*api.ChannelAvailableFunds, error) `perm:"sign"`
	PaychList func(context.Context) ([]address.Address, error) `perm:"read"`
	PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"`
	PaychSettle func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`

@@ -817,6 +818,10 @@ func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Addres
	return c.Internal.StateReadState(ctx, addr, tsk)
}

func (c *FullNodeStruct) StateMsgGasCost(ctx context.Context, msgc cid.Cid, tsk types.TipSetKey) (*api.MsgGasCost, error) {
	return c.Internal.StateMsgGasCost(ctx, msgc, tsk)
}

func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) {
	return c.Internal.StateWaitMsg(ctx, msgc, confidence)
}

@@ -949,12 +954,12 @@ func (c *FullNodeStruct) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid
	return c.Internal.PaychGetWaitReady(ctx, sentinel)
}

func (c *FullNodeStruct) PaychAvailableFunds(ch address.Address) (*api.ChannelAvailableFunds, error) {
	return c.Internal.PaychAvailableFunds(ch)
func (c *FullNodeStruct) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) {
	return c.Internal.PaychAvailableFunds(ctx, ch)
}

func (c *FullNodeStruct) PaychAvailableFundsByFromTo(from, to address.Address) (*api.ChannelAvailableFunds, error) {
	return c.Internal.PaychAvailableFundsByFromTo(from, to)
func (c *FullNodeStruct) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) {
	return c.Internal.PaychAvailableFundsByFromTo(ctx, from, to)
}

func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) {
@@ -29,7 +29,7 @@ func init() {
	BuildType |= Build2k
}

const BlockDelaySecs = uint64(30)
const BlockDelaySecs = uint64(4)

const PropagationDelaySecs = uint64(1)

@@ -29,7 +29,7 @@ func buildType() string {
}

// BuildVersion is the local build version, set by build system
const BuildVersion = "0.7.0"
const BuildVersion = "0.7.1"

func UserVersion() string {
	return BuildVersion + buildType() + CurrentCommit
@@ -54,6 +54,8 @@ type State interface {

	DeadlineInfo(epoch abi.ChainEpoch) *dline.Info

	MaxAddressedSectors() (uint64, error)

	// Diff helpers. Used by Diff* functions internally.
	sectors() (adt.Array, error)
	decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error)

@@ -208,6 +208,11 @@ func (s *v0State) NumDeadlines() (uint64, error) {
	return v0miner.WPoStPeriodDeadlines, nil
}

// Max sectors per PoSt
func (s *v0State) MaxAddressedSectors() (uint64, error) {
	return v0miner.AddressedSectorsMax, nil
}

func (s *v0State) DeadlinesChanged(other State) bool {
	v0other, ok := other.(*v0State)
	if !ok {
@@ -52,6 +52,7 @@ var RepublishInterval = time.Duration(10*build.BlockDelaySecs+build.PropagationD

var minimumBaseFee = types.NewInt(uint64(build.MinimumBaseFee))
var baseFeeLowerBoundFactor = types.NewInt(10)
var baseFeeLowerBoundFactorConservative = types.NewInt(100)

var MaxActorPendingMessages = 1000

@@ -104,10 +105,6 @@ type MessagePoolEvtMessage struct {
	CID cid.Cid
}

// this is *temporary* mutilation until we have implemented uncapped miner penalties -- it will go
// away in the next fork.
var strictBaseFeeValidation = false

func init() {
	// if the republish interval is too short compared to the pubsub timecache, adjust it
	minInterval := pubsub.TimeCacheDuration + time.Duration(build.PropagationDelaySecs)

@@ -444,18 +441,27 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
	// Note that for local messages, we always add them so that they can be accepted and republished
	// automatically.
	publish := local
	if strictBaseFeeValidation && len(curTs.Blocks()) > 0 {
		baseFee := curTs.Blocks()[0].ParentBaseFee
		baseFeeLowerBound := getBaseFeeLowerBound(baseFee)
		if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
			if local {
				log.Warnf("local message will not be immediately published because GasFeeCap doesn't meet the lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s)",
					m.Message.GasFeeCap, baseFeeLowerBound)
				publish = false
			} else {
				return false, xerrors.Errorf("GasFeeCap doesn't meet base fee lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s): %w",
					m.Message.GasFeeCap, baseFeeLowerBound, ErrSoftValidationFailure)
			}

	var baseFee big.Int
	if len(curTs.Blocks()) > 0 {
		baseFee = curTs.Blocks()[0].ParentBaseFee
	} else {
		var err error
		baseFee, err = mp.api.ChainComputeBaseFee(context.TODO(), curTs)
		if err != nil {
			return false, xerrors.Errorf("computing basefee: %w", err)
		}
	}

	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactorConservative)
	if m.Message.GasFeeCap.LessThan(baseFeeLowerBound) {
		if local {
			log.Warnf("local message will not be immediately published because GasFeeCap doesn't meet the lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s)",
				m.Message.GasFeeCap, baseFeeLowerBound)
			publish = false
		} else {
			return false, xerrors.Errorf("GasFeeCap doesn't meet base fee lower bound for inclusion in the next 20 blocks (GasFeeCap: %s, baseFeeLowerBound: %s): %w",
				m.Message.GasFeeCap, baseFeeLowerBound, ErrSoftValidationFailure)
		}
	}

@@ -1344,8 +1350,8 @@ func (mp *MessagePool) Clear(local bool) {
	}
}

func getBaseFeeLowerBound(baseFee types.BigInt) types.BigInt {
	baseFeeLowerBound := types.BigDiv(baseFee, baseFeeLowerBoundFactor)
func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {
	baseFeeLowerBound := types.BigDiv(baseFee, factor)
	if baseFeeLowerBound.LessThan(minimumBaseFee) {
		baseFeeLowerBound = minimumBaseFee
	}
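The refactor above threads the divisor through `getBaseFeeLowerBound` instead of hard-coding it. A rough sketch of how the two factors are intended to be used, reusing identifiers from the diff; the surrounding wiring is illustrative only:

```go
// Illustrative sketch only. Admission into the message pool
// (verifyMsgBeforeAdd) now uses the conservative divisor, while the
// pruning and republish paths keep the original one.
admissionBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactorConservative) // baseFee / 100, floored at minimumBaseFee
republishBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)             // baseFee / 10, floored at minimumBaseFee

if m.Message.GasFeeCap.LessThan(admissionBound) {
	// verifyMsgBeforeAdd: local messages are held back from publishing,
	// remote messages fail soft validation (see the hunk above).
}
_ = republishBound // passed by pruneMessages and republishPendingMessages (hunks below)
```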
@@ -46,7 +46,7 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
	if err != nil {
		return xerrors.Errorf("computing basefee: %w", err)
	}
	baseFeeLowerBound := getBaseFeeLowerBound(baseFee)
	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)

	pending, _ := mp.getPendingMessages(ts, ts)

@@ -28,7 +28,7 @@ func (mp *MessagePool) republishPendingMessages() error {
		mp.curTsLk.Unlock()
		return xerrors.Errorf("computing basefee: %w", err)
	}
	baseFeeLowerBound := getBaseFeeLowerBound(baseFee)
	baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor)

	pending := make(map[address.Address]map[uint64]*types.SignedMessage)
	mp.lk.Lock()
@@ -236,8 +236,8 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
	}

	receipts = append(receipts, &r.MessageReceipt)
	gasReward = big.Add(gasReward, r.MinerTip)
	penalty = big.Add(penalty, r.Penalty)
	gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
	penalty = big.Add(penalty, r.GasCosts.MinerPenalty)

	if cb != nil {
		if err := cb(cm.Cid(), m, r); err != nil {

@@ -509,7 +509,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule

	sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand)
	if err != nil {
		return nil, xerrors.Errorf("getting wpost proving set: %w", err)
		return nil, xerrors.Errorf("getting winning post proving set: %w", err)
	}

	if len(sectors) == 0 {
@@ -18,7 +18,6 @@ import (
	"github.com/filecoin-project/specs-actors/actors/util/adt"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/state"
	"github.com/filecoin-project/lotus/chain/vm"
	"github.com/filecoin-project/lotus/journal"
	bstore "github.com/filecoin-project/lotus/lib/blockstore"

@@ -767,33 +766,16 @@ type BlockMessages struct {
func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
	applied := make(map[address.Address]uint64)

	cst := cbor.NewCborStore(cs.bs)

	st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot)
	if err != nil {
		return nil, xerrors.Errorf("failed to load state tree")
	}

	preloadAddr := func(a address.Address) error {
		if _, ok := applied[a]; !ok {
			act, err := st.GetActor(a)
			if err != nil {
				return err
			}

			applied[a] = act.Nonce
		}
		return nil
	}

	selectMsg := func(m *types.Message) (bool, error) {
		if err := preloadAddr(m.From); err != nil {
			return false, err
		// The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise
		if _, ok := applied[m.From]; !ok {
			applied[m.From] = m.Nonce
		}

		if applied[m.From] != m.Nonce {
			return false, nil
		}

		applied[m.From]++

		return true, nil
@@ -58,7 +58,14 @@ import (
// the theoretical max height based on systime are quickly rejected
const MaxHeightDrift = 5

var defaultMessageFetchWindowSize = 200
var (
	// LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic
	// where the Syncer publishes candidate chain heads to be synced.
	LocalIncoming = "incoming"

	log = logging.Logger("chain")

	defaultMessageFetchWindowSize = 200
)

func init() {
	if s := os.Getenv("LOTUS_BSYNC_MSG_WINDOW"); s != "" {

@@ -71,10 +78,6 @@ func init() {
	}
}

var log = logging.Logger("chain")

var LocalIncoming = "incoming"

// Syncer is in charge of running the chain synchronization logic. As such, it
// is tasked with these functions, amongst others:
//

@@ -119,7 +122,7 @@ type Syncer struct {

	self peer.ID

	syncmgr *SyncManager
	syncmgr SyncManager

	connmgr connmgr.ConnManager

@@ -140,8 +143,10 @@ type Syncer struct {
	ds dtypes.MetadataDS
}

type SyncManagerCtor func(syncFn SyncFunc) SyncManager

// NewSyncer creates a new Syncer object.
func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) {
	gen, err := sm.ChainStore().GetGenesis()
	if err != nil {
		return nil, xerrors.Errorf("getting genesis block: %w", err)

@@ -181,7 +186,7 @@ func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.C
		log.Warn("*********************************************************************************************")
	}

	s.syncmgr = NewSyncManager(s.Sync)
	s.syncmgr = syncMgrCtor(s.Sync)
	return s, nil
}

@@ -985,7 +990,7 @@ func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.Block

	rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
	if err != nil {
		return xerrors.Errorf("failed to get randomness for verifying winningPost proof: %w", err)
		return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
	}

	mid, err := address.IDFromAddress(h.Miner)

@@ -1664,11 +1669,7 @@ func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []b
}

func (syncer *Syncer) State() []SyncerState {
	var out []SyncerState
	for _, ss := range syncer.syncmgr.syncStates {
		out = append(out, ss.Snapshot())
	}
	return out
	return syncer.syncmgr.State()
}

// MarkBad manually adds a block to the "bad blocks" cache.
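The constructor-injection change above (passing a `SyncManagerCtor` into `NewSyncer`) makes it possible to substitute the sync manager, for example with a stub in tests. A minimal sketch of what such a substitute might look like, assuming the `SyncManager` interface introduced in the hunks below; the stub type, package, and wiring are hypothetical:

```go
package chaintest // hypothetical package, for illustration only

import (
	"context"

	"github.com/libp2p/go-libp2p-core/peer"

	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/types"
)

// stubSyncManager is a hypothetical SyncManager that runs each reported
// head through syncFn inline instead of scheduling it onto workers.
type stubSyncManager struct{ syncFn chain.SyncFunc }

func (s *stubSyncManager) Start() {}
func (s *stubSyncManager) Stop()  {}
func (s *stubSyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
	_ = s.syncFn(ctx, ts)
}
func (s *stubSyncManager) State() []chain.SyncerState { return nil }

// newStubCtor returns a SyncManagerCtor that can be handed to NewSyncer
// in place of chain.NewSyncManager.
func newStubCtor() chain.SyncManagerCtor {
	return func(fn chain.SyncFunc) chain.SyncManager {
		return &stubSyncManager{syncFn: fn}
	}
}
```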
@ -20,7 +20,28 @@ const (
|
||||
|
||||
type SyncFunc func(context.Context, *types.TipSet) error
|
||||
|
||||
type SyncManager struct {
|
||||
// SyncManager manages the chain synchronization process, both at bootstrap time
|
||||
// and during ongoing operation.
|
||||
//
|
||||
// It receives candidate chain heads in the form of tipsets from peers,
|
||||
// and schedules them onto sync workers, deduplicating processing for
|
||||
// already-active syncs.
|
||||
type SyncManager interface {
|
||||
// Start starts the SyncManager.
|
||||
Start()
|
||||
|
||||
// Stop stops the SyncManager.
|
||||
Stop()
|
||||
|
||||
// SetPeerHead informs the SyncManager that the supplied peer reported the
|
||||
// supplied tipset.
|
||||
SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet)
|
||||
|
||||
// State retrieves the state of the sync workers.
|
||||
State() []SyncerState
|
||||
}
|
||||
|
||||
type syncManager struct {
|
||||
lk sync.Mutex
|
||||
peerHeads map[peer.ID]*types.TipSet
|
||||
|
||||
@ -48,6 +69,8 @@ type SyncManager struct {
|
||||
workerChan chan *types.TipSet
|
||||
}
|
||||
|
||||
var _ SyncManager = (*syncManager)(nil)
|
||||
|
||||
type syncResult struct {
|
||||
ts *types.TipSet
|
||||
success bool
|
||||
@ -55,8 +78,8 @@ type syncResult struct {
|
||||
|
||||
const syncWorkerCount = 3
|
||||
|
||||
func NewSyncManager(sync SyncFunc) *SyncManager {
|
||||
return &SyncManager{
|
||||
func NewSyncManager(sync SyncFunc) SyncManager {
|
||||
return &syncManager{
|
||||
bspThresh: 1,
|
||||
peerHeads: make(map[peer.ID]*types.TipSet),
|
||||
syncTargets: make(chan *types.TipSet),
|
||||
@ -69,18 +92,18 @@ func NewSyncManager(sync SyncFunc) *SyncManager {
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SyncManager) Start() {
|
||||
func (sm *syncManager) Start() {
|
||||
go sm.syncScheduler()
|
||||
for i := 0; i < syncWorkerCount; i++ {
|
||||
go sm.syncWorker(i)
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SyncManager) Stop() {
|
||||
func (sm *syncManager) Stop() {
|
||||
close(sm.stop)
|
||||
}
|
||||
|
||||
func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
|
||||
func (sm *syncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) {
|
||||
sm.lk.Lock()
|
||||
defer sm.lk.Unlock()
|
||||
sm.peerHeads[p] = ts
|
||||
@ -105,6 +128,14 @@ func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.Tip
|
||||
sm.incomingTipSets <- ts
|
||||
}
|
||||
|
||||
func (sm *syncManager) State() []SyncerState {
|
||||
ret := make([]SyncerState, 0, len(sm.syncStates))
|
||||
for _, s := range sm.syncStates {
|
||||
ret = append(ret, s.Snapshot())
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
type syncBucketSet struct {
|
||||
buckets []*syncTargetBucket
|
||||
}
|
||||
@ -234,7 +265,7 @@ func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet {
|
||||
return best
|
||||
}
|
||||
|
||||
func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) {
|
||||
func (sm *syncManager) selectSyncTarget() (*types.TipSet, error) {
|
||||
var buckets syncBucketSet
|
||||
|
||||
var peerHeads []*types.TipSet
|
||||
@ -258,7 +289,7 @@ func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) {
|
||||
return buckets.Heaviest(), nil
|
||||
}
|
||||
|
||||
func (sm *SyncManager) syncScheduler() {
|
||||
func (sm *syncManager) syncScheduler() {
|
||||
|
||||
for {
|
||||
select {
|
||||
@ -280,7 +311,7 @@ func (sm *SyncManager) syncScheduler() {
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
|
||||
func (sm *syncManager) scheduleIncoming(ts *types.TipSet) {
|
||||
log.Debug("scheduling incoming tipset sync: ", ts.Cids())
|
||||
if sm.getBootstrapState() == BSStateSelected {
|
||||
sm.setBootstrapState(BSStateScheduled)
|
||||
@ -328,10 +359,11 @@ func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) {
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
|
||||
func (sm *syncManager) scheduleProcessResult(res *syncResult) {
|
||||
if res.success && sm.getBootstrapState() != BSStateComplete {
|
||||
sm.setBootstrapState(BSStateComplete)
|
||||
}
|
||||
|
||||
delete(sm.activeSyncs, res.ts.Key())
|
||||
relbucket := sm.activeSyncTips.PopRelated(res.ts)
|
||||
if relbucket != nil {
|
||||
@ -360,7 +392,7 @@ func (sm *SyncManager) scheduleProcessResult(res *syncResult) {
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SyncManager) scheduleWorkSent() {
|
||||
func (sm *syncManager) scheduleWorkSent() {
|
||||
hts := sm.nextSyncTarget.heaviestTipSet()
|
||||
sm.activeSyncs[hts.Key()] = hts
|
||||
|
||||
@ -372,7 +404,7 @@ func (sm *SyncManager) scheduleWorkSent() {
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SyncManager) syncWorker(id int) {
|
||||
func (sm *syncManager) syncWorker(id int) {
|
||||
ss := &SyncerState{}
|
||||
sm.syncStates[id] = ss
|
||||
for {
|
||||
@ -397,7 +429,7 @@ func (sm *SyncManager) syncWorker(id int) {
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SyncManager) syncedPeerCount() int {
|
||||
func (sm *syncManager) syncedPeerCount() int {
|
||||
var count int
|
||||
for _, ts := range sm.peerHeads {
|
||||
if ts.Height() > 0 {
|
||||
@ -407,19 +439,19 @@ func (sm *SyncManager) syncedPeerCount() int {
|
||||
return count
|
||||
}
|
||||
|
||||
func (sm *SyncManager) getBootstrapState() int {
|
||||
func (sm *syncManager) getBootstrapState() int {
|
||||
sm.bssLk.Lock()
|
||||
defer sm.bssLk.Unlock()
|
||||
return sm.bootstrapState
|
||||
}
|
||||
|
||||
func (sm *SyncManager) setBootstrapState(v int) {
|
||||
func (sm *syncManager) setBootstrapState(v int) {
|
||||
sm.bssLk.Lock()
|
||||
defer sm.bssLk.Unlock()
|
||||
sm.bootstrapState = v
|
||||
}
|
||||
|
||||
func (sm *SyncManager) IsBootstrapped() bool {
|
||||
func (sm *syncManager) IsBootstrapped() bool {
|
||||
sm.bssLk.Lock()
|
||||
defer sm.bssLk.Unlock()
|
||||
return sm.bootstrapState == BSStateComplete
|
||||
|
@ -17,7 +17,7 @@ type syncOp struct {
|
||||
done func()
|
||||
}
|
||||
|
||||
func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *SyncManager, chan *syncOp)) {
|
||||
func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *syncManager, chan *syncOp)) {
|
||||
syncTargets := make(chan *syncOp)
|
||||
sm := NewSyncManager(func(ctx context.Context, ts *types.TipSet) error {
|
||||
ch := make(chan struct{})
|
||||
@ -27,7 +27,7 @@ func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T,
|
||||
}
|
||||
<-ch
|
||||
return nil
|
||||
})
|
||||
}).(*syncManager)
|
||||
sm.bspThresh = thresh
|
||||
|
||||
sm.Start()
|
||||
@ -77,12 +77,12 @@ func TestSyncManager(t *testing.T) {
|
||||
c3 := mock.TipSet(mock.MkBlock(b, 3, 5))
|
||||
d := mock.TipSet(mock.MkBlock(c1, 4, 5))
|
||||
|
||||
runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
|
||||
runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
|
||||
sm.SetPeerHead(ctx, "peer1", c1)
|
||||
assertGetSyncOp(t, stc, c1)
|
||||
})
|
||||
|
||||
runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
|
||||
runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
|
||||
sm.SetPeerHead(ctx, "peer1", c1)
|
||||
assertNoOp(t, stc)
|
||||
|
||||
@ -90,7 +90,7 @@ func TestSyncManager(t *testing.T) {
|
||||
assertGetSyncOp(t, stc, c1)
|
||||
})
|
||||
|
||||
runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
|
||||
runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
|
||||
sm.SetPeerHead(ctx, "peer1", b)
|
||||
assertGetSyncOp(t, stc, b)
|
||||
|
||||
@ -101,7 +101,7 @@ func TestSyncManager(t *testing.T) {
|
||||
assertGetSyncOp(t, stc, c2)
|
||||
})
|
||||
|
||||
runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
|
||||
runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
|
||||
sm.SetPeerHead(ctx, "peer1", a)
|
||||
assertGetSyncOp(t, stc, a)
|
||||
|
||||
@ -122,7 +122,7 @@ func TestSyncManager(t *testing.T) {
|
||||
assertGetSyncOp(t, stc, d)
|
||||
})
|
||||
|
||||
runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) {
|
||||
runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *syncManager, stc chan *syncOp) {
|
||||
sm.SetPeerHead(ctx, "peer1", a)
|
||||
assertGetSyncOp(t, stc, a)
|
||||
|
||||
|
@ -662,6 +662,49 @@ func TestDuplicateNonce(t *testing.T) {
|
||||
require.Equal(t, includedMsg, mft[0].VMMessage().Cid(), "messages for tipset didn't contain expected message")
|
||||
}
|
||||
|
||||
// This test asserts that a block that includes a message with bad nonce can't be synced. A nonce is "bad" if it can't
|
||||
// be applied on the parent state.
|
||||
func TestBadNonce(t *testing.T) {
|
||||
H := 10
|
||||
tu := prepSyncTest(t, H)
|
||||
|
||||
base := tu.g.CurTipset
|
||||
|
||||
// Produce a message from the banker with a bad nonce
|
||||
makeBadMsg := func() *types.SignedMessage {
|
||||
|
||||
ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key())
|
||||
require.NoError(t, err)
|
||||
msg := types.Message{
|
||||
To: tu.g.Banker(),
|
||||
From: tu.g.Banker(),
|
||||
|
||||
Nonce: ba.Nonce + 5,
|
||||
|
||||
Value: types.NewInt(1),
|
||||
|
||||
Method: 0,
|
||||
|
||||
GasLimit: 100_000_000,
|
||||
GasFeeCap: types.NewInt(0),
|
||||
GasPremium: types.NewInt(0),
|
||||
}
|
||||
|
||||
sig, err := tu.g.Wallet().Sign(context.TODO(), tu.g.Banker(), msg.Cid().Bytes())
|
||||
require.NoError(t, err)
|
||||
|
||||
return &types.SignedMessage{
|
||||
Message: msg,
|
||||
Signature: *sig,
|
||||
}
|
||||
}
|
||||
|
||||
msgs := make([][]*types.SignedMessage, 1)
|
||||
msgs[0] = []*types.SignedMessage{makeBadMsg()}
|
||||
|
||||
tu.mineOnBlock(base, 0, []int{0}, true, true, msgs)
|
||||
}
|
||||
|
||||
func BenchmarkSyncBasic(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
runSyncBenchLength(b, 100)
|
||||
|
@ -22,6 +22,17 @@ type GasOutputs struct {
|
||||
GasBurned int64
|
||||
}
|
||||
|
||||
// ZeroGasOutputs returns a logically zeroed GasOutputs.
|
||||
func ZeroGasOutputs() GasOutputs {
|
||||
return GasOutputs{
|
||||
BaseFeeBurn: big.Zero(),
|
||||
OverEstimationBurn: big.Zero(),
|
||||
MinerPenalty: big.Zero(),
|
||||
MinerTip: big.Zero(),
|
||||
Refund: big.Zero(),
|
||||
}
|
||||
}
|
||||
|
||||
// ComputeGasOverestimationBurn computes amount of gas to be refunded and amount of gas to be burned
|
||||
// Result is (refund, burn)
|
||||
func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) {
|
||||
@ -58,13 +69,7 @@ func ComputeGasOverestimationBurn(gasUsed, gasLimit int64) (int64, int64) {
|
||||
|
||||
func ComputeGasOutputs(gasUsed, gasLimit int64, baseFee, feeCap, gasPremium abi.TokenAmount) GasOutputs {
|
||||
gasUsedBig := big.NewInt(gasUsed)
|
||||
out := GasOutputs{
|
||||
BaseFeeBurn: big.Zero(),
|
||||
OverEstimationBurn: big.Zero(),
|
||||
MinerPenalty: big.Zero(),
|
||||
MinerTip: big.Zero(),
|
||||
Refund: big.Zero(),
|
||||
}
|
||||
out := ZeroGasOutputs()
|
||||
|
||||
baseFeeToPay := baseFee
|
||||
if baseFee.Cmp(feeCap.Int) > 0 {
|
||||
|
@ -198,10 +198,9 @@ type Rand interface {
|
||||
type ApplyRet struct {
|
||||
types.MessageReceipt
|
||||
ActorErr aerrors.ActorError
|
||||
Penalty types.BigInt
|
||||
MinerTip types.BigInt
|
||||
ExecutionTrace types.ExecutionTrace
|
||||
Duration time.Duration
|
||||
GasCosts GasOutputs
|
||||
}
|
||||
|
||||
func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
|
||||
@ -325,8 +324,7 @@ func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*Ap
|
||||
},
|
||||
ActorErr: actorErr,
|
||||
ExecutionTrace: rt.executionTrace,
|
||||
Penalty: types.NewInt(0),
|
||||
MinerTip: types.NewInt(0),
|
||||
GasCosts: GasOutputs{},
|
||||
Duration: time.Since(start),
|
||||
}, actorErr
|
||||
}
|
||||
@ -354,14 +352,15 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
msgGasCost := msgGas.Total()
|
||||
// this should never happen, but is currently still exercised by some tests
|
||||
if msgGasCost > msg.GasLimit {
|
||||
gasOutputs := ZeroGasOutputs()
|
||||
gasOutputs.MinerPenalty = types.BigMul(vm.baseFee, abi.NewTokenAmount(msgGasCost))
|
||||
return &ApplyRet{
|
||||
MessageReceipt: types.MessageReceipt{
|
||||
ExitCode: exitcode.SysErrOutOfGas,
|
||||
GasUsed: 0,
|
||||
},
|
||||
Penalty: types.BigMul(vm.baseFee, abi.NewTokenAmount(msgGasCost)),
|
||||
GasCosts: gasOutputs,
|
||||
Duration: time.Since(start),
|
||||
MinerTip: big.Zero(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -372,15 +371,16 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
// this should never happen, but is currently still exercised by some tests
|
||||
if err != nil {
|
||||
if xerrors.Is(err, types.ErrActorNotFound) {
|
||||
gasOutputs := ZeroGasOutputs()
|
||||
gasOutputs.MinerPenalty = minerPenaltyAmount
|
||||
return &ApplyRet{
|
||||
MessageReceipt: types.MessageReceipt{
|
||||
ExitCode: exitcode.SysErrSenderInvalid,
|
||||
GasUsed: 0,
|
||||
},
|
||||
ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "actor not found: %s", msg.From),
|
||||
Penalty: minerPenaltyAmount,
|
||||
GasCosts: gasOutputs,
|
||||
Duration: time.Since(start),
|
||||
MinerTip: big.Zero(),
|
||||
}, nil
|
||||
}
|
||||
return nil, xerrors.Errorf("failed to look up from actor: %w", err)
|
||||
@ -388,19 +388,22 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
|
||||
// this should never happen, but is currently still exercised by some tests
|
||||
if !fromActor.Code.Equals(builtin.AccountActorCodeID) {
|
||||
gasOutputs := ZeroGasOutputs()
|
||||
gasOutputs.MinerPenalty = minerPenaltyAmount
|
||||
return &ApplyRet{
|
||||
MessageReceipt: types.MessageReceipt{
|
||||
ExitCode: exitcode.SysErrSenderInvalid,
|
||||
GasUsed: 0,
|
||||
},
|
||||
ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "send from not account actor: %s", fromActor.Code),
|
||||
Penalty: minerPenaltyAmount,
|
||||
GasCosts: gasOutputs,
|
||||
Duration: time.Since(start),
|
||||
MinerTip: big.Zero(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
if msg.Nonce != fromActor.Nonce {
|
||||
gasOutputs := ZeroGasOutputs()
|
||||
gasOutputs.MinerPenalty = minerPenaltyAmount
|
||||
return &ApplyRet{
|
||||
MessageReceipt: types.MessageReceipt{
|
||||
ExitCode: exitcode.SysErrSenderStateInvalid,
|
||||
@ -408,14 +411,16 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
},
|
||||
ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid,
|
||||
"actor nonce invalid: msg:%d != state:%d", msg.Nonce, fromActor.Nonce),
|
||||
Penalty: minerPenaltyAmount,
|
||||
|
||||
GasCosts: gasOutputs,
|
||||
Duration: time.Since(start),
|
||||
MinerTip: big.Zero(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
gascost := types.BigMul(types.NewInt(uint64(msg.GasLimit)), msg.GasFeeCap)
|
||||
if fromActor.Balance.LessThan(gascost) {
|
||||
gasOutputs := ZeroGasOutputs()
|
||||
gasOutputs.MinerPenalty = minerPenaltyAmount
|
||||
return &ApplyRet{
|
||||
MessageReceipt: types.MessageReceipt{
|
||||
ExitCode: exitcode.SysErrSenderStateInvalid,
|
||||
@ -423,9 +428,8 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
},
|
||||
ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid,
|
||||
"actor balance less than needed: %s < %s", types.FIL(fromActor.Balance), types.FIL(gascost)),
|
||||
Penalty: minerPenaltyAmount,
|
||||
GasCosts: gasOutputs,
|
||||
Duration: time.Since(start),
|
||||
MinerTip: big.Zero(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -518,8 +522,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
},
|
||||
ActorErr: actorErr,
|
||||
ExecutionTrace: rt.executionTrace,
|
||||
Penalty: gasOutputs.MinerPenalty,
|
||||
MinerTip: gasOutputs.MinerTip,
|
||||
GasCosts: gasOutputs,
|
||||
Duration: time.Since(start),
|
||||
}, nil
|
||||
}
|
||||
|
@ -12,6 +12,8 @@ import (
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
|
||||
tm "github.com/buger/goterm"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/fatih/color"
|
||||
@ -527,6 +529,11 @@ func interactiveDeal(cctx *cli.Context) error {
|
||||
continue
|
||||
}
|
||||
|
||||
if days < int(build.MinDealDuration/builtin.EpochsInDay) {
|
||||
printErr(xerrors.Errorf("minimum duration is %d days", int(build.MinDealDuration/builtin.EpochsInDay)))
|
||||
continue
|
||||
}
|
||||
|
||||
state = "miner"
|
||||
case "miner":
|
||||
fmt.Print("Miner Address (t0..): ")
|
||||
|
@ -29,6 +29,7 @@ var paychCmd = &cli.Command{
|
||||
paychVoucherCmd,
|
||||
paychSettleCmd,
|
||||
paychStatusCmd,
|
||||
paychStatusByFromToCmd,
|
||||
paychCloseCmd,
|
||||
},
|
||||
}
|
||||
@ -103,6 +104,7 @@ var paychStatusByFromToCmd = &cli.Command{
|
||||
if cctx.Args().Len() != 2 {
|
||||
return ShowHelp(cctx, fmt.Errorf("must pass two arguments: <from address> <to address>"))
|
||||
}
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
from, err := address.NewFromString(cctx.Args().Get(0))
|
||||
if err != nil {
|
||||
@ -120,7 +122,7 @@ var paychStatusByFromToCmd = &cli.Command{
|
||||
}
|
||||
defer closer()
|
||||
|
||||
avail, err := api.PaychAvailableFundsByFromTo(from, to)
|
||||
avail, err := api.PaychAvailableFundsByFromTo(ctx, from, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -138,6 +140,7 @@ var paychStatusCmd = &cli.Command{
|
||||
if cctx.Args().Len() != 1 {
|
||||
return ShowHelp(cctx, fmt.Errorf("must pass an argument: <channel address>"))
|
||||
}
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
ch, err := address.NewFromString(cctx.Args().Get(0))
|
||||
if err != nil {
|
||||
@ -150,7 +153,7 @@ var paychStatusCmd = &cli.Command{
|
||||
}
|
||||
defer closer()
|
||||
|
||||
avail, err := api.PaychAvailableFunds(ch)
|
||||
avail, err := api.PaychAvailableFunds(ctx, ch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
134  cli/state.go
@ -54,6 +54,7 @@ var stateCmd = &cli.Command{
|
||||
stateListActorsCmd,
|
||||
stateListMinersCmd,
|
||||
stateCircSupplyCmd,
|
||||
stateSectorCmd,
|
||||
stateGetActorCmd,
|
||||
stateLookupIDCmd,
|
||||
stateReplaySetCmd,
|
||||
@ -65,6 +66,7 @@ var stateCmd = &cli.Command{
|
||||
stateGetDealSetCmd,
|
||||
stateWaitMsgCmd,
|
||||
stateSearchMsgCmd,
|
||||
stateMsgCostCmd,
|
||||
stateMinerInfo,
|
||||
stateMarketCmd,
|
||||
},
|
||||
@ -119,6 +121,13 @@ var stateMinerInfo = &cli.Command{
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
cd, err := api.StateMinerProvingDeadline(ctx, addr, ts.Key())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting miner info: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Proving Period Start:\t%s\n", EpochTime(cd.CurrentEpoch, cd.PeriodStart))
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@ -1304,6 +1313,60 @@ var stateSearchMsgCmd = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var stateMsgCostCmd = &cli.Command{
|
||||
Name: "msg-cost",
|
||||
Usage: "Get the detailed gas costs of a message",
|
||||
ArgsUsage: "[messageCid]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must specify message cid to get gas costs for")
|
||||
}
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
msg, err := cid.Decode(cctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tsk := types.EmptyTSK
|
||||
|
||||
ts, err := LoadTipSet(ctx, cctx, api)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ts != nil {
|
||||
tsk = ts.Key()
|
||||
}
|
||||
|
||||
mgc, err := api.StateMsgGasCost(ctx, msg, tsk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mgc != nil {
|
||||
fmt.Printf("Message CID: %s", mgc.Message)
|
||||
fmt.Printf("\nGas Used: %d", mgc.GasUsed)
|
||||
fmt.Printf("\nBase Fee Burn: %d", mgc.BaseFeeBurn)
|
||||
fmt.Printf("\nOverestimation Burn: %d", mgc.OverEstimationBurn)
|
||||
fmt.Printf("\nMiner Tip: %d", mgc.MinerTip)
|
||||
fmt.Printf("\nRefund: %d", mgc.Refund)
|
||||
fmt.Printf("\nTotal Cost: %d", mgc.TotalCost)
|
||||
fmt.Printf("\nMiner Penalty: %d", mgc.MinerPenalty)
|
||||
} else {
|
||||
fmt.Print("message was not found on chain")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var stateCallCmd = &cli.Command{
|
||||
Name: "call",
|
||||
Usage: "Invoke a method on an actor locally",
|
||||
@ -1554,6 +1617,77 @@ var stateCircSupplyCmd = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var stateSectorCmd = &cli.Command{
|
||||
Name: "sector",
|
||||
Usage: "Get miner sector info",
|
||||
ArgsUsage: "[miner address] [sector number]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
if cctx.Args().Len() != 2 {
|
||||
return xerrors.Errorf("expected 2 params")
|
||||
}
|
||||
|
||||
ts, err := LoadTipSet(ctx, cctx, api)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ts == nil {
|
||||
ts, err = api.ChainHead(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
maddr, err := address.NewFromString(cctx.Args().Get(0))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sid, err := strconv.ParseInt(cctx.Args().Get(1), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
si, err := api.StateSectorGetInfo(ctx, maddr, abi.SectorNumber(sid), ts.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("SectorNumber: ", si.SectorNumber)
|
||||
fmt.Println("SealProof: ", si.SealProof)
|
||||
fmt.Println("SealedCID: ", si.SealedCID)
|
||||
fmt.Println("DealIDs: ", si.DealIDs)
|
||||
fmt.Println()
|
||||
fmt.Println("Activation: ", EpochTime(ts.Height(), si.Activation))
|
||||
fmt.Println("Expiration: ", EpochTime(ts.Height(), si.Expiration))
|
||||
fmt.Println()
|
||||
fmt.Println("DealWeight: ", si.DealWeight)
|
||||
fmt.Println("VerifiedDealWeight: ", si.VerifiedDealWeight)
|
||||
fmt.Println("InitialPledge: ", types.FIL(si.InitialPledge))
|
||||
fmt.Println("ExpectedDayReward: ", types.FIL(si.ExpectedDayReward))
|
||||
fmt.Println("ExpectedStoragePledge: ", types.FIL(si.ExpectedStoragePledge))
|
||||
fmt.Println()
|
||||
|
||||
sp, err := api.StateSectorPartition(ctx, maddr, abi.SectorNumber(sid), ts.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Deadline: ", sp.Deadline)
|
||||
fmt.Println("Partition: ", sp.Partition)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var stateMarketCmd = &cli.Command{
|
||||
Name: "market",
|
||||
Usage: "Inspect the storage market actor",
|
||||
|
21  cli/util.go
@ -2,10 +2,16 @@ package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/ipfs/go-cid"
|
||||
)
|
||||
|
||||
func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.TipSet, error) {
|
||||
@ -26,3 +32,16 @@ func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.T
|
||||
|
||||
return types.NewTipSet(headers)
|
||||
}
|
||||
|
||||
func EpochTime(curr, e abi.ChainEpoch) string {
|
||||
switch {
|
||||
case curr > e:
|
||||
return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e)))
|
||||
case curr == e:
|
||||
return fmt.Sprintf("%d (now)", e)
|
||||
case curr < e:
|
||||
return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr)))
|
||||
}
|
||||
|
||||
panic("math broke")
|
||||
}
|
||||
|
@ -56,6 +56,10 @@ var importBenchCmd = &cli.Command{
|
||||
Usage: "set the parallelism factor for batch seal verification",
|
||||
Value: runtime.NumCPU(),
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "repodir",
|
||||
Usage: "set the repo directory for the lotus bench run (defaults to /tmp)",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
vm.BatchSealVerifyParallelism = cctx.Int("batch-seal-verify-threads")
|
||||
@ -70,9 +74,15 @@ var importBenchCmd = &cli.Command{
|
||||
}
|
||||
defer cfi.Close() //nolint:errcheck // read only file
|
||||
|
||||
tdir, err := ioutil.TempDir("", "lotus-import-bench")
|
||||
if err != nil {
|
||||
return err
|
||||
var tdir string
|
||||
if rdir := cctx.String("repodir"); rdir != "" {
|
||||
tdir = rdir
|
||||
} else {
|
||||
tmp, err := ioutil.TempDir("", "lotus-import-bench")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tdir = tmp
|
||||
}
|
||||
|
||||
bds, err := badger.NewDatastore(tdir, nil)
|
||||
|
@ -387,7 +387,7 @@ var sealBenchCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
log.Error("post verification failed")
|
||||
log.Error("window post verification failed")
|
||||
}
|
||||
|
||||
verifyWindowpost1 := time.Now()
|
||||
@ -403,7 +403,7 @@ var sealBenchCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
log.Error("post verification failed")
|
||||
log.Error("window post verification failed")
|
||||
}
|
||||
|
||||
verifyWindowpost2 := time.Now()
|
||||
|
90  cmd/lotus-gateway/api.go (new file)
@ -0,0 +1,90 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
const LookbackCap = time.Hour
|
||||
|
||||
var (
|
||||
ErrLookbackTooLong = fmt.Errorf("lookbacks of more than %s are disallowed", LookbackCap)
|
||||
)
|
||||
|
||||
type GatewayAPI struct {
|
||||
api api.FullNode
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) getTipsetTimestamp(ctx context.Context, tsk types.TipSetKey) (time.Time, error) {
|
||||
if tsk.IsEmpty() {
|
||||
return time.Now(), nil
|
||||
}
|
||||
|
||||
ts, err := a.api.ChainGetTipSet(ctx, tsk)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
|
||||
return time.Unix(int64(ts.Blocks()[0].Timestamp), 0), nil
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) checkTipset(ctx context.Context, ts types.TipSetKey) error {
|
||||
when, err := a.getTipsetTimestamp(ctx, ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if time.Since(when) > time.Hour {
|
||||
return ErrLookbackTooLong
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "StateGetActor")
|
||||
defer span.End()
|
||||
|
||||
if err := a.checkTipset(ctx, ts); err != nil {
|
||||
return nil, fmt.Errorf("bad tipset: %w", err)
|
||||
}
|
||||
|
||||
return a.api.StateGetActor(ctx, actor, ts)
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ChainHead")
|
||||
defer span.End()
|
||||
// TODO: cache and invalidate cache when timestamp is up (or have internal ChainNotify)
|
||||
|
||||
return a.api.ChainHead(ctx)
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ChainGetTipSet")
|
||||
defer span.End()
|
||||
|
||||
if err := a.checkTipset(ctx, tsk); err != nil {
|
||||
return nil, fmt.Errorf("bad tipset: %w", err)
|
||||
}
|
||||
|
||||
// TODO: since we're limiting lookbacks, should just cache this (could really even cache the json response bytes)
|
||||
return a.api.ChainGetTipSet(ctx, tsk)
|
||||
}
|
||||
|
||||
func (a *GatewayAPI) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "MpoolPush")
|
||||
defer span.End()
|
||||
|
||||
// TODO: additional anti-spam checks
|
||||
|
||||
return a.api.MpoolPush(ctx, sm)
|
||||
}
|
112  cmd/lotus-gateway/main.go (new file)
@ -0,0 +1,112 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/lib/lotuslog"
|
||||
logging "github.com/ipfs/go-log"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var log = logging.Logger("gateway")
|
||||
|
||||
func main() {
|
||||
lotuslog.SetupLogLevels()
|
||||
|
||||
local := []*cli.Command{
|
||||
runCmd,
|
||||
}
|
||||
|
||||
app := &cli.App{
|
||||
Name: "lotus-gateway",
|
||||
Usage: "Public API server for lotus",
|
||||
Version: build.UserVersion(),
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "repo",
|
||||
EnvVars: []string{"LOTUS_PATH"},
|
||||
Value: "~/.lotus", // TODO: Consider XDG_DATA_HOME
|
||||
},
|
||||
},
|
||||
|
||||
Commands: local,
|
||||
}
|
||||
app.Setup()
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
log.Warnf("%+v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var runCmd = &cli.Command{
|
||||
Name: "run",
|
||||
Usage: "Start api server",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "listen",
|
||||
Usage: "host address and port the api server will listen on",
|
||||
Value: "0.0.0.0:2346",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
log.Info("Starting lotus gateway")
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
address := cctx.String("listen")
|
||||
mux := mux.NewRouter()
|
||||
|
||||
log.Info("Setting up API endpoint at " + address)
|
||||
|
||||
rpcServer := jsonrpc.NewServer()
|
||||
rpcServer.Register("Filecoin", &GatewayAPI{api: api})
|
||||
|
||||
mux.Handle("/rpc/v0", rpcServer)
|
||||
mux.PathPrefix("/").Handler(http.DefaultServeMux)
|
||||
|
||||
/*ah := &auth.Handler{
|
||||
Verify: nodeApi.AuthVerify,
|
||||
Next: mux.ServeHTTP,
|
||||
}*/
|
||||
|
||||
srv := &http.Server{
|
||||
Handler: mux,
|
||||
BaseContext: func(listener net.Listener) context.Context {
|
||||
return ctx
|
||||
},
|
||||
}
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
log.Warn("Shutting down...")
|
||||
if err := srv.Shutdown(context.TODO()); err != nil {
|
||||
log.Errorf("shutting down RPC server failed: %s", err)
|
||||
}
|
||||
log.Warn("Graceful shutdown successful")
|
||||
}()
|
||||
|
||||
nl, err := net.Listen("tcp", address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return srv.Serve(nl)
|
||||
},
|
||||
}
|
@ -4,6 +4,7 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
@ -28,6 +29,9 @@ var bitFieldCmd = &cli.Command{
|
||||
bitFieldRunsCmd,
|
||||
bitFieldStatCmd,
|
||||
bitFieldDecodeCmd,
|
||||
bitFieldIntersectCmd,
|
||||
bitFieldEncodeCmd,
|
||||
bitFieldSubCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -200,38 +204,9 @@ var bitFieldDecodeCmd = &cli.Command{
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
var val string
|
||||
if cctx.Args().Present() {
|
||||
val = cctx.Args().Get(0)
|
||||
} else {
|
||||
b, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
val = string(b)
|
||||
}
|
||||
|
||||
var dec []byte
|
||||
switch cctx.String("enc") {
|
||||
case "base64":
|
||||
d, err := base64.StdEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding base64 value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
case "hex":
|
||||
d, err := hex.DecodeString(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding hex value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
default:
|
||||
return fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
|
||||
}
|
||||
|
||||
rle, err := bitfield.NewFromBytes(dec)
|
||||
rle, err := decode(cctx, 0)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to parse bitfield: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
vals, err := rle.All(100000000000)
|
||||
@ -243,3 +218,170 @@ var bitFieldDecodeCmd = &cli.Command{
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var bitFieldIntersectCmd = &cli.Command{
|
||||
Name: "intersect",
|
||||
Description: "intersect 2 bitfields and print the resulting bitfield as base64",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "enc",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
b, err := decode(cctx, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a, err := decode(cctx, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o, err := bitfield.IntersectBitField(a, b)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("intersect: %w", err)
|
||||
}
|
||||
|
||||
s, err := o.RunIterator()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(base64.StdEncoding.EncodeToString(bytes))
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var bitFieldSubCmd = &cli.Command{
|
||||
Name: "sub",
|
||||
Description: "subtract 2 bitfields and print the resulting bitfield as base64",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "enc",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
b, err := decode(cctx, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a, err := decode(cctx, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o, err := bitfield.SubtractBitField(a, b)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("intersect: %w", err)
|
||||
}
|
||||
|
||||
s, err := o.RunIterator()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(base64.StdEncoding.EncodeToString(bytes))
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var bitFieldEncodeCmd = &cli.Command{
|
||||
Name: "encode",
|
||||
Description: "encode a series of decimal numbers into a bitfield",
|
||||
ArgsUsage: "[infile]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "enc",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
f, err := os.Open(cctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close() // nolint
|
||||
|
||||
out := bitfield.New()
|
||||
for {
|
||||
var i uint64
|
||||
_, err := fmt.Fscan(f, &i)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
out.Set(i)
|
||||
}
|
||||
|
||||
s, err := out.RunIterator()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(base64.StdEncoding.EncodeToString(bytes))
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func decode(cctx *cli.Context, a int) (bitfield.BitField, error) {
|
||||
var val string
|
||||
if cctx.Args().Present() {
|
||||
if a >= cctx.NArg() {
|
||||
return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
|
||||
}
|
||||
val = cctx.Args().Get(a)
|
||||
} else {
|
||||
if a > 0 {
|
||||
return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
|
||||
}
|
||||
b, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return bitfield.BitField{}, err
|
||||
}
|
||||
val = string(b)
|
||||
}
|
||||
|
||||
var dec []byte
|
||||
switch cctx.String("enc") {
|
||||
case "base64":
|
||||
d, err := base64.StdEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
return bitfield.BitField{}, fmt.Errorf("decoding base64 value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
case "hex":
|
||||
d, err := hex.DecodeString(val)
|
||||
if err != nil {
|
||||
return bitfield.BitField{}, fmt.Errorf("decoding hex value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
default:
|
||||
return bitfield.BitField{}, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
|
||||
}
|
||||
|
||||
return bitfield.NewFromBytes(dec)
|
||||
}
|
||||
|
@@ -1,6 +1,7 @@
package main

import (
	"bufio"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
@@ -8,10 +9,12 @@ import (
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/gbrlsnchs/jwt/v3"
	"github.com/urfave/cli/v2"

	"github.com/filecoin-project/go-jsonrpc/auth"
	"github.com/filecoin-project/lotus/api/apistruct"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/modules"
@@ -24,6 +27,102 @@ var jwtCmd = &cli.Command{
having to run the lotus daemon.`,
	Subcommands: []*cli.Command{
		jwtNewCmd,
		jwtTokenCmd,
	},
}

var jwtTokenCmd = &cli.Command{
	Name:      "token",
	Usage:     "create a token for a given jwt secret",
	ArgsUsage: "<name>",
	Description: `The jwt tokens have four different levels of permissions that provide some ability
to control access to what methods can be invoked by the holder of the token.

This command only works on jwt secrets that are base16 encoded files, such as those produced by the
sibling 'new' command.
`,
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:  "output",
			Value: "token",
			Usage: "specify a name",
		},
		&cli.BoolFlag{
			Name:  "read",
			Value: false,
			Usage: "add read permissions to the token",
		},
		&cli.BoolFlag{
			Name:  "write",
			Value: false,
			Usage: "add write permissions to the token",
		},
		&cli.BoolFlag{
			Name:  "sign",
			Value: false,
			Usage: "add sign permissions to the token",
		},
		&cli.BoolFlag{
			Name:  "admin",
			Value: false,
			Usage: "add admin permissions to the token",
		},
	},
	Action: func(cctx *cli.Context) error {
		if !cctx.Args().Present() {
			return fmt.Errorf("please specify a name")
		}

		inputFile, err := os.Open(cctx.Args().First())
		if err != nil {
			return err
		}
		defer inputFile.Close() //nolint:errcheck
		input := bufio.NewReader(inputFile)

		encoded, err := ioutil.ReadAll(input)
		if err != nil {
			return err
		}

		decoded, err := hex.DecodeString(strings.TrimSpace(string(encoded)))
		if err != nil {
			return err
		}

		var keyInfo types.KeyInfo
		if err := json.Unmarshal(decoded, &keyInfo); err != nil {
			return err
		}

		perms := []auth.Permission{}

		if cctx.Bool("read") {
			perms = append(perms, apistruct.PermRead)
		}

		if cctx.Bool("write") {
			perms = append(perms, apistruct.PermWrite)
		}

		if cctx.Bool("sign") {
			perms = append(perms, apistruct.PermSign)
		}

		if cctx.Bool("admin") {
			perms = append(perms, apistruct.PermAdmin)
		}

		p := modules.JwtPayload{
			Allow: perms,
		}

		token, err := jwt.Sign(&p, jwt.NewHS256(keyInfo.PrivateKey))
		if err != nil {
			return err
		}

		return ioutil.WriteFile(cctx.String("output"), token, 0600)
	},
}
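A minimal sketch of the reverse operation (checking a token against the same base16-encoded secret), assuming the `jwt.Verify` signature from `gbrlsnchs/jwt/v3` and a local stand-in for `modules.JwtPayload`; it is illustrative only and not part of this changeset:

```go
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/gbrlsnchs/jwt/v3"
)

// payload mirrors modules.JwtPayload from the diff above (assumed shape for this sketch).
type payload struct {
	Allow []string `json:"Allow"`
}

func main() {
	// args: <hex-encoded KeyInfo file> <token file>
	encoded, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		panic(err)
	}
	decoded, err := hex.DecodeString(strings.TrimSpace(string(encoded)))
	if err != nil {
		panic(err)
	}
	var keyInfo struct {
		PrivateKey []byte
	}
	if err := json.Unmarshal(decoded, &keyInfo); err != nil {
		panic(err)
	}

	token, err := ioutil.ReadFile(os.Args[2])
	if err != nil {
		panic(err)
	}

	var p payload
	// Verify checks the HS256 signature and decodes the claims into p.
	if _, err := jwt.Verify(token, jwt.NewHS256(keyInfo.PrivateKey), &p); err != nil {
		panic(err)
	}
	fmt.Println("token is valid, permissions:", p.Allow)
}
```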
@ -9,16 +9,22 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/multiformats/go-base32"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/crypto"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/wallet"
|
||||
"github.com/filecoin-project/lotus/node/modules"
|
||||
"github.com/filecoin-project/lotus/node/modules/lp2p"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
|
||||
@ -43,6 +49,90 @@ var keyinfoCmd = &cli.Command{
|
||||
keyinfoNewCmd,
|
||||
keyinfoInfoCmd,
|
||||
keyinfoImportCmd,
|
||||
keyinfoVerifyCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var keyinfoVerifyCmd = &cli.Command{
|
||||
Name: "verify",
|
||||
Usage: "verify the filename of a keystore object on disk with it's contents",
|
||||
Description: `Keystore objects are base32 enocded strings, with wallets being dynamically named via
|
||||
the wallet address. This command can ensure that the naming of these keystore objects are correct`,
|
||||
Action: func(cctx *cli.Context) error {
|
||||
filePath := cctx.Args().First()
|
||||
fileName := path.Base(filePath)
|
||||
|
||||
inputFile, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer inputFile.Close() //nolint:errcheck
|
||||
input := bufio.NewReader(inputFile)
|
||||
|
||||
keyContent, err := ioutil.ReadAll(input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var keyInfo types.KeyInfo
|
||||
if err := json.Unmarshal(keyContent, &keyInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch keyInfo.Type {
|
||||
case lp2p.KTLibp2pHost:
|
||||
name, err := base32.RawStdEncoding.DecodeString(fileName)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decoding key: '%s': %w", fileName, err)
|
||||
}
|
||||
|
||||
if string(name) != keyInfo.Type {
|
||||
return fmt.Errorf("%s of type %s is incorrect", fileName, keyInfo.Type)
|
||||
}
|
||||
case modules.KTJwtHmacSecret:
|
||||
name, err := base32.RawStdEncoding.DecodeString(fileName)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decoding key: '%s': %w", fileName, err)
|
||||
}
|
||||
|
||||
if string(name) != modules.JWTSecretName {
|
||||
return fmt.Errorf("%s of type %s is incorrect", fileName, keyInfo.Type)
|
||||
}
|
||||
case wallet.KTSecp256k1, wallet.KTBLS:
|
||||
keystore := wallet.NewMemKeyStore()
|
||||
w, err := wallet.NewWallet(keystore)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Import(&keyInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
list, err := keystore.List()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(list) != 1 {
|
||||
return fmt.Errorf("Unexpected number of keys, expected 1, found %d", len(list))
|
||||
}
|
||||
|
||||
name, err := base32.RawStdEncoding.DecodeString(fileName)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decoding key: '%s': %w", fileName, err)
|
||||
}
|
||||
|
||||
if string(name) != list[0] {
|
||||
return fmt.Errorf("%s of type %s; file is named for %s, but key is actually %s", fileName, keyInfo.Type, string(name), list[0])
|
||||
}
|
||||
|
||||
break
|
||||
default:
|
||||
return fmt.Errorf("Unknown keytype %s", keyInfo.Type)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
|
@@ -4,16 +4,12 @@ import (
	"fmt"
	"os"
	"text/tabwriter"
	"time"

	"github.com/fatih/color"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api/apibstore"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
@@ -187,35 +183,22 @@ var provingInfoCmd = &cli.Command{
		fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch)

		fmt.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%cd.WPoStProvingPeriod)
		fmt.Printf("Proving Period Start: %s\n", epochTime(cd.CurrentEpoch, cd.PeriodStart))
		fmt.Printf("Next Period Start: %s\n\n", epochTime(cd.CurrentEpoch, cd.PeriodStart+cd.WPoStProvingPeriod))
		fmt.Printf("Proving Period Start: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.PeriodStart))
		fmt.Printf("Next Period Start: %s\n\n", lcli.EpochTime(cd.CurrentEpoch, cd.PeriodStart+cd.WPoStProvingPeriod))

		fmt.Printf("Faults: %d (%.2f%%)\n", faults, faultPerc)
		fmt.Printf("Recovering: %d\n", recovering)

		fmt.Printf("Deadline Index: %d\n", cd.Index)
		fmt.Printf("Deadline Sectors: %d\n", curDeadlineSectors)
		fmt.Printf("Deadline Open: %s\n", epochTime(cd.CurrentEpoch, cd.Open))
		fmt.Printf("Deadline Close: %s\n", epochTime(cd.CurrentEpoch, cd.Close))
		fmt.Printf("Deadline Challenge: %s\n", epochTime(cd.CurrentEpoch, cd.Challenge))
		fmt.Printf("Deadline FaultCutoff: %s\n", epochTime(cd.CurrentEpoch, cd.FaultCutoff))
		fmt.Printf("Deadline Open: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Open))
		fmt.Printf("Deadline Close: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Close))
		fmt.Printf("Deadline Challenge: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Challenge))
		fmt.Printf("Deadline FaultCutoff: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.FaultCutoff))
		return nil
	},
}

func epochTime(curr, e abi.ChainEpoch) string {
	switch {
	case curr > e:
		return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e)))
	case curr == e:
		return fmt.Sprintf("%d (now)", e)
	case curr < e:
		return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr)))
	}

	panic("math broke")
}

var provingDeadlinesCmd = &cli.Command{
	Name:  "deadlines",
	Usage: "View the current proving period deadlines information",
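As a rough worked example of what `epochTime` (now exposed as `lcli.EpochTime`) prints, assuming a 30-second block delay (`build.BlockDelaySecs` on mainnet at the time), a deadline 120 epochs away is reported as one hour out:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const blockDelaySecs = 30 // assumed mainnet block delay
	const epochsAhead = 120   // deadline opens 120 epochs from now
	d := time.Second * time.Duration(blockDelaySecs*epochsAhead)
	fmt.Printf("in %s\n", d) // prints "in 1h0m0s"
}
```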
@ -5,9 +5,12 @@ import (
|
||||
"os"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
)
|
||||
|
||||
@ -17,6 +20,8 @@ var retrievalDealsCmd = &cli.Command{
|
||||
Subcommands: []*cli.Command{
|
||||
retrievalDealSelectionCmd,
|
||||
retrievalDealsListCmd,
|
||||
retrievalSetAskCmd,
|
||||
retrievalGetAskCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -154,3 +159,112 @@ var retrievalDealsListCmd = &cli.Command{
|
||||
return w.Flush()
|
||||
},
|
||||
}
|
||||
|
||||
var retrievalSetAskCmd = &cli.Command{
|
||||
Name: "set-ask",
|
||||
Usage: "Configure the provider's retrieval ask",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "price",
|
||||
Usage: "Set the price of the ask for retrievals (FIL/GiB)",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "unseal-price",
|
||||
Usage: "Set the price to unseal",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "payment-interval",
|
||||
Usage: "Set the payment interval (in bytes) for retrieval",
|
||||
DefaultText: "1MiB",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "payment-interval-increase",
|
||||
Usage: "Set the payment interval increase (in bytes) for retrieval",
|
||||
DefaultText: "1MiB",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
ctx := lcli.DaemonContext(cctx)
|
||||
|
||||
api, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ask, err := api.MarketGetRetrievalAsk(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cctx.IsSet("price") {
|
||||
v, err := types.ParseFIL(cctx.String("price"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ask.PricePerByte = types.BigDiv(types.BigInt(v), types.NewInt(1<<30))
|
||||
}
|
||||
|
||||
if cctx.IsSet("unseal-price") {
|
||||
v, err := types.ParseFIL(cctx.String("unseal-price"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ask.UnsealPrice = abi.TokenAmount(v)
|
||||
}
|
||||
|
||||
if cctx.IsSet("payment-interval") {
|
||||
v, err := units.RAMInBytes(cctx.String("payment-interval"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ask.PaymentInterval = uint64(v)
|
||||
}
|
||||
|
||||
if cctx.IsSet("payment-interval-increase") {
|
||||
v, err := units.RAMInBytes(cctx.String("payment-interval-increase"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ask.PaymentIntervalIncrease = uint64(v)
|
||||
}
|
||||
|
||||
return api.MarketSetRetrievalAsk(ctx, ask)
|
||||
},
|
||||
}
|
||||
|
||||
var retrievalGetAskCmd = &cli.Command{
|
||||
Name: "get-ask",
|
||||
Usage: "Get the provider's current retrieval ask",
|
||||
Flags: []cli.Flag{},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
ctx := lcli.DaemonContext(cctx)
|
||||
|
||||
api, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ask, err := api.MarketGetRetrievalAsk(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
|
||||
fmt.Fprintf(w, "Price per Byte\tUnseal Price\tPayment Interval\tPayment Interval Increase\n")
|
||||
if ask == nil {
|
||||
fmt.Fprintf(w, "<miner does not have an retrieval ask set>\n")
|
||||
return w.Flush()
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, "%s\t%s\t%s\t%s\n",
|
||||
types.FIL(ask.PricePerByte),
|
||||
types.FIL(ask.UnsealPrice),
|
||||
units.BytesSize(float64(ask.PaymentInterval)),
|
||||
units.BytesSize(float64(ask.PaymentIntervalIncrease)),
|
||||
)
|
||||
return w.Flush()
|
||||
|
||||
},
|
||||
}
|
||||
|
@ -64,6 +64,9 @@ const (
|
||||
// MethodAbortWith is the identifier for the method that panics optionally with
|
||||
// a passed exit code.
|
||||
MethodAbortWith
|
||||
// MethodInspectRuntime is the identifier for the method that returns the
|
||||
// current runtime values.
|
||||
MethodInspectRuntime
|
||||
)
|
||||
|
||||
// Exports defines the methods this actor exposes publicly.
|
||||
@ -77,6 +80,7 @@ func (a Actor) Exports() []interface{} {
|
||||
MethodSend: a.Send,
|
||||
MethodMutateState: a.MutateState,
|
||||
MethodAbortWith: a.AbortWith,
|
||||
MethodInspectRuntime: a.InspectRuntime,
|
||||
}
|
||||
}
|
||||
|
||||
@ -247,3 +251,28 @@ func (a Actor) AbortWith(rt runtime.Runtime, args *AbortWithArgs) *abi.EmptyValu
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InspectRuntimeReturn is the return value for the Actor.InspectRuntime method.
|
||||
type InspectRuntimeReturn struct {
|
||||
Caller address.Address
|
||||
Receiver address.Address
|
||||
ValueReceived abi.TokenAmount
|
||||
CurrEpoch abi.ChainEpoch
|
||||
CurrentBalance abi.TokenAmount
|
||||
State State
|
||||
}
|
||||
|
||||
// InspectRuntime returns a copy of the serializable values available in the Runtime.
|
||||
func (a Actor) InspectRuntime(rt runtime.Runtime, _ *abi.EmptyValue) *InspectRuntimeReturn {
|
||||
rt.ValidateImmediateCallerAcceptAny()
|
||||
var st State
|
||||
rt.StateReadonly(&st)
|
||||
return &InspectRuntimeReturn{
|
||||
Caller: rt.Caller(),
|
||||
Receiver: rt.Receiver(),
|
||||
ValueReceived: rt.ValueReceived(),
|
||||
CurrEpoch: rt.CurrEpoch(),
|
||||
CurrentBalance: rt.CurrentBalance(),
|
||||
State: st,
|
||||
}
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
"github.com/filecoin-project/specs-actors/support/mock"
|
||||
atesting "github.com/filecoin-project/specs-actors/support/testing"
|
||||
)
|
||||
@ -151,3 +152,28 @@ func TestAbortWithUncontrolled(t *testing.T) {
|
||||
})
|
||||
rt.Verify()
|
||||
}
|
||||
|
||||
func TestInspectRuntime(t *testing.T) {
|
||||
caller := atesting.NewIDAddr(t, 100)
|
||||
receiver := atesting.NewIDAddr(t, 101)
|
||||
builder := mock.NewBuilder(context.Background(), receiver)
|
||||
|
||||
rt := builder.Build(t)
|
||||
rt.SetCaller(caller, builtin.AccountActorCodeID)
|
||||
rt.StateCreate(&State{})
|
||||
var a Actor
|
||||
|
||||
rt.ExpectValidateCallerAny()
|
||||
ret := rt.Call(a.InspectRuntime, abi.Empty)
|
||||
rtr, ok := ret.(*InspectRuntimeReturn)
|
||||
if !ok {
|
||||
t.Fatal("invalid return value")
|
||||
}
|
||||
if rtr.Caller != caller {
|
||||
t.Fatal("unexpected runtime caller")
|
||||
}
|
||||
if rtr.Receiver != receiver {
|
||||
t.Fatal("unexpected runtime receiver")
|
||||
}
|
||||
rt.Verify()
|
||||
}
|
||||
|
@ -730,3 +730,145 @@ func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var lengthBufInspectRuntimeReturn = []byte{134}
|
||||
|
||||
func (t *InspectRuntimeReturn) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(lengthBufInspectRuntimeReturn); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.Caller (address.Address) (struct)
|
||||
if err := t.Caller.MarshalCBOR(w); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Receiver (address.Address) (struct)
|
||||
if err := t.Receiver.MarshalCBOR(w); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.ValueReceived (big.Int) (struct)
|
||||
if err := t.ValueReceived.MarshalCBOR(w); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.CurrEpoch (abi.ChainEpoch) (int64)
|
||||
if t.CurrEpoch >= 0 {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CurrEpoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.CurrEpoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// t.CurrentBalance (big.Int) (struct)
|
||||
if err := t.CurrentBalance.MarshalCBOR(w); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.State (chaos.State) (struct)
|
||||
if err := t.State.MarshalCBOR(w); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) error {
|
||||
*t = InspectRuntimeReturn{}
|
||||
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
|
||||
if extra != 6 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
}
|
||||
|
||||
// t.Caller (address.Address) (struct)
|
||||
|
||||
{
|
||||
|
||||
if err := t.Caller.UnmarshalCBOR(br); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.Caller: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
// t.Receiver (address.Address) (struct)
|
||||
|
||||
{
|
||||
|
||||
if err := t.Receiver.UnmarshalCBOR(br); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.Receiver: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
// t.ValueReceived (big.Int) (struct)
|
||||
|
||||
{
|
||||
|
||||
if err := t.ValueReceived.UnmarshalCBOR(br); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.ValueReceived: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
// t.CurrEpoch (abi.ChainEpoch) (int64)
|
||||
{
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.CurrEpoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
// t.CurrentBalance (big.Int) (struct)
|
||||
|
||||
{
|
||||
|
||||
if err := t.CurrentBalance.UnmarshalCBOR(br); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.CurrentBalance: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
// t.State (chaos.State) (struct)
|
||||
|
||||
{
|
||||
|
||||
if err := t.State.UnmarshalCBOR(br); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.State: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -7,7 +7,7 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := gen.WriteTupleEncodersToFile("../cbor_gen.go", "chaos",
|
||||
if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "chaos",
|
||||
chaos.State{},
|
||||
chaos.CreateActorArgs{},
|
||||
chaos.ResolveAddressResponse{},
|
||||
@ -15,6 +15,7 @@ func main() {
|
||||
chaos.SendReturn{},
|
||||
chaos.MutateStateArgs{},
|
||||
chaos.AbortWithArgs{},
|
||||
chaos.InspectRuntimeReturn{},
|
||||
); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@@ -148,6 +148,7 @@
* [StateMinerRecoveries](#StateMinerRecoveries)
* [StateMinerSectorCount](#StateMinerSectorCount)
* [StateMinerSectors](#StateMinerSectors)
* [StateMsgGasCost](#StateMsgGasCost)
* [StateNetworkName](#StateNetworkName)
* [StateNetworkVersion](#StateNetworkVersion)
* [StateReadState](#StateReadState)
@@ -2376,7 +2377,12 @@ There are not yet any comments for this method.

Perms: sign

Inputs: `null`
Inputs:
```json
[
  "t01234"
]
```

Response:
```json
@@ -2400,6 +2406,7 @@ Perms: sign
Inputs:
```json
[
  "t01234",
  "t01234"
]
```
@@ -3734,6 +3741,45 @@ Inputs:

Response: `null`

### StateMsgGasCost
StateMsgGasCost searches for a message in the chain, and returns details of the message's gas costs, including the penalty and miner tip

Perms: read

Inputs:
```json
[
  {
    "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
  },
  [
    {
      "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
    },
    {
      "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
    }
  ]
]
```

Response:
```json
{
  "Message": {
    "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
  },
  "GasUsed": "0",
  "BaseFeeBurn": "0",
  "OverEstimationBurn": "0",
  "MinerPenalty": "0",
  "MinerTip": "0",
  "Refund": "0",
  "TotalCost": "0"
}
```
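A short usage sketch for the new endpoint (not part of the generated docs): `a` is assumed to be an already-connected `api.FullNode` client, and the CID is illustrative. Passing `types.EmptyTSK` asks the node to search the chain for the message, as described above.

```go
import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// printGasCost is a usage sketch; a is an already-connected FullNode client.
func printGasCost(ctx context.Context, a api.FullNode, c cid.Cid) error {
	gc, err := a.StateMsgGasCost(ctx, c, types.EmptyTSK)
	if err != nil {
		return err
	}
	fmt.Printf("gas used: %s, miner tip: %s, total cost: %s\n", gc.GasUsed, gc.MinerTip, gc.TotalCost)
	return nil
}
```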

### StateNetworkName
StateNetworkName returns the name of the network the node is synced to

extern/oni (new vendored submodule, 1 line)
@@ -0,0 +1 @@
Subproject commit 8b7e7d438c4cc38a0d2d671876d4590ad20655b3

extern/sector-storage/manager.go (vendored, 44 lines changed)
@@ -203,25 +203,26 @@ func schedFetch(sector abi.SectorID, ft stores.SectorFileType, ptype stores.Path
	}
}

func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
func (m *Manager) tryReadUnsealedPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (foundUnsealed bool, readOk bool, selector WorkerSelector, returnErr error) {

	// acquire a lock purely for reading unsealed sectors
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	if err := m.index.StorageLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed); err != nil {
		return xerrors.Errorf("acquiring sector lock: %w", err)
	if err := m.index.StorageLock(ctx, sector, stores.FTUnsealed, stores.FTNone); err != nil {
		returnErr = xerrors.Errorf("acquiring read sector lock: %w", err)
		return
	}

	// passing 0 spt because we only need it when allowFetch is true
	best, err := m.index.StorageFindSector(ctx, sector, stores.FTUnsealed, 0, false)
	if err != nil {
		return xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
		returnErr = xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
		return
	}

	var readOk bool
	var selector WorkerSelector
	if len(best) == 0 { // new
		selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
	} else { // append to existing
	foundUnsealed = len(best) > 0
	if foundUnsealed { // append to existing
		// There is unsealed sector, see if we can read from it

		selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
@@ -231,12 +232,27 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
			return err
		})
		if err != nil {
			return xerrors.Errorf("reading piece from sealed sector: %w", err)
			returnErr = xerrors.Errorf("reading piece from sealed sector: %w", err)
		}
	} else {
		selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
	}
	return
}

	if readOk {
		return nil
	}
func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.SectorID, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) error {
	foundUnsealed, readOk, selector, err := m.tryReadUnsealedPiece(ctx, sink, sector, offset, size)
	if err != nil {
		return err
	}
	if readOk {
		return nil
	}
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	if err := m.index.StorageLock(ctx, sector, stores.FTSealed|stores.FTCache, stores.FTUnsealed); err != nil {
		return xerrors.Errorf("acquiring unseal sector lock: %w", err)
	}

	unsealFetch := func(ctx context.Context, worker Worker) error {
@@ -244,7 +260,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
			return xerrors.Errorf("copy sealed/cache sector data: %w", err)
		}

		if len(best) > 0 {
		if foundUnsealed {
			if err := worker.Fetch(ctx, sector, stores.FTUnsealed, stores.PathSealing, stores.AcquireMove); err != nil {
				return xerrors.Errorf("copy unsealed sector data: %w", err)
			}
2
extern/storage-sealing/upgrade_queue.go
vendored
2
extern/storage-sealing/upgrade_queue.go
vendored
@ -68,6 +68,8 @@ func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreC
|
||||
params.ReplaceSectorDeadline = loc.Deadline
|
||||
params.ReplaceSectorPartition = loc.Partition
|
||||
|
||||
log.Infof("replacing sector %d with %d", *replace, params.SectorNumber)
|
||||
|
||||
ri, err := m.api.StateSectorGetInfo(ctx, m.maddr, *replace, nil)
|
||||
if err != nil {
|
||||
log.Errorf("error calling StateSectorGetInfo for replaced sector: %+v", err)
|
||||
|
2
go.mod
2
go.mod
@ -25,7 +25,7 @@ require (
|
||||
github.com/filecoin-project/go-bitfield v0.2.0
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
|
||||
github.com/filecoin-project/go-data-transfer v0.6.3
|
||||
github.com/filecoin-project/go-data-transfer v0.6.4
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f
|
||||
github.com/filecoin-project/go-fil-markets v0.6.1-0.20200917052354-ee0af754c6e9
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.2-0.20200822201400-474f4fdccc52
|
||||
|
3
go.sum
3
go.sum
@ -222,8 +222,9 @@ github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:a
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg=
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus=
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ=
|
||||
github.com/filecoin-project/go-data-transfer v0.6.3 h1:7TLwm8nuodHYD/uiwJjKc/PGRR+LwqM8jmlZqgWuUfY=
|
||||
github.com/filecoin-project/go-data-transfer v0.6.3/go.mod h1:PmBKVXkhh67/tnEdJXQwDHl5mT+7Tbcwe1NPninqhnM=
|
||||
github.com/filecoin-project/go-data-transfer v0.6.4 h1:Q08ABa+cOTOLoAyHeA94fPLcwu53p6eeAaxMxQb0m0A=
|
||||
github.com/filecoin-project/go-data-transfer v0.6.4/go.mod h1:PmBKVXkhh67/tnEdJXQwDHl5mT+7Tbcwe1NPninqhnM=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
github.com/filecoin-project/go-fil-markets v0.6.1-0.20200917052354-ee0af754c6e9 h1:SnCUC9wHDId9TtV8PsQp8q1OOsi+NOLOwitIDnAgUa4=
|
||||
|
@ -80,7 +80,7 @@ func (rcn *retrievalClientNode) WaitForPaymentChannelReady(ctx context.Context,
|
||||
|
||||
func (rcn *retrievalClientNode) CheckAvailableFunds(ctx context.Context, paymentChannel address.Address) (retrievalmarket.ChannelAvailableFunds, error) {
|
||||
|
||||
channelAvailableFunds, err := rcn.payAPI.PaychAvailableFunds(paymentChannel)
|
||||
channelAvailableFunds, err := rcn.payAPI.PaychAvailableFunds(ctx, paymentChannel)
|
||||
if err != nil {
|
||||
return retrievalmarket.ChannelAvailableFunds{}, err
|
||||
}
|
||||
|
@@ -139,6 +139,7 @@ func (m *Miner) niceSleep(d time.Duration) bool {
	case <-build.Clock.After(d):
		return true
	case <-m.stop:
		log.Infow("received interrupt while trying to sleep in mining cycle")
		return false
	}
}
@@ -169,7 +170,9 @@ func (m *Miner) mine(ctx context.Context) {
		prebase, err := m.GetBestMiningCandidate(ctx)
		if err != nil {
			log.Errorf("failed to get best mining candidate: %s", err)
			m.niceSleep(time.Second * 5)
			if !m.niceSleep(time.Second * 5) {
				break
			}
			continue
		}

@@ -199,7 +202,9 @@ func (m *Miner) mine(ctx context.Context) {
		_, err = m.api.BeaconGetEntry(ctx, prebase.TipSet.Height()+prebase.NullRounds+1)
		if err != nil {
			log.Errorf("failed getting beacon entry: %s", err)
			m.niceSleep(time.Second)
			if !m.niceSleep(time.Second) {
				break
			}
			continue
		}

@@ -208,7 +213,9 @@ func (m *Miner) mine(ctx context.Context) {

		if base.TipSet.Equals(lastBase.TipSet) && lastBase.NullRounds == base.NullRounds {
			log.Warnf("BestMiningCandidate from the previous round: %s (nulls:%d)", lastBase.TipSet.Cids(), lastBase.NullRounds)
			m.niceSleep(time.Duration(build.BlockDelaySecs) * time.Second)
			if !m.niceSleep(time.Duration(build.BlockDelaySecs) * time.Second) {
				break
			}
			continue
		}

@@ -217,7 +224,9 @@ func (m *Miner) mine(ctx context.Context) {
		b, err := m.mineOne(ctx, base)
		if err != nil {
			log.Errorf("mining block failed: %+v", err)
			m.niceSleep(time.Second)
			if !m.niceSleep(time.Second) {
				break
			}
			onDone(false, 0, err)
			continue
		}
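The point of these hunks is that `niceSleep` already reports whether it was interrupted, and the callers now leave the mining loop instead of ignoring that signal. A self-contained sketch of the same interruptible-sleep pattern (illustrative only, not Lotus code):

```go
package main

import (
	"fmt"
	"time"
)

// niceSleep sleeps for d, but returns early (false) if stop is closed.
func niceSleep(d time.Duration, stop <-chan struct{}) bool {
	select {
	case <-time.After(d):
		return true
	case <-stop:
		return false
	}
}

func main() {
	stop := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(stop) // simulate shutdown
	}()

	for i := 0; ; i++ {
		// ... do one round of work, hit an error, decide to back off ...
		if !niceSleep(time.Second, stop) {
			// shutting down: break out instead of sleeping again
			fmt.Println("stopping after", i, "rounds")
			break
		}
	}
}
```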
@@ -266,6 +266,9 @@ func Online() Option {
		Override(new(dtypes.ChainBlockService), modules.ChainBlockService),

		// Filecoin services
		// We don't want the SyncManagerCtor to be used as an fx constructor, but rather as a value.
		// It will be called implicitly by the Syncer constructor.
		Override(new(chain.SyncManagerCtor), func() chain.SyncManagerCtor { return chain.NewSyncManager }),
		Override(new(*chain.Syncer), modules.NewSyncer),
		Override(new(exchange.Client), exchange.NewClient),
		Override(new(*messagepool.MessagePool), modules.MessagePool),
@@ -379,7 +382,7 @@ func StorageMiner(out *api.StorageMiner) Option {

		func(s *Settings) error {
			resAPI := &impl.StorageMinerAPI{}
			s.invokes[ExtractApiKey] = fx.Extract(resAPI)
			s.invokes[ExtractApiKey] = fx.Populate(resAPI)
			*out = resAPI
			return nil
		},
@@ -499,12 +502,18 @@ func Repo(r repo.Repo) Option {
}

func FullAPI(out *api.FullNode) Option {
	return func(s *Settings) error {
		resAPI := &impl.FullNodeAPI{}
		s.invokes[ExtractApiKey] = fx.Extract(resAPI)
		*out = resAPI
		return nil
	}
	return Options(
		func(s *Settings) error {
			s.nodeType = repo.FullNode
			return nil
		},
		func(s *Settings) error {
			resAPI := &impl.FullNodeAPI{}
			s.invokes[ExtractApiKey] = fx.Populate(resAPI)
			*out = resAPI
			return nil
		},
	)
}

type StopFunc func(context.Context) error
@@ -512,9 +521,8 @@ type StopFunc func(context.Context) error
// New builds and starts new Filecoin node
func New(ctx context.Context, opts ...Option) (StopFunc, error) {
	settings := Settings{
		modules: map[interface{}]fx.Option{},
		invokes: make([]fx.Option, _nInvokes),
		nodeType: repo.FullNode,
		modules:  map[interface{}]fx.Option{},
		invokes:  make([]fx.Option, _nInvokes),
	}

	// apply module options in the right order
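The `fx.Extract` to `fx.Populate` switch follows go.uber.org/fx, where Extract is deprecated in favour of Populate. A minimal, self-contained sketch of the Populate pattern with an illustrative API type (not the Lotus one):

```go
package main

import (
	"context"
	"fmt"

	"go.uber.org/fx"
)

type API struct{ Name string }

func newAPI() *API { return &API{Name: "full-node"} }

func main() {
	var out *API
	app := fx.New(
		fx.Provide(newAPI),
		// Populate fills out from the dependency graph once the app is built.
		fx.Populate(&out),
	)
	if err := app.Start(context.Background()); err != nil {
		panic(err)
	}
	defer app.Stop(context.Background()) //nolint:errcheck
	fmt.Println(out.Name)
}
```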
@ -1179,3 +1179,52 @@ func (a *StateAPI) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey)
|
||||
|
||||
return a.StateManager.GetNtwkVersion(ctx, ts.Height()), nil
|
||||
}
|
||||
|
||||
func (a *StateAPI) StateMsgGasCost(ctx context.Context, inputMsg cid.Cid, tsk types.TipSetKey) (*api.MsgGasCost, error) {
|
||||
var msg cid.Cid
|
||||
var ts *types.TipSet
|
||||
var err error
|
||||
if tsk != types.EmptyTSK {
|
||||
msg = inputMsg
|
||||
ts, err = a.Chain.LoadTipSet(tsk)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
|
||||
}
|
||||
} else {
|
||||
mlkp, err := a.StateSearchMsg(ctx, inputMsg)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("searching for msg %s: %w", inputMsg, err)
|
||||
}
|
||||
if mlkp == nil {
|
||||
return nil, xerrors.Errorf("didn't find msg %s", inputMsg)
|
||||
}
|
||||
|
||||
executionTs, err := a.Chain.GetTipSetFromKey(mlkp.TipSet)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("loading tipset %s: %w", mlkp.TipSet, err)
|
||||
}
|
||||
|
||||
ts, err = a.Chain.LoadTipSet(executionTs.Parents())
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("loading parent tipset %s: %w", mlkp.TipSet, err)
|
||||
}
|
||||
|
||||
msg = mlkp.Message
|
||||
}
|
||||
|
||||
m, r, err := a.StateManager.Replay(ctx, ts, msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &api.MsgGasCost{
|
||||
Message: msg,
|
||||
GasUsed: big.NewInt(r.GasUsed),
|
||||
BaseFeeBurn: r.GasCosts.BaseFeeBurn,
|
||||
OverEstimationBurn: r.GasCosts.OverEstimationBurn,
|
||||
MinerPenalty: r.GasCosts.MinerPenalty,
|
||||
MinerTip: r.GasCosts.MinerTip,
|
||||
Refund: r.GasCosts.Refund,
|
||||
TotalCost: big.Sub(m.RequiredFunds(), r.GasCosts.Refund),
|
||||
}, nil
|
||||
}
|
||||
|
@ -39,11 +39,11 @@ func (a *PaychAPI) PaychGet(ctx context.Context, from, to address.Address, amt t
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *PaychAPI) PaychAvailableFunds(ch address.Address) (*api.ChannelAvailableFunds, error) {
|
||||
func (a *PaychAPI) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) {
|
||||
return a.PaychMgr.AvailableFunds(ch)
|
||||
}
|
||||
|
||||
func (a *PaychAPI) PaychAvailableFundsByFromTo(from, to address.Address) (*api.ChannelAvailableFunds, error) {
|
||||
func (a *PaychAPI) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) {
|
||||
return a.PaychMgr.AvailableFundsByFromTo(from, to)
|
||||
}
|
||||
|
||||
|
@@ -163,8 +163,31 @@ func NetworkName(mctx helpers.MetricsCtx, lc fx.Lifecycle, cs *store.ChainStore,
	return netName, err
}

func NewSyncer(lc fx.Lifecycle, ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, h host.Host, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*chain.Syncer, error) {
	syncer, err := chain.NewSyncer(ds, sm, exchange, h.ConnManager(), h.ID(), beacon, verifier)
type SyncerParams struct {
	fx.In

	Lifecycle    fx.Lifecycle
	MetadataDS   dtypes.MetadataDS
	StateManager *stmgr.StateManager
	ChainXchg    exchange.Client
	SyncMgrCtor  chain.SyncManagerCtor
	Host         host.Host
	Beacon       beacon.Schedule
	Verifier     ffiwrapper.Verifier
}

func NewSyncer(params SyncerParams) (*chain.Syncer, error) {
	var (
		lc     = params.Lifecycle
		ds     = params.MetadataDS
		sm     = params.StateManager
		ex     = params.ChainXchg
		smCtor = params.SyncMgrCtor
		h      = params.Host
		b      = params.Beacon
		v      = params.Verifier
	)
	syncer, err := chain.NewSyncer(ds, sm, ex, smCtor, h.ConnManager(), h.ID(), b, v)
	if err != nil {
		return nil, err
	}
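Grouping constructor dependencies into a struct that embeds `fx.In` is fx's parameter-object pattern: every exported field is filled from the dependency graph, so `Override(new(*chain.Syncer), modules.NewSyncer)` keeps working as the argument list grows. A small illustrative sketch with placeholder types (not the Lotus ones):

```go
package main

import (
	"context"
	"fmt"

	"go.uber.org/fx"
)

type Store struct{ Name string }
type Beacon struct{ Round int }

// SyncerDeps is a parameter object: fx injects every exported field.
type SyncerDeps struct {
	fx.In

	Store  *Store
	Beacon *Beacon
}

type Syncer struct{ desc string }

func NewSyncer(deps SyncerDeps) *Syncer {
	return &Syncer{desc: fmt.Sprintf("%s@%d", deps.Store.Name, deps.Beacon.Round)}
}

func main() {
	var s *Syncer
	app := fx.New(
		fx.Provide(
			func() *Store { return &Store{Name: "metadata"} },
			func() *Beacon { return &Beacon{Round: 42} },
			NewSyncer,
		),
		fx.Populate(&s),
	)
	if err := app.Start(context.Background()); err != nil {
		panic(err)
	}
	defer app.Stop(context.Background()) //nolint:errcheck
	fmt.Println(s.desc)
}
```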
@ -61,7 +61,7 @@ func AddressFor(ctx context.Context, a addrSelectApi, mi miner.MinerInfo, use Ad
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
log.Warnw("control address didn't have enough funds for PoSt message", "address", addr, "required", types.FIL(minFunds), "balance", types.FIL(b))
|
||||
log.Warnw("control address didn't have enough funds for window post message", "address", addr, "required", types.FIL(minFunds), "balance", types.FIL(b))
|
||||
}
|
||||
|
||||
// Try to use the owner account if we can, fallback to worker if we can't
|
||||
|
@ -3,7 +3,6 @@ package storage
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
@ -31,8 +30,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/journal"
|
||||
)
|
||||
|
||||
var errNoPartitions = errors.New("no partitions")
|
||||
|
||||
func (s *WindowPoStScheduler) failPost(err error, deadline *dline.Info) {
|
||||
journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
|
||||
return WdPoStSchedulerEvt{
|
||||
@ -80,25 +77,29 @@ func (s *WindowPoStScheduler) doPost(ctx context.Context, deadline *dline.Info,
|
||||
})
|
||||
}
|
||||
|
||||
proof, err := s.runPost(ctx, *deadline, ts)
|
||||
switch err {
|
||||
case errNoPartitions:
|
||||
recordProofsEvent(nil, cid.Undef)
|
||||
return
|
||||
case nil:
|
||||
sm, err := s.submitPost(ctx, proof)
|
||||
if err != nil {
|
||||
log.Errorf("submitPost failed: %+v", err)
|
||||
s.failPost(err, deadline)
|
||||
return
|
||||
}
|
||||
recordProofsEvent(proof.Partitions, sm.Cid())
|
||||
default:
|
||||
log.Errorf("runPost failed: %+v", err)
|
||||
posts, err := s.runPost(ctx, *deadline, ts)
|
||||
if err != nil {
|
||||
log.Errorf("run window post failed: %+v", err)
|
||||
s.failPost(err, deadline)
|
||||
return
|
||||
}
|
||||
|
||||
if len(posts) == 0 {
|
||||
recordProofsEvent(nil, cid.Undef)
|
||||
return
|
||||
}
|
||||
|
||||
for i := range posts {
|
||||
post := &posts[i]
|
||||
sm, err := s.submitPost(ctx, post)
|
||||
if err != nil {
|
||||
log.Errorf("submit window post failed: %+v", err)
|
||||
s.failPost(err, deadline)
|
||||
} else {
|
||||
recordProofsEvent(post.Partitions, sm.Cid())
|
||||
}
|
||||
}
|
||||
|
||||
journal.J.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
|
||||
return WdPoStSchedulerEvt{
|
||||
evtCommon: s.getEvtCommon(nil),
|
||||
@ -336,7 +337,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, dlIdx uint64,
|
||||
return faults, sm, nil
|
||||
}
|
||||
|
||||
func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *types.TipSet) (*miner.SubmitWindowedPoStParams, error) {
|
||||
func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *types.TipSet) ([]miner.SubmitWindowedPoStParams, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "storage.runPost")
|
||||
defer span.End()
|
||||
|
||||
@ -425,7 +426,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
|
||||
|
||||
rand, err := s.api.ChainGetRandomnessFromBeacon(ctx, ts.Key(), crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes())
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), di, err)
|
||||
return nil, xerrors.Errorf("failed to get chain randomness for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err)
|
||||
}
|
||||
|
||||
dl, err := mas.LoadDeadline(di.Index)
|
||||
@ -433,6 +434,7 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
|
||||
return nil, xerrors.Errorf("loading deadline: %w", err)
|
||||
}
|
||||
|
||||
// Get the partitions for the given deadline
|
||||
var partitions []miner.Partition
|
||||
err = dl.ForEachPartition(func(_ uint64, part miner.Partition) error {
|
||||
partitions = append(partitions, part)
|
||||
@ -442,140 +444,212 @@ func (s *WindowPoStScheduler) runPost(ctx context.Context, di dline.Info, ts *ty
|
||||
return nil, xerrors.Errorf("loading partitions: %w", err)
|
||||
}
|
||||
|
||||
params := &miner.SubmitWindowedPoStParams{
|
||||
Deadline: di.Index,
|
||||
Partitions: make([]miner.PoStPartition, 0, len(partitions)),
|
||||
Proofs: nil,
|
||||
// Split partitions into batches, so as not to exceed the number of sectors
|
||||
// allowed in a single message
|
||||
partitionBatches, err := s.batchPartitions(partitions, mas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
skipCount := uint64(0)
|
||||
postSkipped := bitfield.New()
|
||||
var postOut []proof.PoStProof
|
||||
|
||||
for retries := 0; retries < 5; retries++ {
|
||||
var sinfos []proof.SectorInfo
|
||||
sidToPart := map[abi.SectorNumber]int{}
|
||||
|
||||
for partIdx, partition := range partitions {
|
||||
// TODO: Can do this in parallel
|
||||
toProve, err := partition.ActiveSectors()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting active sectors: %w", err)
|
||||
}
|
||||
|
||||
recs, err := partition.RecoveringSectors()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting recovering sectors: %w", err)
|
||||
}
|
||||
toProve, err = bitfield.MergeBitFields(toProve, recs)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err)
|
||||
}
|
||||
|
||||
good, err := s.checkSectors(ctx, toProve)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("checking sectors to skip: %w", err)
|
||||
}
|
||||
|
||||
good, err = bitfield.SubtractBitField(good, postSkipped)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("toProve - postSkipped: %w", err)
|
||||
}
|
||||
|
||||
skipped, err := bitfield.SubtractBitField(toProve, good)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("toProve - good: %w", err)
|
||||
}
|
||||
|
||||
sc, err := skipped.Count()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting skipped sector count: %w", err)
|
||||
}
|
||||
|
||||
skipCount += sc
|
||||
|
||||
partitionSectors, err := partition.AllSectors()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting partition sectors: %w", err)
|
||||
}
|
||||
|
||||
ssi, err := s.sectorsForProof(ctx, good, partitionSectors, ts)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting sorted sector info: %w", err)
|
||||
}
|
||||
|
||||
if len(ssi) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
sinfos = append(sinfos, ssi...)
|
||||
for _, si := range ssi {
|
||||
sidToPart[si.SectorNumber] = partIdx
|
||||
}
|
||||
|
||||
params.Partitions = append(params.Partitions, miner.PoStPartition{
|
||||
Index: uint64(partIdx),
|
||||
Skipped: skipped,
|
||||
})
|
||||
// Generate proofs in batches
|
||||
posts := make([]miner.SubmitWindowedPoStParams, 0, len(partitionBatches))
|
||||
for batchIdx, batch := range partitionBatches {
|
||||
batchPartitionStartIdx := 0
|
||||
for _, batch := range partitionBatches[:batchIdx] {
|
||||
batchPartitionStartIdx += len(batch)
|
||||
}
|
||||
|
||||
if len(sinfos) == 0 {
|
||||
// nothing to prove..
|
||||
return nil, errNoPartitions
|
||||
params := miner.SubmitWindowedPoStParams{
|
||||
Deadline: di.Index,
|
||||
Partitions: make([]miner.PoStPartition, 0, len(batch)),
|
||||
Proofs: nil,
|
||||
}
|
||||
|
||||
log.Infow("running windowPost",
|
||||
"chain-random", rand,
|
||||
"deadline", di,
|
||||
"height", ts.Height(),
|
||||
"skipped", skipCount)
|
||||
skipCount := uint64(0)
|
||||
postSkipped := bitfield.New()
|
||||
var postOut []proof.PoStProof
|
||||
somethingToProve := true
|
||||
|
||||
tsStart := build.Clock.Now()
|
||||
for retries := 0; retries < 5; retries++ {
|
||||
var partitions []miner.PoStPartition
|
||||
var sinfos []proof.SectorInfo
|
||||
for partIdx, partition := range batch {
|
||||
// TODO: Can do this in parallel
|
||||
toProve, err := partition.ActiveSectors()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting active sectors: %w", err)
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(s.actor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
recs, err := partition.RecoveringSectors()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting recovering sectors: %w", err)
|
||||
}
|
||||
toProve, err = bitfield.MergeBitFields(toProve, recs)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err)
|
||||
}
|
||||
|
||||
good, err := s.checkSectors(ctx, toProve)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("checking sectors to skip: %w", err)
|
||||
}
|
||||
|
||||
good, err = bitfield.SubtractBitField(good, postSkipped)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("toProve - postSkipped: %w", err)
|
||||
}
|
||||
|
||||
skipped, err := bitfield.SubtractBitField(toProve, good)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("toProve - good: %w", err)
|
||||
}
|
||||
|
||||
sc, err := skipped.Count()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting skipped sector count: %w", err)
|
||||
}
|
||||
|
||||
skipCount += sc
|
||||
|
||||
partitionSectors, err := partition.AllSectors()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting partition sectors: %w", err)
|
||||
}
|
||||
|
||||
ssi, err := s.sectorsForProof(ctx, good, partitionSectors, ts)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting sorted sector info: %w", err)
|
||||
}
|
||||
|
||||
if len(ssi) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
sinfos = append(sinfos, ssi...)
|
||||
partitions = append(partitions, miner.PoStPartition{
|
||||
Index: uint64(batchPartitionStartIdx + partIdx),
|
||||
Skipped: skipped,
|
||||
})
|
||||
}
|
||||
|
||||
if len(sinfos) == 0 {
|
||||
// nothing to prove for this batch
|
||||
somethingToProve = false
|
||||
break
|
||||
}
|
||||
|
||||
// Generate proof
|
||||
log.Infow("running window post",
|
||||
"chain-random", rand,
|
||||
"deadline", di,
|
||||
"height", ts.Height(),
|
||||
"skipped", skipCount)
|
||||
|
||||
tsStart := build.Clock.Now()
|
||||
|
||||
mid, err := address.IDFromAddress(s.actor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ps []abi.SectorID
|
||||
postOut, ps, err = s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand))
|
||||
elapsed := time.Since(tsStart)
|
||||
|
||||
log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed)
|
||||
|
||||
if err == nil {
|
||||
// Proof generation successful, stop retrying
|
||||
params.Partitions = append(params.Partitions, partitions...)
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
// Proof generation failed, so retry
|
||||
|
||||
if len(ps) == 0 {
|
||||
return nil, xerrors.Errorf("running window post failed: %w", err)
|
||||
}
|
||||
|
||||
log.Warnw("generate window post skipped sectors", "sectors", ps, "error", err, "try", retries)
|
||||
|
||||
skipCount += uint64(len(ps))
|
||||
for _, sector := range ps {
|
||||
postSkipped.Set(uint64(sector.Number))
|
||||
}
|
||||
}
|
||||
|
||||
var ps []abi.SectorID
|
||||
postOut, ps, err = s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, abi.PoStRandomness(rand))
|
||||
elapsed := time.Since(tsStart)
|
||||
|
||||
log.Infow("computing window PoSt", "elapsed", elapsed)
|
||||
|
||||
if err == nil {
|
||||
break
|
||||
// Nothing to prove for this batch, try the next batch
|
||||
if !somethingToProve {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(ps) == 0 {
|
||||
return nil, xerrors.Errorf("running post failed: %w", err)
|
||||
if len(postOut) == 0 {
|
||||
return nil, xerrors.Errorf("received no proofs back from generate window post")
|
||||
}
|
||||
|
||||
log.Warnw("generate window PoSt skipped sectors", "sectors", ps, "error", err, "try", retries)
|
||||
params.Proofs = postOut
|
||||
|
||||
skipCount += uint64(len(ps))
|
||||
for _, sector := range ps {
|
||||
postSkipped.Set(uint64(sector.Number))
|
||||
}
|
||||
posts = append(posts, params)
|
||||
}
|
||||
|
||||
if len(postOut) == 0 {
|
||||
return nil, xerrors.Errorf("received no proofs back from generate window post")
|
||||
}
|
||||
|
||||
params.Proofs = postOut
|
||||
|
||||
// Compute randomness after generating proofs so as to reduce the impact
|
||||
// of chain reorgs (which change randomness)
|
||||
commEpoch := di.Open
|
||||
commRand, err := s.api.ChainGetRandomnessFromTickets(ctx, ts.Key(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to get chain randomness for windowPost (ts=%d; deadline=%d): %w", ts.Height(), di, err)
|
||||
return nil, xerrors.Errorf("failed to get chain randomness for window post (ts=%d; deadline=%d): %w", ts.Height(), commEpoch, err)
|
||||
}
|
||||
params.ChainCommitEpoch = commEpoch
|
||||
params.ChainCommitRand = commRand
|
||||
|
||||
log.Infow("submitting window PoSt")
|
||||
for i := range posts {
|
||||
posts[i].ChainCommitEpoch = commEpoch
|
||||
posts[i].ChainCommitRand = commRand
|
||||
}
|
||||
|
||||
return params, nil
|
||||
return posts, nil
|
||||
}
|
||||
|
||||
func (s *WindowPoStScheduler) batchPartitions(partitions []miner.Partition, mas miner.State) ([][]miner.Partition, error) {
	// Get the number of sectors allowed in a partition, for this proof size
	sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(s.proofType)
	if err != nil {
		return nil, xerrors.Errorf("getting sectors per partition: %w", err)
	}

	maxSectors, err := mas.MaxAddressedSectors()
	if err != nil {
		return nil, err
	}

	// We don't want to exceed the number of sectors allowed in a message.
	// So given the number of sectors in a partition, work out the number of
	// partitions that can be in a message without exceeding sectors per
	// message:
	// floor(number of sectors allowed in a message / sectors per partition)
	// eg:
	// max sectors per message  7: ooooooo
	// sectors per partition    3: ooo
	// partitions per message   2: oooOOO
	//                             <1><2> (3rd doesn't fit)
	partitionsPerMsg := int(maxSectors / sectorsPerPartition)

	// The number of messages will be:
	// ceiling(number of partitions / partitions per message)
	batchCount := len(partitions) / partitionsPerMsg
	if len(partitions)%partitionsPerMsg != 0 {
		batchCount++
	}

	// Split the partitions into batches
	batches := make([][]miner.Partition, 0, batchCount)
	for i := 0; i < len(partitions); i += partitionsPerMsg {
		end := i + partitionsPerMsg
		if end > len(partitions) {
			end = len(partitions)
		}
		batches = append(batches, partitions[i:end])
	}
	return batches, nil
}

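To make the batching arithmetic concrete, take roughly 2349 sectors per window PoSt partition and a 10,000-sector cap per message; these are ballpark actors values assumed for illustration only, not taken from the diff:

```go
package main

import "fmt"

func main() {
	// Assumed values for illustration only.
	sectorsPerPartition := 2349 // window PoSt partition size
	maxSectors := 10000         // max sectors addressed by one message
	numPartitions := 10         // partitions assigned to this deadline

	partitionsPerMsg := maxSectors / sectorsPerPartition // 4
	batchCount := numPartitions / partitionsPerMsg
	if numPartitions%partitionsPerMsg != 0 {
		batchCount++ // ceiling division, as in batchPartitions above
	}
	fmt.Println(partitionsPerMsg, batchCount) // 4 3
}
```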
func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof.SectorInfo, error) {
|
||||
@ -626,7 +700,7 @@ func (s *WindowPoStScheduler) submitPost(ctx context.Context, proof *miner.Submi
|
||||
|
||||
enc, aerr := actors.SerializeParams(proof)
|
||||
if aerr != nil {
|
||||
return nil, xerrors.Errorf("could not serialize submit post parameters: %w", aerr)
|
||||
return nil, xerrors.Errorf("could not serialize submit window post parameters: %w", aerr)
|
||||
}
|
||||
|
||||
msg := &types.Message{
|
||||
@ -687,7 +761,7 @@ func (s *WindowPoStScheduler) setSender(ctx context.Context, msg *types.Message,
|
||||
|
||||
pa, err := AddressFor(ctx, s.api, mi, PoStAddr, minFunds)
|
||||
if err != nil {
|
||||
log.Errorw("error selecting address for post", "error", err)
|
||||
log.Errorw("error selecting address for window post", "error", err)
|
||||
msg.From = s.worker
|
||||
return
|
||||
}
|
||||
|
305
storage/wdpost_run_test.go
Normal file
305
storage/wdpost_run_test.go
Normal file
@ -0,0 +1,305 @@
package storage

import (
	"bytes"
	"context"
	"testing"

	"github.com/filecoin-project/go-state-types/dline"

	"golang.org/x/xerrors"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/specs-actors/actors/builtin"
	"github.com/filecoin-project/specs-actors/actors/builtin/miner"
	"github.com/filecoin-project/specs-actors/actors/runtime/proof"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	tutils "github.com/filecoin-project/specs-actors/support/testing"
)

type mockStorageMinerAPI struct {
	partitions     []*miner.Partition
	pushedMessages chan *types.Message
}

func newMockStorageMinerAPI() *mockStorageMinerAPI {
	return &mockStorageMinerAPI{
		pushedMessages: make(chan *types.Message),
	}
}

func (m *mockStorageMinerAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
	return abi.Randomness("ticket rand"), nil
}

func (m *mockStorageMinerAPI) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
	return abi.Randomness("beacon rand"), nil
}

func (m *mockStorageMinerAPI) setPartitions(ps []*miner.Partition) {
	m.partitions = append(m.partitions, ps...)
}

func (m *mockStorageMinerAPI) StateMinerPartitions(ctx context.Context, address address.Address, u uint64, key types.TipSetKey) ([]*miner.Partition, error) {
	return m.partitions, nil
}

func (m *mockStorageMinerAPI) StateMinerSectors(ctx context.Context, address address.Address, field *bitfield.BitField, b bool, key types.TipSetKey) ([]*api.ChainSectorInfo, error) {
	var sis []*api.ChainSectorInfo
	_ = field.ForEach(func(i uint64) error {
		sis = append(sis, &api.ChainSectorInfo{
			Info: miner.SectorOnChainInfo{
				SectorNumber: abi.SectorNumber(i),
			},
			ID: abi.SectorNumber(i),
		})
		return nil
	})
	return sis, nil
}

func (m *mockStorageMinerAPI) StateMinerInfo(ctx context.Context, address address.Address, key types.TipSetKey) (api.MinerInfo, error) {
	return api.MinerInfo{}, xerrors.Errorf("err")
}

func (m *mockStorageMinerAPI) MpoolPushMessage(ctx context.Context, message *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
	m.pushedMessages <- message
	return &types.SignedMessage{
		Message: *message,
	}, nil
}

func (m *mockStorageMinerAPI) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) {
	return &api.MsgLookup{
		Receipt: types.MessageReceipt{
			ExitCode: 0,
		},
	}, nil
}

type mockProver struct {
}

func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof.SectorInfo, abi.PoStRandomness) ([]proof.PoStProof, error) {
	panic("implement me")
}

func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof.SectorInfo, pr abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) {
	return []proof.PoStProof{
		{
			PoStProof:  abi.RegisteredPoStProof_StackedDrgWindow2KiBV1,
			ProofBytes: []byte("post-proof"),
		},
	}, nil, nil
}

type mockFaultTracker struct {
}

func (m mockFaultTracker) CheckProvable(ctx context.Context, spt abi.RegisteredSealProof, sectors []abi.SectorID) ([]abi.SectorID, error) {
	// Returns "bad" sectors so just return nil meaning all sectors are good
	return nil, nil
}

// TestWDPostDoPost verifies that doPost will send the correct number of window
// PoST messages for a given number of partitions
func TestWDPostDoPost(t *testing.T) {
	ctx := context.Background()
	expectedMsgCount := 5

	proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
	postAct := tutils.NewIDAddr(t, 100)
	workerAct := tutils.NewIDAddr(t, 101)

	mockStgMinerAPI := newMockStorageMinerAPI()

	// Get the number of sectors allowed in a partition for this proof type
	sectorsPerPartition, err := builtin.PoStProofWindowPoStPartitionSectors(proofType)
	require.NoError(t, err)
	// Work out the number of partitions that can be included in a message
	// without exceeding the message sector limit
	partitionsPerMsg := int(miner.AddressedSectorsMax / sectorsPerPartition)

	// Enough partitions to fill expectedMsgCount-1 messages
	partitionCount := (expectedMsgCount - 1) * partitionsPerMsg
	// Add an extra partition that should be included in the last message
	partitionCount++

	var partitions []*miner.Partition
	for p := 0; p < partitionCount; p++ {
		sectors := bitfield.New()
		for s := uint64(0); s < sectorsPerPartition; s++ {
			sectors.Set(s)
		}
		partitions = append(partitions, &miner.Partition{
			Sectors: sectors,
		})
	}
	mockStgMinerAPI.setPartitions(partitions)

	// Run window PoST
	scheduler := &WindowPoStScheduler{
		api:          mockStgMinerAPI,
		prover:       &mockProver{},
		faultTracker: &mockFaultTracker{},
		proofType:    proofType,
		actor:        postAct,
		worker:       workerAct,
	}

	di := &dline.Info{}
	ts := mockTipSet(t)
	scheduler.doPost(ctx, di, ts)

	// Read the window PoST messages
	for i := 0; i < expectedMsgCount; i++ {
		msg := <-mockStgMinerAPI.pushedMessages
		require.Equal(t, builtin.MethodsMiner.SubmitWindowedPoSt, msg.Method)
		var params miner.SubmitWindowedPoStParams
		err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
		require.NoError(t, err)

		if i == expectedMsgCount-1 {
			// In the last message we only included a single partition (see above)
			require.Len(t, params.Partitions, 1)
		} else {
			// All previous messages should include the full number of partitions
			require.Len(t, params.Partitions, partitionsPerMsg)
		}
	}
}

func mockTipSet(t *testing.T) *types.TipSet {
	minerAct := tutils.NewActorAddr(t, "miner")
	c, err := cid.Decode("QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH")
	require.NoError(t, err)
	blks := []*types.BlockHeader{
		{
			Miner:                 minerAct,
			Height:                abi.ChainEpoch(1),
			ParentStateRoot:       c,
			ParentMessageReceipts: c,
			Messages:              c,
		},
	}
	ts, err := types.NewTipSet(blks)
	require.NoError(t, err)
	return ts
}

//
// All the mock methods below here are unused
//

func (m *mockStorageMinerAPI) StateCall(ctx context.Context, message *types.Message, key types.TipSetKey) (*api.InvocResult, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerDeadlines(ctx context.Context, maddr address.Address, tok types.TipSetKey) ([]*miner.Deadline, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateSectorPreCommitInfo(ctx context.Context, address address.Address, number abi.SectorNumber, key types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateSectorGetInfo(ctx context.Context, address address.Address, number abi.SectorNumber, key types.TipSetKey) (*miner.SectorOnChainInfo, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*api.SectorLocation, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerProvingDeadline(ctx context.Context, address address.Address, key types.TipSetKey) (*dline.Info, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerPreCommitDepositForPower(ctx context.Context, address address.Address, info miner.SectorPreCommitInfo, key types.TipSetKey) (types.BigInt, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerInitialPledgeCollateral(ctx context.Context, address address.Address, info miner.SectorPreCommitInfo, key types.TipSetKey) (types.BigInt, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateSearchMsg(ctx context.Context, cid cid.Cid) (*api.MsgLookup, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateGetReceipt(ctx context.Context, cid cid.Cid, key types.TipSetKey) (*types.MessageReceipt, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMarketStorageDeal(ctx context.Context, id abi.DealID, key types.TipSetKey) (*api.MarketDeal, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerFaults(ctx context.Context, address address.Address, key types.TipSetKey) (bitfield.BitField, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateMinerRecoveries(ctx context.Context, address address.Address, key types.TipSetKey) (bitfield.BitField, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) StateAccountKey(ctx context.Context, address address.Address, key types.TipSetKey) (address.Address, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) GasEstimateMessageGas(ctx context.Context, message *types.Message, spec *api.MessageSendSpec, key types.TipSetKey) (*types.Message, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, key types.TipSetKey) (*types.TipSet, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainGetBlockMessages(ctx context.Context, cid cid.Cid) (*api.BlockMessages, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainReadObj(ctx context.Context, cid cid.Cid) ([]byte, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainHasObj(ctx context.Context, cid cid.Cid) (bool, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) WalletSign(ctx context.Context, address address.Address, bytes []byte) (*crypto.Signature, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) WalletBalance(ctx context.Context, address address.Address) (types.BigInt, error) {
	panic("implement me")
}

func (m *mockStorageMinerAPI) WalletHas(ctx context.Context, address address.Address) (bool, error) {
	panic("implement me")
}
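The test above sizes partitionCount so that the first expectedMsgCount-1 messages are completely full and one extra partition spills into a final message. A worked sketch of that sizing arithmetic, with purely illustrative limits (the real values come from miner.AddressedSectorsMax and the proof type, not from these constants):

package main

import "fmt"

func main() {
	// Illustrative numbers only; not the specs-actors values.
	const (
		expectedMsgCount    = 5
		maxSectorsPerMsg    = 10
		sectorsPerPartition = 2
	)
	partitionsPerMsg := maxSectorsPerMsg / sectorsPerPartition    // 5
	partitionCount := (expectedMsgCount-1)*partitionsPerMsg + 1   // 21

	// 4 full messages of 5 partitions each, plus 1 message with 1 partition.
	lastMsgPartitions := partitionCount % partitionsPerMsg // 1
	fmt.Println(partitionsPerMsg, partitionCount, lastMsgPartitions)
}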
@ -109,7 +109,7 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) {
		select {
		case changes, ok := <-notifs:
			if !ok {
				log.Warn("WindowPoStScheduler notifs channel closed")
				log.Warn("window post scheduler notifs channel closed")
				notifs = nil
				continue
			}
@ -150,10 +150,10 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) {
			}

			if err := s.revert(ctx, lowest); err != nil {
				log.Error("handling head reverts in windowPost sched: %+v", err)
				log.Error("handling head reverts in window post sched: %+v", err)
			}
			if err := s.update(ctx, highest); err != nil {
				log.Error("handling head updates in windowPost sched: %+v", err)
				log.Error("handling head updates in window post sched: %+v", err)
			}

			span.End()
@ -183,7 +183,7 @@ func (s *WindowPoStScheduler) revert(ctx context.Context, newLowest *types.TipSe

func (s *WindowPoStScheduler) update(ctx context.Context, new *types.TipSet) error {
	if new == nil {
		return xerrors.Errorf("no new tipset in WindowPoStScheduler.update")
		return xerrors.Errorf("no new tipset in window post sched update")
	}

	di, err := s.api.StateMinerProvingDeadline(ctx, s.actor, new.Key())
@ -205,7 +205,7 @@ func (s *WindowPoStScheduler) update(ctx context.Context, new *types.TipSet) err
	// (Need to get correct deadline above, which is tricky)

	if di.Open+StartConfidence >= new.Height() {
		log.Info("not starting windowPost yet, waiting for startconfidence", di.Open, di.Open+StartConfidence, new.Height())
		log.Info("not starting window post yet, waiting for startconfidence", di.Open, di.Open+StartConfidence, new.Height())
		return nil
	}

@ -215,7 +215,7 @@ func (s *WindowPoStScheduler) update(ctx context.Context, new *types.TipSet) err
		s.activeEPS = 0
	}
	s.failLk.Unlock()*/
	log.Infof("at %d, doPost for P %d, dd %d", new.Height(), di.PeriodStart, di.Index)
	log.Infof("at %d, do window post for P %d, dd %d", new.Height(), di.PeriodStart, di.Index)

	s.doPost(ctx, di, new)

@ -237,7 +237,7 @@ func (s *WindowPoStScheduler) abortActivePoSt() {
		}
	})

	log.Warnf("Aborting Window PoSt (Deadline: %+v)", s.activeDeadline)
	log.Warnf("Aborting window post (Deadline: %+v)", s.activeDeadline)
	}

	s.activeDeadline = nil
@ -186,7 +186,7 @@ func RecordTipsetPoints(ctx context.Context, api api.FullNode, pl *PointList, ti
		pl.AddPoint(p)
	}
	{
		blks := len(cids)
		blks := int64(len(cids))
		p = NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*build.BlockGasTarget))
		pl.AddPoint(p)
		p = NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*build.BlockGasTarget))
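The only change in this hunk is converting the block count to int64 before it is multiplied by the gas target, presumably because build.BlockGasTarget is a typed int64 in Lotus: a plain int count cannot be multiplied by it directly, and a 32-bit int could also overflow. A minimal sketch under that assumption (blockGasTarget here is a made-up stand-in, not the real constant):

package main

import "fmt"

// blockGasTarget is a hypothetical stand-in for build.BlockGasTarget.
const blockGasTarget int64 = 5_000_000_000

func gasFillRatio(totalGasLimit int64, numBlocks int) float64 {
	blks := int64(numBlocks) // int * int64 would not compile; convert first
	return float64(totalGasLimit) / float64(blks*blockGasTarget)
}

func main() {
	fmt.Println(gasFillRatio(7_500_000_000, 3)) // prints 0.5
}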