Merge branch 'master' into asr/spec-v1

This commit is contained in:
Steven Allen 2020-10-02 17:48:16 -07:00
commit 8292d60196
40 changed files with 1826 additions and 436 deletions

View File

@ -1,5 +1,71 @@
# Lotus changelog
# 0.8.1 / 2020-09-30
This optional release of Lotus introduces a new version of markets which switches to CBOR-map encodings, and allows datastore migrations. The release also introduces several improvements to the mining process, a few performance optimizations, and a battery of UX additions and enhancements.
## Changes
#### Dependencies
- Markets 0.7.0 with updated data stores (https://github.com/filecoin-project/lotus/pull/4089)
- Update ffi to code with blst fixes (https://github.com/filecoin-project/lotus/pull/3998)
#### Core Lotus
- Fix GetPower with no miner address (https://github.com/filecoin-project/lotus/pull/4049)
- Refactor: Move nonce generation out of mpool (https://github.com/filecoin-project/lotus/pull/3970)
#### Performance
- Implement caching syscalls for import-bench (https://github.com/filecoin-project/lotus/pull/3888)
- Fetch tipset blocks in parallel (https://github.com/filecoin-project/lotus/pull/4074)
- Optimize Tipset equals() (https://github.com/filecoin-project/lotus/pull/4056)
- Make state transition in validation async (https://github.com/filecoin-project/lotus/pull/3868)
#### Mining
- Add trace window post (https://github.com/filecoin-project/lotus/pull/4020)
- Use abstract types for Don't recompute post on revert (https://github.com/filecoin-project/lotus/pull/4022)
- Fix injectNulls logic in test miner (https://github.com/filecoin-project/lotus/pull/4058)
- Fix potential panic in FinalizeSector (https://github.com/filecoin-project/lotus/pull/4092)
- Don't recompute post on revert (https://github.com/filecoin-project/lotus/pull/3924)
- Fix some failed precommit handling (https://github.com/filecoin-project/lotus/pull/3445)
- Add --no-swap flag for worker (https://github.com/filecoin-project/lotus/pull/4107)
- Allow some single-thread tasks to run in parallel with PC2/C2 (https://github.com/filecoin-project/lotus/pull/4116)
#### UX
- Add an envvar to set address network version (https://github.com/filecoin-project/lotus/pull/4028)
- Add logging to chain export (https://github.com/filecoin-project/lotus/pull/4030)
- Add JSON output to state compute (https://github.com/filecoin-project/lotus/pull/4038)
- Wallet list CLI: Print balances/nonces (https://github.com/filecoin-project/lotus/pull/4088)
- Added an option to show or not show sector info for `lotus-miner info` (https://github.com/filecoin-project/lotus/pull/4003)
- Add a command to import an ipld object into the chainstore (https://github.com/filecoin-project/lotus/pull/3434)
- Improve the lotus-shed dealtracker (https://github.com/filecoin-project/lotus/pull/4051)
- Docs review and re-organization (https://github.com/filecoin-project/lotus/pull/3431)
- Fix wallet list (https://github.com/filecoin-project/lotus/pull/4104)
- Add an endpoint to validate whether a string is a well-formed address (https://github.com/filecoin-project/lotus/pull/4106)
- Add an option to set config path (https://github.com/filecoin-project/lotus/pull/4103)
- Add printf in TestWindowPost (https://github.com/filecoin-project/lotus/pull/4043)
- Improve miner sectors list UX (https://github.com/filecoin-project/lotus/pull/4108)
#### Tooling
- Move policy change to seal bench (https://github.com/filecoin-project/lotus/pull/4032)
- Add back network power to stats (https://github.com/filecoin-project/lotus/pull/4050)
- Conformance: Record and feed circulating supply (https://github.com/filecoin-project/lotus/pull/4078)
- Snapshot import progress bar, add HTTP support (https://github.com/filecoin-project/lotus/pull/4070)
- Add lotus shed util to validate a tipset (https://github.com/filecoin-project/lotus/pull/4065)
- tvx: a test vector extraction and execution tool (https://github.com/filecoin-project/lotus/pull/4064)
#### Bootstrap
- Add new bootstrappers (https://github.com/filecoin-project/lotus/pull/4007)
- Add Glif node to bootstrap peers (https://github.com/filecoin-project/lotus/pull/4004)
- Add one more node located in China (https://github.com/filecoin-project/lotus/pull/4041)
- Add ipfsmain bootstrapper (https://github.com/filecoin-project/lotus/pull/4067)
# 0.8.0 / 2020-09-26
This consensus-breaking release of Lotus introduces an upgrade to the network. The changes that break consensus are:

View File

@ -192,6 +192,9 @@ type FullNode interface {
// MpoolPush pushes a signed message to mempool.
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
// MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error)
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
// to mempool.
// maxFee is only used when GasFeeCap/GasPremium fields aren't specified
@ -390,10 +393,16 @@ type FullNode interface {
// StateCompute is a flexible command that applies the given messages on the given tipset.
// The messages are run as though the VM were at the provided height.
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error)
// StateVerifierStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
// StateVerifiedClientStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
// StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error)
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
// can issue. It takes the deal size and verified status as parameters.
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error)
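As a rough illustration of the three endpoints added above (not part of this commit), a client holding a connected `api.FullNode` handle might query the verified-registry state as in the sketch below; the `node` and `addr` values are assumed to come from elsewhere, and `StateVerifierStatus` works the same way for verifier addresses.

```go
// Sketch only, written against the signatures shown in this hunk.
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

func printVerifregStatus(ctx context.Context, node api.FullNode, addr address.Address) error {
	head, err := node.ChainHead(ctx)
	if err != nil {
		return err
	}
	// Data cap of a verified client; nil means no entry in the data cap table.
	dcap, err := node.StateVerifiedClientStatus(ctx, addr, head.Key())
	if err != nil {
		return err
	}
	if dcap == nil {
		fmt.Println("not a verified client")
	} else {
		fmt.Println("client data cap:", dcap)
	}
	// Address holding the Verified Registry's root key.
	root, err := node.StateVerifiedRegistryRootKey(ctx, head.Key())
	if err != nil {
		return err
	}
	fmt.Println("verified registry root key:", root)
	return nil
}
```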

View File

@ -122,7 +122,9 @@ type FullNodeStruct struct {
MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
MpoolClear func(context.Context, bool) error `perm:"write"`
MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
MpoolPushUntrusted func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
MpoolPushMessage func(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error) `perm:"sign"`
MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"`
MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"`
@ -202,7 +204,9 @@ type FullNodeStruct struct {
StateMinerSectorCount func(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) `perm:"read"`
StateListMessages func(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"`
StateCompute func(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"`
StateVerifierStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
StateVerifiedClientStatus func(context.Context, address.Address, types.TipSetKey) (*abi.StoragePower, error) `perm:"read"`
StateVerifiedRegistryRootKey func(ctx context.Context, tsk types.TipSetKey) (address.Address, error) `perm:"read"`
StateDealProviderCollateralBounds func(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) `perm:"read"`
StateCirculatingSupply func(context.Context, types.TipSetKey) (api.CirculatingSupply, error) `perm:"read"`
StateNetworkVersion func(context.Context, types.TipSetKey) (stnetwork.Version, error) `perm:"read"`
@ -553,6 +557,10 @@ func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg *types.SignedMessag
return c.Internal.MpoolPush(ctx, smsg)
}
func (c *FullNodeStruct) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
return c.Internal.MpoolPushUntrusted(ctx, smsg)
}
func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
return c.Internal.MpoolPushMessage(ctx, msg, spec)
}
@ -893,10 +901,18 @@ func (c *FullNodeStruct) StateCompute(ctx context.Context, height abi.ChainEpoch
return c.Internal.StateCompute(ctx, height, msgs, tsk)
}
func (c *FullNodeStruct) StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
return c.Internal.StateVerifierStatus(ctx, addr, tsk)
}
func (c *FullNodeStruct) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
return c.Internal.StateVerifiedClientStatus(ctx, addr, tsk)
}
func (c *FullNodeStruct) StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) {
return c.Internal.StateVerifiedRegistryRootKey(ctx, tsk)
}
func (c *FullNodeStruct) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) {
return c.Internal.StateDealProviderCollateralBounds(ctx, size, verified, tsk)
}

View File

@ -71,7 +71,7 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
t.Fatal(err)
}
-channelAmt := int64(100000)
+channelAmt := int64(7000)
channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt))
if err != nil {
t.Fatal(err)
@ -182,6 +182,51 @@ func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
t.Fatal("Timed out waiting for receiver to submit vouchers") t.Fatal("Timed out waiting for receiver to submit vouchers")
} }
// Create a new voucher now that some vouchers have already been submitted
vouchRes, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), 3)
if err != nil {
t.Fatal(err)
}
if vouchRes.Voucher == nil {
t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouchRes.Shortfall))
}
vdelta, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouchRes.Voucher, nil, abi.NewTokenAmount(1000))
if err != nil {
t.Fatal(err)
}
if !vdelta.Equals(abi.NewTokenAmount(1000)) {
t.Fatal("voucher didn't have the right amount")
}
// Create a new voucher whose value would exceed the channel balance
excessAmt := abi.NewTokenAmount(1000)
vouchRes, err = paymentCreator.PaychVoucherCreate(ctx, channel, excessAmt, 4)
if err != nil {
t.Fatal(err)
}
if vouchRes.Voucher != nil {
t.Fatal("Expected not to be able to create voucher whose value would exceed channel balance")
}
if !vouchRes.Shortfall.Equals(excessAmt) {
t.Fatal(fmt.Errorf("Expected voucher shortfall of %d, got %d", excessAmt, vouchRes.Shortfall))
}
// Add a voucher whose value would exceed the channel balance
vouch := &paych.SignedVoucher{ChannelAddr: channel, Amount: excessAmt, Lane: 4, Nonce: 1}
vb, err := vouch.SigningBytes()
if err != nil {
t.Fatal(err)
}
sig, err := paymentCreator.WalletSign(ctx, createrAddr, vb)
if err != nil {
t.Fatal(err)
}
vouch.Signature = sig
_, err = paymentReceiver.PaychVoucherAdd(ctx, channel, vouch, nil, abi.NewTokenAmount(1000))
if err == nil {
t.Fatal(fmt.Errorf("Expected shortfall error of %d", excessAmt))
}
// wait for the settlement period to pass before collecting
waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, paych0.SettleDelay)

View File

@ -29,7 +29,7 @@ func buildType() string {
}
// BuildVersion is the local build version, set by build system
-const BuildVersion = "0.8.0"
+const BuildVersion = "0.8.1"
func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit

View File

@ -27,10 +27,9 @@ type mockLaneState struct {
func NewMockPayChState(from address.Address,
to address.Address,
settlingAt abi.ChainEpoch,
-toSend abi.TokenAmount,
lanes map[uint64]paych.LaneState,
) paych.State {
-return &mockState{from, to, settlingAt, toSend, lanes}
+return &mockState{from: from, to: to, settlingAt: settlingAt, toSend: big.NewInt(0), lanes: lanes}
}
// NewMockLaneState constructs a state for a payment channel lane with the set fixed values

View File

@ -27,6 +27,10 @@ type state0 struct {
store adt.Store
}
func (s *state0) RootKey() (address.Address, error) {
return s.State.RootKey, nil
}
func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return getDataCap(s.store, actors.Version0, s.State.VerifiedClients, addr)
}

View File

@ -27,6 +27,10 @@ type state2 struct {
store adt.Store
}
func (s *state2) RootKey() (address.Address, error) {
return s.State.RootKey, nil
}
func (s *state2) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return getDataCap(s.store, actors.Version2, s.State.VerifiedClients, addr)
}

View File

@ -39,6 +39,7 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
type State interface {
cbor.Marshaler
RootKey() (address.Address, error)
VerifiedClientDataCap(address.Address) (bool, abi.StoragePower, error)
VerifierDataCap(address.Address) (bool, abi.StoragePower, error)
ForEachVerifier(func(addr address.Address, dcap abi.StoragePower) error) error

View File

@ -105,6 +105,10 @@ func checkFault(t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType str
return err
}
if other == bh.Cid() {
return nil
}
return xerrors.Errorf("produced block would trigger '%s' consensus fault; miner: %s; bh: %s, other: %s", faultType, bh.Miner, bh.Cid(), other)
}

View File

@ -55,6 +55,7 @@ var baseFeeLowerBoundFactor = types.NewInt(10)
var baseFeeLowerBoundFactorConservative = types.NewInt(100)
var MaxActorPendingMessages = 1000
var MaxUntrustedActorPendingMessages = 10
var MaxNonceGap = uint64(4)
@ -195,9 +196,17 @@ func CapGasFee(msg *types.Message, maxFee abi.TokenAmount) {
msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
}
-func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict bool) (bool, error) {
+func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted bool) (bool, error) {
nextNonce := ms.nextNonce
nonceGap := false
maxNonceGap := MaxNonceGap
maxActorPendingMessages := MaxActorPendingMessages
if untrusted {
maxNonceGap = 0
maxActorPendingMessages = MaxUntrustedActorPendingMessages
}
switch {
case m.Message.Nonce == nextNonce:
nextNonce++
@ -206,7 +215,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict bool) (boo
nextNonce++
}
-case strict && m.Message.Nonce > nextNonce+MaxNonceGap:
+case strict && m.Message.Nonce > nextNonce+maxNonceGap:
return false, xerrors.Errorf("message nonce has too big a gap from expected nonce (Nonce: %d, nextNonce: %d): %w", m.Message.Nonce, nextNonce, ErrNonceGap)
case m.Message.Nonce > nextNonce:
@ -242,7 +251,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict bool) (boo
//ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.Value.Int)
}
-if !has && strict && len(ms.msgs) > MaxActorPendingMessages {
+if !has && strict && len(ms.msgs) >= maxActorPendingMessages {
log.Errorf("too many pending messages from actor %s", m.Message.From)
return false, ErrTooManyPendingMessages
}
@ -484,7 +493,7 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
}
mp.curTsLk.Lock()
-publish, err := mp.addTs(m, mp.curTs, true)
+publish, err := mp.addTs(m, mp.curTs, true, false)
if err != nil {
mp.curTsLk.Unlock()
return cid.Undef, err
@ -551,7 +560,7 @@ func (mp *MessagePool) Add(m *types.SignedMessage) error {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
-_, err = mp.addTs(m, mp.curTs, false)
+_, err = mp.addTs(m, mp.curTs, false, false)
return err
}
@ -619,7 +628,7 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet)
return nil
}
-func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
+func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) {
snonce, err := mp.getStateNonce(m.Message.From, curTs)
if err != nil {
return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
@ -641,7 +650,7 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local
return false, err
}
-return publish, mp.addLocked(m, !local)
+return publish, mp.addLocked(m, !local, untrusted)
}
func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
@ -676,17 +685,17 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
return err
}
-return mp.addLocked(m, false)
+return mp.addLocked(m, false, false)
}
func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error {
mp.lk.Lock()
defer mp.lk.Unlock()
-return mp.addLocked(m, false)
+return mp.addLocked(m, false, false)
}
-func (mp *MessagePool) addLocked(m *types.SignedMessage, strict bool) error {
+func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) error {
log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
if m.Signature.Type == crypto.SigTypeBLS {
mp.blsSigCache.Add(m.Cid(), m.Signature)
@ -713,7 +722,7 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict bool) error {
mp.pending[m.Message.From] = mset
}
-incr, err := mset.add(m, mp, strict)
+incr, err := mset.add(m, mp, strict, untrusted)
if err != nil {
log.Debug(err)
return err
@ -793,6 +802,50 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (
return act.Balance, nil
}
// this method is provided for the gateway to push messages.
// differences from Push:
// - strict checks are enabled
// - extra strict add checks are used when adding the messages to the msgSet
// that means: no nonce gaps, at most 10 pending messages for the actor
func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) {
err := mp.checkMessage(m)
if err != nil {
return cid.Undef, err
}
// serialize push access to reduce lock contention
mp.addSema <- struct{}{}
defer func() {
<-mp.addSema
}()
msgb, err := m.Serialize()
if err != nil {
return cid.Undef, err
}
mp.curTsLk.Lock()
publish, err := mp.addTs(m, mp.curTs, false, true)
if err != nil {
mp.curTsLk.Unlock()
return cid.Undef, err
}
mp.curTsLk.Unlock()
mp.lk.Lock()
if err := mp.addLocal(m, msgb); err != nil {
mp.lk.Unlock()
return cid.Undef, err
}
mp.lk.Unlock()
if publish {
err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb)
}
return m.Cid(), err
}
func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) {
mp.lk.Lock()
defer mp.lk.Unlock()
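For orientation, the sketch below (not part of this commit) shows how a gateway-style caller might forward externally received messages through the new untrusted path; `ErrNonceGap` and `ErrTooManyPendingMessages` are the sentinel errors already referenced in this file, everything else is assumed for illustration.

```go
// Sketch only: forward a message from an untrusted source through the stricter
// path added above (no nonce gap allowed, at most 10 pending messages per actor).
package example

import (
	"errors"
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/messagepool"
	"github.com/filecoin-project/lotus/chain/types"
)

func forwardUntrusted(mp *messagepool.MessagePool, sm *types.SignedMessage) (cid.Cid, error) {
	c, err := mp.PushUntrusted(sm)
	switch {
	case errors.Is(err, messagepool.ErrNonceGap):
		// untrusted pushes tolerate no nonce gap at all
		return cid.Undef, fmt.Errorf("out-of-order message rejected: %w", err)
	case errors.Is(err, messagepool.ErrTooManyPendingMessages):
		// the per-actor cap drops from 1000 to MaxUntrustedActorPendingMessages (10)
		return cid.Undef, fmt.Errorf("actor queue full: %w", err)
	}
	return c, err
}
```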

View File

@ -1195,13 +1195,6 @@ func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.
}
func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error {
-if ts == nil {
-ts = cs.GetHeaviestTipSet()
-}
-seen := cid.NewSet()
-walked := cid.NewSet()
h := &car.CarHeader{
Roots: ts.Cids(),
Version: 1,
@ -1211,6 +1204,28 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
return xerrors.Errorf("failed to write car header: %s", err) return xerrors.Errorf("failed to write car header: %s", err)
} }
return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, func(c cid.Cid) error {
blk, err := cs.bs.Get(c)
if err != nil {
return xerrors.Errorf("writing object to car, bs.Get: %w", err)
}
if err := carutil.LdWrite(w, c.Bytes(), blk.RawData()); err != nil {
return xerrors.Errorf("failed to write block to car output: %w", err)
}
return nil
})
}
func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, cb func(cid.Cid) error) error {
if ts == nil {
ts = cs.GetHeaviestTipSet()
}
seen := cid.NewSet()
walked := cid.NewSet()
blocksToWalk := ts.Cids()
currentMinHeight := ts.Height()
@ -1219,15 +1234,15 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
return nil
}
if err := cb(blk); err != nil {
return err
}
data, err := cs.bs.Get(blk)
if err != nil {
return xerrors.Errorf("getting block: %w", err)
}
-if err := carutil.LdWrite(w, blk.Bytes(), data.RawData()); err != nil {
-return xerrors.Errorf("failed to write block to car output: %w", err)
-}
var b types.BlockHeader
if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil {
return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err)
@ -1274,14 +1289,11 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
if c.Prefix().Codec != cid.DagCBOR {
continue
}
-data, err := cs.bs.Get(c)
-if err != nil {
+if err := cb(c); err != nil {
-return xerrors.Errorf("writing object to car (get %s): %w", c, err)
+return err
}
-if err := carutil.LdWrite(w, c.Bytes(), data.RawData()); err != nil {
-return xerrors.Errorf("failed to write out car object: %w", err)
-}
}
}
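Export is now a thin wrapper over the new WalkSnapshot callback, so other tools can reuse the same traversal. A minimal sketch (not from this commit, the helper is hypothetical) that only counts the objects a snapshot of `ts` would contain:

```go
// Sketch only: reuse the WalkSnapshot traversal factored out above for a
// purpose other than CAR writing. `cs` is an existing *store.ChainStore.
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

func countSnapshotObjects(ctx context.Context, cs *store.ChainStore, ts *types.TipSet, recentRoots abi.ChainEpoch) (int, error) {
	n := 0
	err := cs.WalkSnapshot(ctx, ts, recentRoots, true, func(c cid.Cid) error {
		n++
		return nil
	})
	return n, err
}
```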

View File

@ -12,13 +12,12 @@ import (
"text/tabwriter" "text/tabwriter"
"time" "time"
"github.com/filecoin-project/specs-actors/actors/builtin"
tm "github.com/buger/goterm" tm "github.com/buger/goterm"
"github.com/docker/go-units" "github.com/docker/go-units"
"github.com/fatih/color" "github.com/fatih/color"
datatransfer "github.com/filecoin-project/go-data-transfer" datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/specs-actors/actors/builtin"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil/cidenc" "github.com/ipfs/go-cidutil/cidenc"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
@ -476,6 +475,7 @@ func interactiveDeal(cctx *cli.Context) error {
var ask storagemarket.StorageAsk
var epochPrice big.Int
var epochs abi.ChainEpoch
var verified bool
var a address.Address
if from := cctx.String("from"); from != "" {
@ -572,6 +572,53 @@ func interactiveDeal(cctx *cli.Context) error {
ask = *a
// TODO: run more validation
state = "verified"
case "verified":
ts, err := api.ChainHead(ctx)
if err != nil {
return err
}
dcap, err := api.StateVerifiedClientStatus(ctx, a, ts.Key())
if err != nil {
return err
}
if dcap == nil {
state = "confirm"
continue
}
color.Blue(".. checking verified deal eligibility\n")
ds, err := api.ClientDealSize(ctx, data)
if err != nil {
return err
}
if dcap.Uint64() < uint64(ds.PieceSize) {
color.Yellow(".. not enough DataCap available for a verified deal\n")
state = "confirm"
continue
}
fmt.Print("\nMake this a verified deal? (yes/no): ")
var yn string
_, err = fmt.Scan(&yn)
if err != nil {
return err
}
switch yn {
case "yes":
verified = true
case "no":
verified = false
default:
fmt.Println("Type in full 'yes' or 'no'")
continue
}
state = "confirm" state = "confirm"
case "confirm": case "confirm":
fromBal, err := api.WalletBalance(ctx, a) fromBal, err := api.WalletBalance(ctx, a)
@ -590,10 +637,15 @@ func interactiveDeal(cctx *cli.Context) error {
epochs = abi.ChainEpoch(dur / (time.Duration(build.BlockDelaySecs) * time.Second))
// TODO: do some more or epochs math (round to miner PP, deal start buffer)
pricePerGib := ask.Price
if verified {
pricePerGib = ask.VerifiedPrice
}
gib := types.NewInt(1 << 30)
// TODO: price is based on PaddedPieceSize, right?
-epochPrice = types.BigDiv(types.BigMul(ask.Price, types.NewInt(uint64(ds.PieceSize))), gib)
+epochPrice = types.BigDiv(types.BigMul(pricePerGib, types.NewInt(uint64(ds.PieceSize))), gib)
totalPrice := types.BigMul(epochPrice, types.NewInt(uint64(epochs)))
fmt.Printf("-----\n")
@ -603,6 +655,7 @@ func interactiveDeal(cctx *cli.Context) error {
fmt.Printf("Piece size: %s (Payload size: %s)\n", units.BytesSize(float64(ds.PieceSize)), units.BytesSize(float64(ds.PayloadSize))) fmt.Printf("Piece size: %s (Payload size: %s)\n", units.BytesSize(float64(ds.PieceSize)), units.BytesSize(float64(ds.PayloadSize)))
fmt.Printf("Duration: %s\n", dur) fmt.Printf("Duration: %s\n", dur)
fmt.Printf("Total price: ~%s (%s per epoch)\n", types.FIL(totalPrice), types.FIL(epochPrice)) fmt.Printf("Total price: ~%s (%s per epoch)\n", types.FIL(totalPrice), types.FIL(epochPrice))
fmt.Printf("Verified: %v\n", verified)
state = "accept" state = "accept"
case "accept": case "accept":
@ -637,7 +690,7 @@ func interactiveDeal(cctx *cli.Context) error {
MinBlocksDuration: uint64(epochs),
DealStartEpoch: abi.ChainEpoch(cctx.Int64("start-epoch")),
FastRetrieval: cctx.Bool("fast-retrieval"),
-VerifiedDeal: false, // TODO: Allow setting
+VerifiedDeal: verified,
})
if err != nil {
return err
@ -1423,7 +1476,7 @@ func toChannelOutput(useColor bool, otherPartyColumn string, channel lapi.DataTr
otherPartyColumn: otherParty,
"Root Cid": rootCid,
"Initiated?": initiated,
-"Transferred": channel.Transferred,
+"Transferred": units.BytesSize(float64(channel.Transferred)),
"Voucher": voucher,
"Message": channel.Message,
}

View File

@ -7,6 +7,7 @@ import (
"sort" "sort"
"strconv" "strconv"
cid "github.com/ipfs/go-cid"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -43,6 +44,10 @@ var mpoolPending = &cli.Command{
Name: "local", Name: "local",
Usage: "print pending messages for addresses in local wallet only", Usage: "print pending messages for addresses in local wallet only",
}, },
&cli.BoolFlag{
Name: "cids",
Usage: "only print cids of messages in output",
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx) api, closer, err := GetFullNodeAPI(cctx)
@ -79,11 +84,15 @@ var mpoolPending = &cli.Command{
}
}
-out, err := json.MarshalIndent(msg, "", " ")
-if err != nil {
-return err
-}
-fmt.Println(string(out))
+if cctx.Bool("cids") {
+fmt.Println(msg.Cid())
+} else {
+out, err := json.MarshalIndent(msg, "", " ")
+if err != nil {
+return err
+}
+fmt.Println(string(out))
+}
}
return nil
@ -308,21 +317,8 @@ var mpoolReplaceCmd = &cli.Command{
Usage: "Spend up to X FIL for this message (applicable for auto mode)", Usage: "Spend up to X FIL for this message (applicable for auto mode)",
}, },
}, },
ArgsUsage: "[from] [nonce]", ArgsUsage: "<from nonce> | <message-cid>",
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
-if cctx.Args().Len() < 2 {
-return cli.ShowCommandHelp(cctx, cctx.Command.Name)
-}
-from, err := address.NewFromString(cctx.Args().Get(0))
-if err != nil {
-return err
-}
-nonce, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
-if err != nil {
-return err
-}
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@ -332,6 +328,39 @@ var mpoolReplaceCmd = &cli.Command{
ctx := ReqContext(cctx)
var from address.Address
var nonce uint64
switch cctx.Args().Len() {
case 1:
mcid, err := cid.Decode(cctx.Args().First())
if err != nil {
return err
}
msg, err := api.ChainGetMessage(ctx, mcid)
if err != nil {
return fmt.Errorf("could not find referenced message: %w", err)
}
from = msg.From
nonce = msg.Nonce
case 2:
f, err := address.NewFromString(cctx.Args().Get(0))
if err != nil {
return err
}
n, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
if err != nil {
return err
}
from = f
nonce = n
default:
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
}
ts, err := api.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("getting chain head: %w", err)

View File

@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/hako/durafmt"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
@ -36,11 +37,11 @@ func parseTipSet(ctx context.Context, api api.FullNode, vals []string) (*types.T
func EpochTime(curr, e abi.ChainEpoch) string {
switch {
case curr > e:
-return fmt.Sprintf("%d (%s ago)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e)))
+return fmt.Sprintf("%d (%s ago)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2))
case curr == e:
return fmt.Sprintf("%d (now)", e)
case curr < e:
-return fmt.Sprintf("%d (in %s)", e, time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr)))
+return fmt.Sprintf("%d (in %s)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2))
}
panic("math broke")

View File

@ -86,5 +86,5 @@ func (a *GatewayAPI) MpoolPush(ctx context.Context, sm *types.SignedMessage) (ci
// TODO: additional anti-spam checks // TODO: additional anti-spam checks
-return a.api.MpoolPush(ctx, sm)
+return a.api.MpoolPushUntrusted(ctx, sm)
}

View File

@ -1,8 +1,10 @@
package main
import (
"bufio"
"bytes"
"context"
"encoding/csv"
"fmt"
"io/ioutil"
"net/http"
@ -27,11 +29,12 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -44,6 +47,8 @@ var log = logging.Logger("main")
func main() {
local := []*cli.Command{
runCmd,
recoverMinersCmd,
findMinersCmd,
versionCmd,
}
@ -107,6 +112,186 @@ var versionCmd = &cli.Command{
},
}
var findMinersCmd = &cli.Command{
Name: "find-miners",
Usage: "find miners with a desired minimum balance",
Description: `Find miners returns a list of miners and their balances that are below a
threshold value. By default only the miner actor available balance is considered but other
account balances can be included by enabling them through the flags.
Examples
Find all miners with an available balance below 100 FIL
lotus-pcr find-miners --threshold 100
Find all miners with a balance below zero, which includes the owner and worker balances
lotus-pcr find-miners --threshold 0 --owner --worker
`,
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "no-sync",
EnvVars: []string{"LOTUS_PCR_NO_SYNC"},
Usage: "do not wait for chain sync to complete",
},
&cli.IntFlag{
Name: "threshold",
EnvVars: []string{"LOTUS_PCR_THRESHOLD"},
Usage: "balance below this limit will be printed",
Value: 0,
},
&cli.BoolFlag{
Name: "owner",
Usage: "include owner balance",
Value: false,
},
&cli.BoolFlag{
Name: "worker",
Usage: "include worker balance",
Value: false,
},
&cli.BoolFlag{
Name: "control",
Usage: "include control balance",
Value: false,
},
},
Action: func(cctx *cli.Context) error {
ctx := context.Background()
api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path"))
if err != nil {
log.Fatal(err)
}
defer closer()
if !cctx.Bool("no-sync") {
if err := stats.WaitForSyncComplete(ctx, api); err != nil {
log.Fatal(err)
}
}
owner := cctx.Bool("owner")
worker := cctx.Bool("worker")
control := cctx.Bool("control")
threshold := uint64(cctx.Int("threshold"))
rf := &refunder{
api: api,
threshold: types.FromFil(threshold),
}
refundTipset, err := api.ChainHead(ctx)
if err != nil {
return err
}
balanceRefund, err := rf.FindMiners(ctx, refundTipset, NewMinersRefund(), owner, worker, control)
if err != nil {
return err
}
for _, maddr := range balanceRefund.Miners() {
fmt.Printf("%s\t%s\n", maddr, types.FIL(balanceRefund.GetRefund(maddr)))
}
return nil
},
}
var recoverMinersCmd = &cli.Command{
Name: "recover-miners",
Usage: "Ensure all miners with a negative available balance have a FIL surplus across accounts",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
EnvVars: []string{"LOTUS_PCR_FROM"},
Usage: "wallet address to send refund from",
},
&cli.BoolFlag{
Name: "no-sync",
EnvVars: []string{"LOTUS_PCR_NO_SYNC"},
Usage: "do not wait for chain sync to complete",
},
&cli.BoolFlag{
Name: "dry-run",
EnvVars: []string{"LOTUS_PCR_DRY_RUN"},
Usage: "do not send any messages",
Value: false,
},
&cli.StringFlag{
Name: "output",
Usage: "dump data as a csv format to this file",
},
&cli.IntFlag{
Name: "miner-recovery-cutoff",
EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_CUTOFF"},
Usage: "maximum amount of FIL that can be sent to any one miner before refund percent is applied",
Value: 3000,
},
&cli.IntFlag{
Name: "miner-recovery-bonus",
EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_BONUS"},
Usage: "additional FIL to send to each miner",
Value: 5,
},
&cli.IntFlag{
Name: "miner-recovery-refund-percent",
EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_REFUND_PERCENT"},
Usage: "percent of refund to issue",
Value: 110,
},
},
Action: func(cctx *cli.Context) error {
ctx := context.Background()
api, closer, err := stats.GetFullNodeAPI(cctx.Context, cctx.String("lotus-path"))
if err != nil {
log.Fatal(err)
}
defer closer()
from, err := address.NewFromString(cctx.String("from"))
if err != nil {
return xerrors.Errorf("parsing source address (provide correct --from flag!): %w", err)
}
if !cctx.Bool("no-sync") {
if err := stats.WaitForSyncComplete(ctx, api); err != nil {
log.Fatal(err)
}
}
dryRun := cctx.Bool("dry-run")
minerRecoveryRefundPercent := cctx.Int("miner-recovery-refund-percent")
minerRecoveryCutoff := uint64(cctx.Int("miner-recovery-cutoff"))
minerRecoveryBonus := uint64(cctx.Int("miner-recovery-bonus"))
rf := &refunder{
api: api,
wallet: from,
dryRun: dryRun,
minerRecoveryRefundPercent: minerRecoveryRefundPercent,
minerRecoveryCutoff: types.FromFil(minerRecoveryCutoff),
minerRecoveryBonus: types.FromFil(minerRecoveryBonus),
}
refundTipset, err := api.ChainHead(ctx)
if err != nil {
return err
}
balanceRefund, err := rf.EnsureMinerMinimums(ctx, refundTipset, NewMinersRefund(), cctx.String("output"))
if err != nil {
return err
}
if err := rf.Refund(ctx, "refund to recover miner", refundTipset, balanceRefund, 0); err != nil {
return err
}
return nil
},
}
var runCmd = &cli.Command{
Name: "run",
Usage: "Start message reimbursement",
@ -122,10 +307,10 @@ var runCmd = &cli.Command{
Usage: "do not wait for chain sync to complete", Usage: "do not wait for chain sync to complete",
}, },
&cli.IntFlag{ &cli.IntFlag{
Name: "percent-extra", Name: "refund-percent",
EnvVars: []string{"LOTUS_PCR_PERCENT_EXTRA"}, EnvVars: []string{"LOTUS_PCR_REFUND_PERCENT"},
Usage: "extra funds to send above the refund", Usage: "percent of refund to issue",
Value: 3, Value: 103,
}, },
&cli.IntFlag{ &cli.IntFlag{
Name: "max-message-queue", Name: "max-message-queue",
@ -163,6 +348,36 @@ var runCmd = &cli.Command{
Usage: "the number of tipsets to delay message processing to smooth chain reorgs", Usage: "the number of tipsets to delay message processing to smooth chain reorgs",
Value: int(build.MessageConfidence), Value: int(build.MessageConfidence),
}, },
&cli.BoolFlag{
Name: "miner-recovery",
EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY"},
Usage: "run the miner recovery job",
Value: false,
},
&cli.IntFlag{
Name: "miner-recovery-period",
EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_PERIOD"},
Usage: "interval between running miner recovery",
Value: 2880,
},
&cli.IntFlag{
Name: "miner-recovery-cutoff",
EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_CUTOFF"},
Usage: "maximum amount of FIL that can be sent to any one miner before refund percent is applied",
Value: 3000,
},
&cli.IntFlag{
Name: "miner-recovery-bonus",
EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_BONUS"},
Usage: "additional FIL to send to each miner",
Value: 5,
},
&cli.IntFlag{
Name: "miner-recovery-refund-percent",
EnvVars: []string{"LOTUS_PCR_MINER_RECOVERY_REFUND_PERCENT"},
Usage: "percent of refund to issue",
Value: 110,
},
},
Action: func(cctx *cli.Context) error {
go func() {
@ -201,24 +416,33 @@ var runCmd = &cli.Command{
log.Fatal(err)
}
-percentExtra := cctx.Int("percent-extra")
+refundPercent := cctx.Int("refund-percent")
maxMessageQueue := cctx.Int("max-message-queue")
dryRun := cctx.Bool("dry-run")
preCommitEnabled := cctx.Bool("pre-commit")
proveCommitEnabled := cctx.Bool("prove-commit")
aggregateTipsets := cctx.Int("aggregate-tipsets")
minerRecoveryEnabled := cctx.Bool("miner-recovery")
minerRecoveryPeriod := abi.ChainEpoch(int64(cctx.Int("miner-recovery-period")))
minerRecoveryRefundPercent := cctx.Int("miner-recovery-refund-percent")
minerRecoveryCutoff := uint64(cctx.Int("miner-recovery-cutoff"))
minerRecoveryBonus := uint64(cctx.Int("miner-recovery-bonus"))
rf := &refunder{
api: api,
wallet: from,
-percentExtra: percentExtra,
-dryRun: dryRun,
-preCommitEnabled: preCommitEnabled,
-proveCommitEnabled: proveCommitEnabled,
+refundPercent: refundPercent,
+minerRecoveryRefundPercent: minerRecoveryRefundPercent,
+minerRecoveryCutoff: types.FromFil(minerRecoveryCutoff),
+minerRecoveryBonus: types.FromFil(minerRecoveryBonus),
+dryRun: dryRun,
+preCommitEnabled: preCommitEnabled,
+proveCommitEnabled: proveCommitEnabled,
}
var refunds *MinersRefund = NewMinersRefund()
var rounds int = 0
nextMinerRecovery := r.MinerRecoveryHeight() + minerRecoveryPeriod
for tipset := range tipsetsCh {
refunds, err = rf.ProcessTipset(ctx, tipset, refunds)
@ -226,17 +450,34 @@ var runCmd = &cli.Command{
return err
}
-rounds = rounds + 1
-if rounds < aggregateTipsets {
-continue
-}
refundTipset, err := api.ChainHead(ctx)
if err != nil {
return err
}
-if err := rf.Refund(ctx, refundTipset, refunds, rounds); err != nil {
+if minerRecoveryEnabled && refundTipset.Height() >= nextMinerRecovery {
recoveryRefund, err := rf.EnsureMinerMinimums(ctx, refundTipset, NewMinersRefund(), "")
if err != nil {
return err
}
if err := rf.Refund(ctx, "refund to recover miners", refundTipset, recoveryRefund, 0); err != nil {
return err
}
if err := r.SetMinerRecoveryHeight(tipset.Height()); err != nil {
return err
}
nextMinerRecovery = r.MinerRecoveryHeight() + minerRecoveryPeriod
}
rounds = rounds + 1
if rounds < aggregateTipsets {
continue
}
if err := rf.Refund(ctx, "refund stats", refundTipset, refunds, rounds); err != nil {
return err
}
@ -295,7 +536,6 @@ func (m *MinersRefund) Track(addr address.Address, value types.BigInt) {
m.count = m.count + 1
m.totalRefunds = types.BigAdd(m.totalRefunds, value)
m.refunds[addr] = types.BigAdd(m.refunds[addr], value)
}
@ -324,8 +564,14 @@ type refunderNodeApi interface {
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error)
ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error)
ChainGetTipSetByHeight(ctx context.Context, epoch abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
StateMinerInitialPledgeCollateral(ctx context.Context, addr address.Address, precommitInfo miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error)
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
StateSectorPreCommitInfo(ctx context.Context, addr address.Address, sector abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
StateMinerSectors(ctx context.Context, addr address.Address, filter *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
StateMinerFaults(ctx context.Context, addr address.Address, tsk types.TipSetKey) (bitfield.BitField, error)
StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error)
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error)
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error)
@ -334,12 +580,241 @@ type refunderNodeApi interface {
}
type refunder struct {
api refunderNodeApi
wallet address.Address
-percentExtra int
-dryRun bool
-preCommitEnabled bool
-proveCommitEnabled bool
+refundPercent int
+minerRecoveryRefundPercent int
+minerRecoveryCutoff big.Int
+minerRecoveryBonus big.Int
+dryRun bool
+preCommitEnabled bool
+proveCommitEnabled bool
+threshold big.Int
}
func (r *refunder) FindMiners(ctx context.Context, tipset *types.TipSet, refunds *MinersRefund, owner, worker, control bool) (*MinersRefund, error) {
miners, err := r.api.StateListMiners(ctx, tipset.Key())
if err != nil {
return nil, err
}
for _, maddr := range miners {
mact, err := r.api.StateGetActor(ctx, maddr, types.EmptyTSK)
if err != nil {
log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
if !mact.Balance.GreaterThan(big.Zero()) {
continue
}
minerAvailableBalance, err := r.api.StateMinerAvailableBalance(ctx, maddr, tipset.Key())
if err != nil {
log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
// Look up and find all addresses associated with the miner
minerInfo, err := r.api.StateMinerInfo(ctx, maddr, tipset.Key())
if err != nil {
log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
allAddresses := []address.Address{}
if worker {
allAddresses = append(allAddresses, minerInfo.Worker)
}
if owner {
allAddresses = append(allAddresses, minerInfo.Owner)
}
if control {
allAddresses = append(allAddresses, minerInfo.ControlAddresses...)
}
// Sum the balances of all the addresses
addrSum := big.Zero()
addrCheck := make(map[address.Address]struct{}, len(allAddresses))
for _, addr := range allAddresses {
if _, found := addrCheck[addr]; !found {
balance, err := r.api.WalletBalance(ctx, addr)
if err != nil {
log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
addrSum = big.Add(addrSum, balance)
addrCheck[addr] = struct{}{}
}
}
totalAvailableBalance := big.Add(addrSum, minerAvailableBalance)
if totalAvailableBalance.GreaterThanEqual(r.threshold) {
continue
}
refunds.Track(maddr, totalAvailableBalance)
log.Debugw("processing miner", "miner", maddr, "sectors", "available_balance", totalAvailableBalance)
}
return refunds, nil
}
func (r *refunder) EnsureMinerMinimums(ctx context.Context, tipset *types.TipSet, refunds *MinersRefund, output string) (*MinersRefund, error) {
miners, err := r.api.StateListMiners(ctx, tipset.Key())
if err != nil {
return nil, err
}
w := ioutil.Discard
if len(output) != 0 {
f, err := os.Create(output)
if err != nil {
return nil, err
}
defer f.Close() // nolint:errcheck
w = bufio.NewWriter(f)
}
csvOut := csv.NewWriter(w)
defer csvOut.Flush()
if err := csvOut.Write([]string{"MinerID", "FaultedSectors", "AvailableBalance", "ProposedRefund"}); err != nil {
return nil, err
}
for _, maddr := range miners {
mact, err := r.api.StateGetActor(ctx, maddr, types.EmptyTSK)
if err != nil {
log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
if !mact.Balance.GreaterThan(big.Zero()) {
continue
}
minerAvailableBalance, err := r.api.StateMinerAvailableBalance(ctx, maddr, tipset.Key())
if err != nil {
log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
// Look up and find all addresses associated with the miner
minerInfo, err := r.api.StateMinerInfo(ctx, maddr, tipset.Key())
if err != nil {
log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
allAddresses := []address.Address{minerInfo.Worker, minerInfo.Owner}
allAddresses = append(allAddresses, minerInfo.ControlAddresses...)
// Sum the balances of all the addresses
addrSum := big.Zero()
addrCheck := make(map[address.Address]struct{}, len(allAddresses))
for _, addr := range allAddresses {
if _, found := addrCheck[addr]; !found {
balance, err := r.api.WalletBalance(ctx, addr)
if err != nil {
log.Errorw("failed", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
addrSum = big.Add(addrSum, balance)
addrCheck[addr] = struct{}{}
}
}
faults, err := r.api.StateMinerFaults(ctx, maddr, tipset.Key())
if err != nil {
log.Errorw("failed to look up miner faults", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
faultsCount, err := faults.Count()
if err != nil {
log.Errorw("failed to get count of faults", "err", err, "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
if faultsCount == 0 {
log.Debugw("skipping miner with zero faults", "height", tipset.Height(), "key", tipset.Key(), "miner", maddr)
continue
}
totalAvailableBalance := big.Add(addrSum, minerAvailableBalance)
balanceCutoff := big.Mul(big.Div(big.NewIntUnsigned(faultsCount), big.NewInt(10)), big.NewIntUnsigned(build.FilecoinPrecision))
if totalAvailableBalance.GreaterThan(balanceCutoff) {
log.Debugw(
"skipping over miner with total available balance larger than refund",
"height", tipset.Height(),
"key", tipset.Key(),
"miner", maddr,
"available_balance", totalAvailableBalance,
"balance_cutoff", balanceCutoff,
"faults_count", faultsCount,
"available_balance_fil", big.Div(totalAvailableBalance, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
"balance_cutoff_fil", big.Div(balanceCutoff, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
)
continue
}
refundValue := big.Sub(balanceCutoff, totalAvailableBalance)
if r.minerRecoveryRefundPercent > 0 {
refundValue = types.BigMul(types.BigDiv(refundValue, types.NewInt(100)), types.NewInt(uint64(r.minerRecoveryRefundPercent)))
}
refundValue = big.Add(refundValue, r.minerRecoveryBonus)
if refundValue.GreaterThan(r.minerRecoveryCutoff) {
log.Infow(
"skipping over miner with refund greater than refund cutoff",
"height", tipset.Height(),
"key", tipset.Key(),
"miner", maddr,
"available_balance", totalAvailableBalance,
"balance_cutoff", balanceCutoff,
"faults_count", faultsCount,
"refund", refundValue,
"available_balance_fil", big.Div(totalAvailableBalance, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
"balance_cutoff_fil", big.Div(balanceCutoff, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
"refund_fil", big.Div(refundValue, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
)
continue
}
refunds.Track(maddr, refundValue)
record := []string{
maddr.String(),
fmt.Sprintf("%d", faultsCount),
big.Div(totalAvailableBalance, big.NewIntUnsigned(build.FilecoinPrecision)).String(),
big.Div(refundValue, big.NewIntUnsigned(build.FilecoinPrecision)).String(),
}
if err := csvOut.Write(record); err != nil {
return nil, err
}
log.Debugw(
"processing miner",
"miner", maddr,
"faults_count", faultsCount,
"available_balance", totalAvailableBalance,
"refund", refundValue,
"available_balance_fil", big.Div(totalAvailableBalance, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
"refund_fil", big.Div(refundValue, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
)
}
return refunds, nil
}
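// Illustrative walk-through of the miner recovery refund math above (numbers are
// hypothetical, not taken from chain data):
//
//	faultsCount = 25                 -> balanceCutoff = (25 / 10) FIL = 2 FIL
//	totalAvailableBalance = 0.5 FIL     (miner available balance + owner/worker/control wallets)
//	base refund           = 2 - 0.5  = 1.5 FIL
//	minerRecoveryRefundPercent = 50  -> refund = 1.5 / 100 * 50 = 0.75 FIL
//	refund += minerRecoveryBonus; if the result exceeds minerRecoveryCutoff the miner
//	is skipped, otherwise the refund is tracked and written to the CSV report.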
func (r *refunder) ProcessTipset(ctx context.Context, tipset *types.TipSet, refunds *MinersRefund) (*MinersRefund, error) {
@ -460,22 +935,41 @@ func (r *refunder) ProcessTipset(ctx context.Context, tipset *types.TipSet, refu
			continue
		}

		if r.refundPercent > 0 {
			refundValue = types.BigMul(types.BigDiv(refundValue, types.NewInt(100)), types.NewInt(uint64(r.refundPercent)))
		}

		log.Debugw(
			"processing message",
			"method", messageMethod,
			"cid", msg.Cid,
			"from", m.From,
			"to", m.To,
			"value", m.Value,
			"gas_fee_cap", m.GasFeeCap,
			"gas_premium", m.GasPremium,
			"gas_used", recps[i].GasUsed,
			"refund", refundValue,
			"refund_fil", big.Div(refundValue, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
		)

		refunds.Track(m.From, refundValue)
		tipsetRefunds.Track(m.From, refundValue)
	}

	log.Infow(
		"tipset stats",
		"height", tipset.Height(),
		"key", tipset.Key(),
		"total_refunds", tipsetRefunds.TotalRefunds(),
		"total_refunds_fil", big.Div(tipsetRefunds.TotalRefunds(), big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
		"messages_processed", tipsetRefunds.Count(),
	)

	return refunds, nil
}
func (r *refunder) Refund(ctx context.Context, name string, tipset *types.TipSet, refunds *MinersRefund, rounds int) error {
	if refunds.Count() == 0 {
		log.Debugw("no messages to refund in tipset", "height", tipset.Height(), "key", tipset.Key())
		return nil
@ -533,13 +1027,24 @@ func (r *refunder) Refund(ctx context.Context, tipset *types.TipSet, refunds *Mi
		refundSum = types.BigAdd(refundSum, msg.Value)
	}

	log.Infow(
		name,
		"tipsets_processed", rounds,
		"height", tipset.Height(),
		"key", tipset.Key(),
		"messages_sent", len(messages)-failures,
		"refund_sum", refundSum,
		"refund_sum_fil", big.Div(refundSum, big.NewIntUnsigned(build.FilecoinPrecision)).Int64(),
		"messages_failures", failures,
		"messages_processed", refunds.Count(),
	)

	return nil
}

type Repo struct {
	lastHeight              abi.ChainEpoch
	lastMinerRecoveryHeight abi.ChainEpoch
	path                    string
}
func NewRepo(path string) (*Repo, error) {
@ -549,8 +1054,9 @@ func NewRepo(path string) (*Repo, error) {
	}

	return &Repo{
		lastHeight:              0,
		lastMinerRecoveryHeight: 0,
		path:                    path,
	}, nil
}
@ -581,43 +1087,66 @@ func (r *Repo) init() error {
	return nil
}
func (r *Repo) Open() error {
	if err := r.init(); err != nil {
		return err
	}

	if err := r.loadHeight(); err != nil {
		return err
	}

	if err := r.loadMinerRecoveryHeight(); err != nil {
		return err
	}

	return nil
}

func loadChainEpoch(fn string) (abi.ChainEpoch, error) {
	f, err := os.OpenFile(fn, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return 0, err
	}
	defer func() {
		err = f.Close()
	}()

	raw, err := ioutil.ReadAll(f)
	if err != nil {
		return 0, err
	}

	height, err := strconv.Atoi(string(bytes.TrimSpace(raw)))
	if err != nil {
		return 0, err
	}

	return abi.ChainEpoch(height), nil
}

func (r *Repo) loadHeight() error {
	var err error
	r.lastHeight, err = loadChainEpoch(filepath.Join(r.path, "height"))
	return err
}

func (r *Repo) loadMinerRecoveryHeight() error {
	var err error
	r.lastMinerRecoveryHeight, err = loadChainEpoch(filepath.Join(r.path, "miner_recovery_height"))
	return err
}

func (r *Repo) Height() abi.ChainEpoch {
	return r.lastHeight
}

func (r *Repo) MinerRecoveryHeight() abi.ChainEpoch {
	return r.lastMinerRecoveryHeight
}

func (r *Repo) SetHeight(last abi.ChainEpoch) (err error) {
	r.lastHeight = last
	var f *os.File
	f, err = os.OpenFile(filepath.Join(r.path, "height"), os.O_RDWR, 0644)
	if err != nil {
@ -628,7 +1157,26 @@ func (r *Repo) SetHeight(last abi.ChainEpoch) (err error) {
		err = f.Close()
	}()

	if _, err = fmt.Fprintf(f, "%d", r.lastHeight); err != nil {
		return
	}

	return
}

func (r *Repo) SetMinerRecoveryHeight(last abi.ChainEpoch) (err error) {
	r.lastMinerRecoveryHeight = last
	var f *os.File
	f, err = os.OpenFile(filepath.Join(r.path, "miner_recovery_height"), os.O_RDWR, 0644)
	if err != nil {
		return
	}
	defer func() {
		err = f.Close()
	}()

	if _, err = fmt.Fprintf(f, "%d", r.lastMinerRecoveryHeight); err != nil {
		return
	}


@ -39,6 +39,7 @@ func main() {
		consensusCmd,
		serveDealStatsCmd,
		syncCmd,
		stateTreePruneCmd,
	}

	app := &cli.App{

cmd/lotus-shed/pruning.go (new file)

@ -0,0 +1,289 @@
package main
import (
"context"
"fmt"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/lib/blockstore"
"github.com/filecoin-project/lotus/node/repo"
"github.com/ipfs/bbloom"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
)
type cidSet interface {
Add(cid.Cid)
Has(cid.Cid) bool
HasRaw([]byte) bool
Len() int
}
type bloomSet struct {
bloom *bbloom.Bloom
}
func newBloomSet(size int64) (*bloomSet, error) {
b, err := bbloom.New(float64(size), 3)
if err != nil {
return nil, err
}
return &bloomSet{bloom: b}, nil
}
func (bs *bloomSet) Add(c cid.Cid) {
bs.bloom.Add(c.Hash())
}
func (bs *bloomSet) Has(c cid.Cid) bool {
return bs.bloom.Has(c.Hash())
}
func (bs *bloomSet) HasRaw(b []byte) bool {
return bs.bloom.Has(b)
}
func (bs *bloomSet) Len() int {
return int(bs.bloom.ElementsAdded())
}
type mapSet struct {
m map[string]struct{}
}
func newMapSet() *mapSet {
return &mapSet{m: make(map[string]struct{})}
}
func (bs *mapSet) Add(c cid.Cid) {
bs.m[string(c.Hash())] = struct{}{}
}
func (bs *mapSet) Has(c cid.Cid) bool {
_, ok := bs.m[string(c.Hash())]
return ok
}
func (bs *mapSet) HasRaw(b []byte) bool {
_, ok := bs.m[string(b)]
return ok
}
func (bs *mapSet) Len() int {
return len(bs.m)
}
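// The two cidSet implementations above trade memory for accuracy: the bloom set is
// fixed-size but can report false positives (some garbage is then kept, never the
// other way around), while the map set is exact but grows with the number of live
// objects. A minimal sketch of how the prune command picks one, mirroring the
// --use-bloom-set handling further down (hypothetical helper, shown for clarity):
//
//	func newKeepSet(useBloom bool) (cidSet, error) {
//		if useBloom {
//			return newBloomSet(10000000) // ~10M expected entries
//		}
//		return newMapSet(), nil
//	}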
var stateTreePruneCmd = &cli.Command{
Name: "state-prune",
Description: "Deletes old state root data from local chainstore",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
Value: "~/.lotus",
},
&cli.Int64Flag{
Name: "keep-from-lookback",
Usage: "keep stateroots at or newer than the current height minus this lookback",
Value: 1800, // 2 x finality
},
&cli.IntFlag{
Name: "delete-up-to",
Usage: "delete up to the given number of objects (used to run a faster 'partial' sync)",
},
&cli.BoolFlag{
Name: "use-bloom-set",
Usage: "use a bloom filter for the 'good' set instead of a map, reduces memory usage but may not clean up as much",
},
&cli.BoolFlag{
Name: "dry-run",
Usage: "only enumerate the good set, don't do any deletions",
},
&cli.BoolFlag{
Name: "only-ds-gc",
Usage: "Only run datastore GC",
},
&cli.IntFlag{
Name: "gc-count",
Usage: "number of times to run gc",
Value: 20,
},
},
Action: func(cctx *cli.Context) error {
ctx := context.TODO()
fsrepo, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return err
}
lkrepo, err := fsrepo.Lock(repo.FullNode)
if err != nil {
return err
}
defer lkrepo.Close() //nolint:errcheck
ds, err := lkrepo.Datastore("/chain")
if err != nil {
return err
}
defer ds.Close() //nolint:errcheck
mds, err := lkrepo.Datastore("/metadata")
if err != nil {
return err
}
defer mds.Close() //nolint:errcheck
if cctx.Bool("only-ds-gc") {
gcds, ok := ds.(datastore.GCDatastore)
if ok {
fmt.Println("running datastore gc....")
for i := 0; i < cctx.Int("gc-count"); i++ {
if err := gcds.CollectGarbage(); err != nil {
return xerrors.Errorf("datastore GC failed: %w", err)
}
}
fmt.Println("gc complete!")
return nil
}
return fmt.Errorf("datastore doesnt support gc")
}
bs := blockstore.NewBlockstore(ds)
cs := store.NewChainStore(bs, mds, vm.Syscalls(ffiwrapper.ProofVerifier))
if err := cs.Load(); err != nil {
return fmt.Errorf("loading chainstore: %w", err)
}
var goodSet cidSet
if cctx.Bool("use-bloom-set") {
bset, err := newBloomSet(10000000)
if err != nil {
return err
}
goodSet = bset
} else {
goodSet = newMapSet()
}
ts := cs.GetHeaviestTipSet()
rrLb := abi.ChainEpoch(cctx.Int64("keep-from-lookback"))
if err := cs.WalkSnapshot(ctx, ts, rrLb, true, func(c cid.Cid) error {
if goodSet.Len()%20 == 0 {
fmt.Printf("\renumerating keep set: %d ", goodSet.Len())
}
goodSet.Add(c)
return nil
}); err != nil {
return fmt.Errorf("snapshot walk failed: %w", err)
}
fmt.Println()
fmt.Printf("Successfully marked keep set! (%d objects)\n", goodSet.Len())
if cctx.Bool("dry-run") {
return nil
}
var b datastore.Batch
var batchCount int
markForRemoval := func(c cid.Cid) error {
if b == nil {
nb, err := ds.Batch()
if err != nil {
return fmt.Errorf("opening batch: %w", err)
}
b = nb
}
batchCount++
if err := b.Delete(dshelp.MultihashToDsKey(c.Hash())); err != nil {
return err
}
if batchCount > 100 {
if err := b.Commit(); err != nil {
return xerrors.Errorf("failed to commit batch deletes: %w", err)
}
b = nil
batchCount = 0
}
return nil
}
res, err := ds.Query(query.Query{KeysOnly: true})
if err != nil {
return xerrors.Errorf("failed to query datastore: %w", err)
}
dupTo := cctx.Int("delete-up-to")
var deleteCount int
var goodHits int
for {
v, ok := res.NextSync()
if !ok {
break
}
bk, err := dshelp.BinaryFromDsKey(datastore.RawKey(v.Key[len("/blocks"):]))
if err != nil {
return xerrors.Errorf("failed to parse key: %w", err)
}
if goodSet.HasRaw(bk) {
goodHits++
continue
}
nc := cid.NewCidV1(cid.Raw, bk)
deleteCount++
if err := markForRemoval(nc); err != nil {
return fmt.Errorf("failed to remove cid %s: %w", nc, err)
}
if deleteCount%20 == 0 {
fmt.Printf("\rdeleting %d objects (good hits: %d)... ", deleteCount, goodHits)
}
if dupTo != 0 && deleteCount > dupTo {
break
}
}
if b != nil {
if err := b.Commit(); err != nil {
return xerrors.Errorf("failed to commit final batch delete: %w", err)
}
}
gcds, ok := ds.(datastore.GCDatastore)
if ok {
fmt.Println("running datastore gc....")
for i := 0; i < cctx.Int("gc-count"); i++ {
if err := gcds.CollectGarbage(); err != nil {
return xerrors.Errorf("datastore GC failed: %w", err)
}
}
fmt.Println("gc complete!")
}
return nil
},
}
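// Example invocation (hypothetical values; flags as defined above):
//
//	lotus-shed state-prune --keep-from-lookback 2880 --use-bloom-set --gc-count 5
//
// With --dry-run the command only enumerates and reports the keep set; with
// --only-ds-gc it skips the mark/delete phase and just runs datastore GC.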


@ -3,6 +3,8 @@ package main
import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"

	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"
@ -37,29 +39,31 @@ var verifRegCmd = &cli.Command{
}
var verifRegAddVerifierCmd = &cli.Command{
	Name:      "add-verifier",
	Usage:     "make a given account a verifier",
	ArgsUsage: "<message sender> <new verifier> <allowance>",
	Action: func(cctx *cli.Context) error {
		if cctx.Args().Len() != 3 {
			return fmt.Errorf("must specify three arguments: sender, verifier, and allowance")
		}

		sender, err := address.NewFromString(cctx.Args().Get(0))
		if err != nil {
			return err
		}

		verifier, err := address.NewFromString(cctx.Args().Get(1))
		if err != nil {
			return err
		}

		allowance, err := types.BigFromString(cctx.Args().Get(2))
		if err != nil {
			return err
		}

		// TODO: ActorUpgrade: Abstract
		params, err := actors.SerializeParams(&verifreg0.AddVerifierParams{Address: verifier, Allowance: allowance})
		if err != nil {
			return err
		}
@ -71,21 +75,19 @@ var verifRegAddVerifierCmd = &cli.Command{
		defer closer()

		ctx := lcli.ReqContext(cctx)

		vrk, err := api.StateVerifiedRegistryRootKey(ctx, types.EmptyTSK)
		if err != nil {
			return err
		}

		smsg, err := api.MsigPropose(ctx, vrk, verifreg.Address, big.Zero(), sender, uint64(builtin0.MethodsVerifiedRegistry.AddVerifier), params)
		if err != nil {
			return err
		}

		fmt.Printf("message sent, now waiting on cid: %s\n", smsg)

		mwait, err := api.StateWaitMsg(ctx, smsg, build.MessageConfidence)
		if err != nil {
			return err
		}
@ -94,6 +96,7 @@ var verifRegAddVerifierCmd = &cli.Command{
return fmt.Errorf("failed to add verifier: %d", mwait.Receipt.ExitCode) return fmt.Errorf("failed to add verifier: %d", mwait.Receipt.ExitCode)
} }
//TODO: Internal msg might still have failed
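		// Note (added for clarity, not part of the original patch): the AddVerifier
		// message is now proposed through the verified registry root key, which is a
		// multisig actor. The <message sender> must be one of its signers, and if the
		// multisig's approval threshold is greater than one the proposal still needs
		// further approvals before the verifier is actually added; a zero exit code
		// here only confirms that the propose message itself succeeded.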
		return nil
	},


@ -5,19 +5,22 @@ import (
"os" "os"
"sort" "sort"
"strconv" "strconv"
"text/tabwriter"
"time" "time"
"github.com/docker/go-units"
"github.com/fatih/color"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/tablewriter"
lcli "github.com/filecoin-project/lotus/cli" lcli "github.com/filecoin-project/lotus/cli"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing" sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
@ -144,8 +147,19 @@ var sectorsListCmd = &cli.Command{
Name: "show-removed", Name: "show-removed",
Usage: "show removed sectors", Usage: "show removed sectors",
}, },
&cli.BoolFlag{
Name: "color",
Aliases: []string{"c"},
Value: true,
},
&cli.BoolFlag{
Name: "fast",
Usage: "don't show on-chain info for better performance",
},
}, },
Action: func(cctx *cli.Context) error { Action: func(cctx *cli.Context) error {
color.NoColor = !cctx.Bool("color")
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil { if err != nil {
return err return err
@ -170,7 +184,12 @@ var sectorsListCmd = &cli.Command{
return err return err
} }
activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) head, err := fullApi.ChainHead(ctx)
if err != nil {
return err
}
activeSet, err := fullApi.StateMinerActiveSectors(ctx, maddr, head.Key())
if err != nil { if err != nil {
return err return err
} }
@ -179,7 +198,7 @@ var sectorsListCmd = &cli.Command{
activeIDs[info.SectorNumber] = struct{}{} activeIDs[info.SectorNumber] = struct{}{}
} }
sset, err := fullApi.StateMinerSectors(ctx, maddr, nil, types.EmptyTSK) sset, err := fullApi.StateMinerSectors(ctx, maddr, nil, head.Key())
if err != nil { if err != nil {
return err return err
} }
@ -192,12 +211,26 @@ var sectorsListCmd = &cli.Command{
return list[i] < list[j] return list[i] < list[j]
}) })
w := tabwriter.NewWriter(os.Stdout, 8, 4, 1, ' ', 0) tw := tablewriter.New(
tablewriter.Col("ID"),
tablewriter.Col("State"),
tablewriter.Col("OnChain"),
tablewriter.Col("Active"),
tablewriter.Col("Expiration"),
tablewriter.Col("Deals"),
tablewriter.Col("DealWeight"),
tablewriter.NewLineCol("Error"),
tablewriter.NewLineCol("EarlyExpiration"))
fast := cctx.Bool("fast")
for _, s := range list { for _, s := range list {
st, err := nodeApi.SectorsStatus(ctx, s, false) st, err := nodeApi.SectorsStatus(ctx, s, !fast)
if err != nil { if err != nil {
fmt.Fprintf(w, "%d:\tError: %s\n", s, err) tw.Write(map[string]interface{}{
"ID": s,
"Error": err,
})
continue continue
} }
@ -205,20 +238,60 @@ var sectorsListCmd = &cli.Command{
_, inSSet := commitedIDs[s] _, inSSet := commitedIDs[s]
_, inASet := activeIDs[s] _, inASet := activeIDs[s]
_, _ = fmt.Fprintf(w, "%d: %s\tsSet: %s\tactive: %s\ttktH: %d\tseedH: %d\tdeals: %v\t toUpgrade:%t\n", dw := .0
s, if st.Expiration-st.Activation > 0 {
st.State, dw = float64(big.Div(st.DealWeight, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
yesno(inSSet), }
yesno(inASet),
st.Ticket.Epoch, var deals int
st.Seed.Epoch, for _, deal := range st.Deals {
st.Deals, if deal != 0 {
st.ToUpgrade, deals++
) }
}
exp := st.Expiration
if st.OnTime > 0 && st.OnTime < exp {
exp = st.OnTime // Can be different when the sector was CC upgraded
}
m := map[string]interface{}{
"ID": s,
"State": color.New(stateOrder[sealing.SectorState(st.State)].col).Sprint(st.State),
"OnChain": yesno(inSSet),
"Active": yesno(inASet),
}
if deals > 0 {
m["Deals"] = color.GreenString("%d", deals)
} else {
m["Deals"] = color.BlueString("CC")
if st.ToUpgrade {
m["Deals"] = color.CyanString("CC(upgrade)")
}
}
if !fast {
if !inSSet {
m["Expiration"] = "n/a"
} else {
m["Expiration"] = lcli.EpochTime(head.Height(), exp)
if !fast && deals > 0 {
m["DealWeight"] = units.BytesSize(dw)
}
if st.Early > 0 {
m["EarlyExpiration"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
}
}
}
tw.Write(m)
} }
} }
return w.Flush() return tw.Flush(os.Stdout)
}, },
} }
@ -447,7 +520,7 @@ var sectorsUpdateCmd = &cli.Command{
func yesno(b bool) string { func yesno(b bool) string {
if b { if b {
return "YES" return color.GreenString("YES")
} }
return "NO" return color.RedString("NO")
} }


@ -72,6 +72,7 @@
* [MpoolPending](#MpoolPending)
* [MpoolPush](#MpoolPush)
* [MpoolPushMessage](#MpoolPushMessage)
* [MpoolPushUntrusted](#MpoolPushUntrusted)
* [MpoolSelect](#MpoolSelect)
* [MpoolSetConfig](#MpoolSetConfig)
* [MpoolSub](#MpoolSub)
@ -160,6 +161,8 @@
* [StateSectorPartition](#StateSectorPartition)
* [StateSectorPreCommitInfo](#StateSectorPreCommitInfo)
* [StateVerifiedClientStatus](#StateVerifiedClientStatus)
* [StateVerifiedRegistryRootKey](#StateVerifiedRegistryRootKey)
* [StateVerifierStatus](#StateVerifierStatus)
* [StateWaitMsg](#StateWaitMsg)
* [Sync](#Sync)
* [SyncCheckBad](#SyncCheckBad)
@ -1779,6 +1782,43 @@ Response:
}
```
### MpoolPushUntrusted
MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
Perms: write
Inputs:
```json
[
{
"Message": {
"Version": 42,
"To": "t01234",
"From": "t01234",
"Nonce": 42,
"Value": "0",
"GasLimit": 9,
"GasFeeCap": "0",
"GasPremium": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ=="
},
"Signature": {
"Type": 2,
"Data": "Ynl0ZSBhcnJheQ=="
}
}
]
```
Response:
```json
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
```
### MpoolSelect
MpoolSelect returns a list of pending messages for inclusion in the next block
@ -4117,6 +4157,53 @@ Returns nil if there is no entry in the data cap table for the
address.
Perms: read
Inputs:
```json
[
"t01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
}
]
]
```
Response: `"0"`
### StateVerifiedRegistryRootKey
StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
Perms: read
Inputs:
```json
[
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
}
]
]
```
Response: `"t01234"`
### StateVerifierStatus
StateVerifierStatus returns the data cap for the given address.
Returns nil if there is no entry in the data cap table for the
address.
Perms: read

Inputs:


@ -10,14 +10,38 @@ type Resources struct {
	MinMemory uint64 // What Must be in RAM for decent perf
	MaxMemory uint64 // Memory required (swap + ram)

	MaxParallelism int // -1 = multithread
	CanGPU         bool

	BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads)
}

/*
 Percent of threads to allocate to parallel tasks

 12  * 0.92 = 11
 16  * 0.92 = 14
 24  * 0.92 = 22
 32  * 0.92 = 29
 64  * 0.92 = 58
 128 * 0.92 = 117
*/
var ParallelNum uint64 = 92
var ParallelDenom uint64 = 100

// TODO: Take NUMA into account
func (r Resources) Threads(wcpus uint64) uint64 {
	if r.MaxParallelism == -1 {
		n := (wcpus * ParallelNum) / ParallelDenom
		if n == 0 {
			return wcpus
		}

		return n
	}

	return uint64(r.MaxParallelism)
}
var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{
@ -26,7 +50,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 30, MaxMemory: 8 << 30,
MinMemory: 8 << 30, MinMemory: 8 << 30,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 1 << 30, BaseMinMemory: 1 << 30,
}, },
@ -34,7 +58,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 4 << 30, MaxMemory: 4 << 30,
MinMemory: 4 << 30, MinMemory: 4 << 30,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 1 << 30, BaseMinMemory: 1 << 30,
}, },
@ -42,7 +66,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30, MaxMemory: 1 << 30,
MinMemory: 1 << 30, MinMemory: 1 << 30,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 1 << 30, BaseMinMemory: 1 << 30,
}, },
@ -50,7 +74,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10, MaxMemory: 2 << 10,
MinMemory: 2 << 10, MinMemory: 2 << 10,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 2 << 10, BaseMinMemory: 2 << 10,
}, },
@ -58,7 +82,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20, MaxMemory: 8 << 20,
MinMemory: 8 << 20, MinMemory: 8 << 20,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 8 << 20, BaseMinMemory: 8 << 20,
}, },
@ -68,7 +92,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 128 << 30, MaxMemory: 128 << 30,
MinMemory: 112 << 30, MinMemory: 112 << 30,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 10 << 20, BaseMinMemory: 10 << 20,
}, },
@ -76,7 +100,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 64 << 30, MaxMemory: 64 << 30,
MinMemory: 56 << 30, MinMemory: 56 << 30,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 10 << 20, BaseMinMemory: 10 << 20,
}, },
@ -84,7 +108,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30, MaxMemory: 1 << 30,
MinMemory: 768 << 20, MinMemory: 768 << 20,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 1 << 20, BaseMinMemory: 1 << 20,
}, },
@ -92,7 +116,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10, MaxMemory: 2 << 10,
MinMemory: 2 << 10, MinMemory: 2 << 10,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 2 << 10, BaseMinMemory: 2 << 10,
}, },
@ -100,35 +124,35 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20, MaxMemory: 8 << 20,
MinMemory: 8 << 20, MinMemory: 8 << 20,
Threads: 1, MaxParallelism: 1,
BaseMinMemory: 8 << 20, BaseMinMemory: 8 << 20,
}, },
}, },
sealtasks.TTPreCommit2: { sealtasks.TTPreCommit2: {
abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{
MaxMemory: 64 << 30, MaxMemory: 30 << 30,
MinMemory: 64 << 30, MinMemory: 30 << 30,
Threads: -1, MaxParallelism: -1,
CanGPU: true, CanGPU: true,
BaseMinMemory: 60 << 30, BaseMinMemory: 1 << 30,
}, },
abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{
MaxMemory: 32 << 30, MaxMemory: 15 << 30,
MinMemory: 32 << 30, MinMemory: 15 << 30,
Threads: -1, MaxParallelism: -1,
CanGPU: true, CanGPU: true,
BaseMinMemory: 30 << 30, BaseMinMemory: 1 << 30,
}, },
abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{
MaxMemory: 3 << 29, // 1.5G MaxMemory: 3 << 29, // 1.5G
MinMemory: 1 << 30, MinMemory: 1 << 30,
Threads: -1, MaxParallelism: -1,
BaseMinMemory: 1 << 30, BaseMinMemory: 1 << 30,
}, },
@ -136,7 +160,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10, MaxMemory: 2 << 10,
MinMemory: 2 << 10, MinMemory: 2 << 10,
Threads: -1, MaxParallelism: -1,
BaseMinMemory: 2 << 10, BaseMinMemory: 2 << 10,
}, },
@ -144,7 +168,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20, MaxMemory: 8 << 20,
MinMemory: 8 << 20, MinMemory: 8 << 20,
Threads: -1, MaxParallelism: -1,
BaseMinMemory: 8 << 20, BaseMinMemory: 8 << 20,
}, },
@ -154,7 +178,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30, MaxMemory: 1 << 30,
MinMemory: 1 << 30, MinMemory: 1 << 30,
Threads: 0, MaxParallelism: 0,
BaseMinMemory: 1 << 30, BaseMinMemory: 1 << 30,
}, },
@ -162,7 +186,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30, MaxMemory: 1 << 30,
MinMemory: 1 << 30, MinMemory: 1 << 30,
Threads: 0, MaxParallelism: 0,
BaseMinMemory: 1 << 30, BaseMinMemory: 1 << 30,
}, },
@ -170,7 +194,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 30, MaxMemory: 1 << 30,
MinMemory: 1 << 30, MinMemory: 1 << 30,
Threads: 0, MaxParallelism: 0,
BaseMinMemory: 1 << 30, BaseMinMemory: 1 << 30,
}, },
@ -178,7 +202,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10, MaxMemory: 2 << 10,
MinMemory: 2 << 10, MinMemory: 2 << 10,
Threads: 0, MaxParallelism: 0,
BaseMinMemory: 2 << 10, BaseMinMemory: 2 << 10,
}, },
@ -186,7 +210,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20, MaxMemory: 8 << 20,
MinMemory: 8 << 20, MinMemory: 8 << 20,
Threads: 0, MaxParallelism: 0,
BaseMinMemory: 8 << 20, BaseMinMemory: 8 << 20,
}, },
@ -196,8 +220,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 190 << 30, // TODO: Confirm MaxMemory: 190 << 30, // TODO: Confirm
MinMemory: 60 << 30, MinMemory: 60 << 30,
Threads: -1, MaxParallelism: -1,
CanGPU: true, CanGPU: true,
BaseMinMemory: 64 << 30, // params BaseMinMemory: 64 << 30, // params
}, },
@ -205,8 +229,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory
MinMemory: 30 << 30, MinMemory: 30 << 30,
Threads: -1, MaxParallelism: -1,
CanGPU: true, CanGPU: true,
BaseMinMemory: 32 << 30, // params BaseMinMemory: 32 << 30, // params
}, },
@ -214,8 +238,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 3 << 29, // 1.5G MaxMemory: 3 << 29, // 1.5G
MinMemory: 1 << 30, MinMemory: 1 << 30,
Threads: 1, // This is fine MaxParallelism: 1, // This is fine
CanGPU: true, CanGPU: true,
BaseMinMemory: 10 << 30, BaseMinMemory: 10 << 30,
}, },
@ -223,8 +247,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 2 << 10, MaxMemory: 2 << 10,
MinMemory: 2 << 10, MinMemory: 2 << 10,
Threads: 1, MaxParallelism: 1,
CanGPU: true, CanGPU: true,
BaseMinMemory: 2 << 10, BaseMinMemory: 2 << 10,
}, },
@ -232,8 +256,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 8 << 20, MaxMemory: 8 << 20,
MinMemory: 8 << 20, MinMemory: 8 << 20,
Threads: 1, MaxParallelism: 1,
CanGPU: true, CanGPU: true,
BaseMinMemory: 8 << 20, BaseMinMemory: 8 << 20,
}, },
@ -243,8 +267,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20, MaxMemory: 1 << 20,
MinMemory: 1 << 20, MinMemory: 1 << 20,
Threads: 0, MaxParallelism: 0,
CanGPU: false, CanGPU: false,
BaseMinMemory: 0, BaseMinMemory: 0,
}, },
@ -252,8 +276,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20, MaxMemory: 1 << 20,
MinMemory: 1 << 20, MinMemory: 1 << 20,
Threads: 0, MaxParallelism: 0,
CanGPU: false, CanGPU: false,
BaseMinMemory: 0, BaseMinMemory: 0,
}, },
@ -261,8 +285,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20, MaxMemory: 1 << 20,
MinMemory: 1 << 20, MinMemory: 1 << 20,
Threads: 0, MaxParallelism: 0,
CanGPU: false, CanGPU: false,
BaseMinMemory: 0, BaseMinMemory: 0,
}, },
@ -270,8 +294,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20, MaxMemory: 1 << 20,
MinMemory: 1 << 20, MinMemory: 1 << 20,
Threads: 0, MaxParallelism: 0,
CanGPU: false, CanGPU: false,
BaseMinMemory: 0, BaseMinMemory: 0,
}, },
@ -279,8 +303,8 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
MaxMemory: 1 << 20, MaxMemory: 1 << 20,
MinMemory: 1 << 20, MinMemory: 1 << 20,
Threads: 0, MaxParallelism: 0,
CanGPU: false, CanGPU: false,
BaseMinMemory: 0, BaseMinMemory: 0,
}, },


@ -28,12 +28,7 @@ func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerResource
func (a *activeResources) add(wr storiface.WorkerResources, r Resources) {
	a.gpuUsed = r.CanGPU
	a.cpuUse += r.Threads(wr.CPUs)
	a.memUsedMin += r.MinMemory
	a.memUsedMax += r.MaxMemory
}
@ -42,12 +37,7 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) {
	if r.CanGPU {
		a.gpuUsed = false
	}
	a.cpuUse -= r.Threads(wr.CPUs)
	a.memUsedMin -= r.MinMemory
	a.memUsedMax -= r.MaxMemory
}
@ -68,16 +58,9 @@ func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, call
		return false
	}

	if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs {
		log.Debugf("sched: not scheduling on worker %d for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs)
		return false
	}

	if len(res.GPUs) > 0 && needRes.CanGPU {


@ -290,6 +290,9 @@ func TestSched(t *testing.T) {
}

testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) {
	ParallelNum = 1
	ParallelDenom = 1

	return func(t *testing.T) {
		index := stores.NewIndex()

go.mod

@ -47,10 +47,12 @@ require (
	github.com/google/uuid v1.1.1
	github.com/gorilla/mux v1.7.4
	github.com/gorilla/websocket v1.4.2
	github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
	github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e
	github.com/hashicorp/go-multierror v1.1.0
	github.com/hashicorp/golang-lru v0.5.4
	github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d
	github.com/ipfs/bbloom v0.0.4
	github.com/ipfs/go-bitswap v0.2.20
	github.com/ipfs/go-block-format v0.0.2
	github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834

go.sum

@ -421,6 +421,8 @@ github.com/gxed/go-shellwords v1.0.3/go.mod h1:N7paucT91ByIjmVJHhvoarjoQnmsi3Jd3
github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU=
github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48=
github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE= github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE=
github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 h1:BpJ2o0OR5FV7vrkDYfXYVJQeMNWa8RhklZOpW2ITAIQ=
github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE=
github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q=
github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8=
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY=


@ -12,6 +12,7 @@ import (
type Column struct { type Column struct {
Name string Name string
SeparateLine bool SeparateLine bool
Lines int
} }
type TableWriter struct { type TableWriter struct {
@ -50,6 +51,7 @@ cloop:
for i, column := range w.cols { for i, column := range w.cols {
if column.Name == col { if column.Name == col {
byColID[i] = fmt.Sprint(val) byColID[i] = fmt.Sprint(val)
w.cols[i].Lines++
continue cloop continue cloop
} }
} }
@ -58,6 +60,7 @@ cloop:
w.cols = append(w.cols, Column{ w.cols = append(w.cols, Column{
Name: col, Name: col,
SeparateLine: false, SeparateLine: false,
Lines: 1,
}) })
} }
@ -77,7 +80,11 @@ func (w *TableWriter) Flush(out io.Writer) error {
w.rows = append([]map[int]string{header}, w.rows...) w.rows = append([]map[int]string{header}, w.rows...)
for col := range w.cols { for col, c := range w.cols {
if c.Lines == 0 {
continue
}
for _, row := range w.rows { for _, row := range w.rows {
val, found := row[col] val, found := row[col]
if !found { if !found {
@ -94,9 +101,13 @@ func (w *TableWriter) Flush(out io.Writer) error {
cols := make([]string, len(w.cols)) cols := make([]string, len(w.cols))
for ci, col := range w.cols { for ci, col := range w.cols {
if col.Lines == 0 {
continue
}
e, _ := row[ci] e, _ := row[ci]
pad := colLengths[ci] - cliStringLength(e) + 2 pad := colLengths[ci] - cliStringLength(e) + 2
if !col.SeparateLine { if !col.SeparateLine && col.Lines > 0 {
e = e + strings.Repeat(" ", pad) e = e + strings.Repeat(" ", pad)
if _, err := fmt.Fprint(out, e); err != nil { if _, err := fmt.Fprint(out, e); err != nil {
return err return err


@ -25,15 +25,15 @@ import (
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/events/state" "github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing" sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/markets/utils" "github.com/filecoin-project/lotus/markets/utils"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/storage/sectorblocks" "github.com/filecoin-project/lotus/storage/sectorblocks"
) )
@ -50,14 +50,24 @@ type ProviderNodeAdapter struct {
secb *sectorblocks.SectorBlocks secb *sectorblocks.SectorBlocks
ev *events.Events ev *events.Events
publishSpec, addBalanceSpec *api.MessageSendSpec
} }
func NewProviderNodeAdapter(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode { func NewProviderNodeAdapter(fc *config.MinerFeeConfig) func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode {
return &ProviderNodeAdapter{ return func(dag dtypes.StagingDAG, secb *sectorblocks.SectorBlocks, full api.FullNode) storagemarket.StorageProviderNode {
FullNode: full, na := &ProviderNodeAdapter{
dag: dag, FullNode: full,
secb: secb,
ev: events.NewEvents(context.TODO(), full), dag: dag,
secb: secb,
ev: events.NewEvents(context.TODO(), full),
}
if fc != nil {
na.publishSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxPublishDealsFee)}
na.addBalanceSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxMarketBalanceAddFee)}
}
return na
} }
} }
@ -84,7 +94,7 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark
Value: types.NewInt(0), Value: types.NewInt(0),
Method: builtin0.MethodsMarket.PublishStorageDeals, Method: builtin0.MethodsMarket.PublishStorageDeals,
Params: params, Params: params,
}, nil) }, n.publishSpec)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
} }
@ -183,7 +193,7 @@ func (n *ProviderNodeAdapter) AddFunds(ctx context.Context, addr address.Address
From: addr, From: addr,
Value: amount, Value: amount,
Method: builtin0.MethodsMarket.AddBalance, Method: builtin0.MethodsMarket.AddBalance,
}, nil) }, n.addBalanceSpec)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
} }


@ -345,7 +345,7 @@ func Online() Option {
Override(new(dtypes.DealFilter), modules.BasicDealFilter(nil)), Override(new(dtypes.DealFilter), modules.BasicDealFilter(nil)),
Override(new(modules.ProviderDealFunds), modules.NewProviderDealFunds), Override(new(modules.ProviderDealFunds), modules.NewProviderDealFunds),
Override(new(storagemarket.StorageProvider), modules.StorageProvider), Override(new(storagemarket.StorageProvider), modules.StorageProvider),
Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter), Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(nil)),
Override(HandleRetrievalKey, modules.HandleRetrieval), Override(HandleRetrievalKey, modules.HandleRetrieval),
Override(GetParamsKey, modules.GetParams), Override(GetParamsKey, modules.GetParams),
Override(HandleDealsKey, modules.HandleDeals), Override(HandleDealsKey, modules.HandleDeals),
@ -464,6 +464,8 @@ func ConfigStorageMiner(c interface{}) Option {
Override(new(dtypes.DealFilter), modules.BasicDealFilter(dealfilter.CliDealFilter(cfg.Dealmaking.Filter))), Override(new(dtypes.DealFilter), modules.BasicDealFilter(dealfilter.CliDealFilter(cfg.Dealmaking.Filter))),
), ),
Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees)),
Override(new(sectorstorage.SealerConfig), cfg.Storage), Override(new(sectorstorage.SealerConfig), cfg.Storage),
Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)), Override(new(*storage.Miner), modules.StorageMiner(cfg.Fees)),
) )


@ -61,9 +61,11 @@ type SealingConfig struct {
}

type MinerFeeConfig struct {
	MaxPreCommitGasFee     types.FIL
	MaxCommitGasFee        types.FIL
	MaxWindowPoStGasFee    types.FIL
	MaxPublishDealsFee     types.FIL
	MaxMarketBalanceAddFee types.FIL
}

// API contains configs for API endpoint
@ -147,7 +149,7 @@ func DefaultStorageMiner() *StorageMiner {
			MaxWaitDealsSectors:       2, // 64G with 32G sectors
			MaxSealingSectors:         0,
			MaxSealingSectorsForDeals: 0,
			WaitDealsDelay:            Duration(time.Hour * 6),
		},

		Storage: sectorstorage.SealerConfig{
@ -169,13 +171,15 @@ func DefaultStorageMiner() *StorageMiner {
			ConsiderOfflineRetrievalDeals: true,
			PieceCidBlocklist:             []cid.Cid{},
			// TODO: It'd be nice to set this based on sector size
			ExpectedSealDuration: Duration(time.Hour * 24),
		},

		Fees: MinerFeeConfig{
			MaxPreCommitGasFee:     types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(20))), // 0.05
			MaxCommitGasFee:        types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(20))),
			MaxWindowPoStGasFee:    types.FIL(types.FromFil(50)),
			MaxPublishDealsFee:     types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(33))),  // 0.03ish
			MaxMarketBalanceAddFee: types.FIL(types.BigDiv(types.FromFil(1), types.NewInt(100))), // 0.01
		},
	}

	cfg.Common.API.ListenAddress = "/ip4/127.0.0.1/tcp/2345/http"


@ -59,7 +59,7 @@ import (
var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)

const dealStartBufferHours uint64 = 49

type API struct {
	fx.In
@ -153,7 +153,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
		}

		blocksPerHour := 60 * 60 / build.BlockDelaySecs
		dealStart = ts.Height() + abi.ChainEpoch(dealStartBufferHours*blocksPerHour) // TODO: Get this from storage ask
	}

	result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{


@ -110,6 +110,10 @@ func (a *MpoolAPI) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (ci
	return a.Mpool.Push(smsg)
}
func (a *MpoolAPI) MpoolPushUntrusted(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
return a.Mpool.PushUntrusted(smsg)
}
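// Illustrative use (hypothetical names, not part of the patch): a gateway-style
// service relaying messages signed by external clients would call
// MpoolPushUntrusted instead of MpoolPush, so the pool treats the sender as
// untrusted:
//
//	c, err := fullNode.MpoolPushUntrusted(ctx, clientSignedMsg)
//	if err != nil {
//		return err
//	}
//	fmt.Println("pushed:", c)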
func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
	cp := *msg
	msg = &cp


@ -1019,6 +1019,37 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address
	return types.BigAdd(abal, vested), nil
}
// StateVerifierStatus returns the data cap for the given verifier address.
// Returns nil if there is no entry in the data cap table for the
// address.
func (a *StateAPI) StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
act, err := a.StateGetActor(ctx, verifreg.Address, tsk)
if err != nil {
return nil, err
}
aid, err := a.StateLookupID(ctx, addr, tsk)
if err != nil {
log.Warnf("lookup failure %v", err)
return nil, err
}
vrs, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), act)
if err != nil {
return nil, xerrors.Errorf("failed to load verified registry state: %w", err)
}
verified, dcap, err := vrs.VerifierDataCap(aid)
if err != nil {
return nil, xerrors.Errorf("looking up verifier: %w", err)
}
if !verified {
return nil, nil
}
return &dcap, nil
}
// StateVerifiedClientStatus returns the data cap for the given address.
// Returns zero if there is no entry in the data cap table for the
// address.
@ -1050,6 +1081,20 @@ func (a *StateAPI) StateVerifiedClientStatus(ctx context.Context, addr address.A
	return &dcap, nil
}
func (a *StateAPI) StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) {
vact, err := a.StateGetActor(ctx, verifreg.Address, tsk)
if err != nil {
return address.Undef, err
}
vst, err := verifreg.Load(a.StateManager.ChainStore().Store(ctx), vact)
if err != nil {
return address.Undef, err
}
return vst.RootKey()
}
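// Illustrative use of the two new state calls (hypothetical snippet, not part of
// the patch): tooling can discover who controls verifier allowances without
// hard-coding addresses.
//
//	rootKey, _ := stateAPI.StateVerifiedRegistryRootKey(ctx, types.EmptyTSK)
//	dcap, _ := stateAPI.StateVerifierStatus(ctx, someVerifier, types.EmptyTSK)
//	// rootKey is the msig that can add verifiers; dcap is nil when someVerifier
//	// is not a verifier, otherwise it holds the verifier's current data cap.
//
// The lotus-shed add-verifier command uses StateVerifiedRegistryRootKey in exactly
// this way before proposing AddVerifier through the msig.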
var dealProviderCollateralNum = types.NewInt(110)
var dealProviderCollateralDen = types.NewInt(100)


@ -211,7 +211,7 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add
	}

	// Check the voucher against the highest known voucher nonce / value
	laneStates, err := ca.laneState(pchState, ch)
	if err != nil {
		return nil, err
	}
@ -259,16 +259,9 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add
		return nil, err
	}
	// Total required balance must not exceed actor balance
	if act.Balance.LessThan(totalRedeemed) {
		return nil, newErrInsufficientFunds(types.BigSub(totalRedeemed, act.Balance))
	}

	if len(sv.Merges) != 0 {
@ -454,7 +447,6 @@ func (ca *channelAccessor) allocateLane(ch address.Address) (uint64, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	return ca.store.AllocateLane(ch)
}
@ -469,7 +461,7 @@ func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address)
// laneState gets the LaneStates from chain, then applies all vouchers in
// the data store over the chain state
func (ca *channelAccessor) laneState(state paych.State, ch address.Address) (map[uint64]paych.LaneState, error) {
	// TODO: we probably want to call UpdateChannelState with all vouchers to be fully correct
	// (but technically don't need to)
@ -501,9 +493,12 @@ func (ca *channelAccessor) laneState(ctx context.Context, state paych.State, ch
return nil, xerrors.Errorf("paych merges not handled yet") return nil, xerrors.Errorf("paych merges not handled yet")
} }
		// Check if there is an existing laneState in the payment channel
		// for this voucher's lane
		ls, ok := laneStates[v.Voucher.Lane]

		// If the voucher does not have a higher nonce than the existing
		// laneState for this lane, ignore it
		if ok {
			n, err := ls.Nonce()
			if err != nil {
@ -514,6 +509,7 @@ func (ca *channelAccessor) laneState(ctx context.Context, state paych.State, ch
			}
		}

		// Voucher has a higher nonce, so replace laneState with this voucher
		laneStates[v.Voucher.Lane] = laneState{v.Voucher.Amount, v.Voucher.Nonce}
	}
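	// Net effect of the changes in this function and in checkVoucherValidUnlocked
	// (summary added for clarity): the required channel balance is now just the sum
	// of the effective redeemed amount per lane, where a stored voucher with a higher
	// nonce supersedes the lane state read from chain; the channel's ToSend amount is
	// no longer added on top when validating a new voucher.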


@ -47,7 +47,6 @@ func TestCheckVoucherValid(t *testing.T) {
expectError bool expectError bool
key []byte key []byte
actorBalance big.Int actorBalance big.Int
toSend big.Int
voucherAmount big.Int voucherAmount big.Int
voucherLane uint64 voucherLane uint64
voucherNonce uint64 voucherNonce uint64
@ -56,35 +55,30 @@ func TestCheckVoucherValid(t *testing.T) {
name: "passes when voucher amount < balance", name: "passes when voucher amount < balance",
key: fromKeyPrivate, key: fromKeyPrivate,
actorBalance: big.NewInt(10), actorBalance: big.NewInt(10),
toSend: big.NewInt(0),
voucherAmount: big.NewInt(5), voucherAmount: big.NewInt(5),
}, { }, {
name: "fails when funds too low", name: "fails when funds too low",
expectError: true, expectError: true,
key: fromKeyPrivate, key: fromKeyPrivate,
actorBalance: big.NewInt(5), actorBalance: big.NewInt(5),
toSend: big.NewInt(0),
voucherAmount: big.NewInt(10), voucherAmount: big.NewInt(10),
}, { }, {
name: "fails when invalid signature", name: "fails when invalid signature",
expectError: true, expectError: true,
key: randKeyPrivate, key: randKeyPrivate,
actorBalance: big.NewInt(10), actorBalance: big.NewInt(10),
toSend: big.NewInt(0),
voucherAmount: big.NewInt(5), voucherAmount: big.NewInt(5),
}, { }, {
name: "fails when signed by channel To account (instead of From account)", name: "fails when signed by channel To account (instead of From account)",
expectError: true, expectError: true,
key: toKeyPrivate, key: toKeyPrivate,
actorBalance: big.NewInt(10), actorBalance: big.NewInt(10),
toSend: big.NewInt(0),
voucherAmount: big.NewInt(5), voucherAmount: big.NewInt(5),
}, { }, {
name: "fails when nonce too low", name: "fails when nonce too low",
expectError: true, expectError: true,
key: fromKeyPrivate, key: fromKeyPrivate,
actorBalance: big.NewInt(10), actorBalance: big.NewInt(10),
toSend: big.NewInt(0),
voucherAmount: big.NewInt(5), voucherAmount: big.NewInt(5),
voucherLane: 1, voucherLane: 1,
voucherNonce: 2, voucherNonce: 2,
@ -95,7 +89,6 @@ func TestCheckVoucherValid(t *testing.T) {
name: "passes when nonce higher", name: "passes when nonce higher",
key: fromKeyPrivate, key: fromKeyPrivate,
actorBalance: big.NewInt(10), actorBalance: big.NewInt(10),
toSend: big.NewInt(0),
voucherAmount: big.NewInt(5), voucherAmount: big.NewInt(5),
voucherLane: 1, voucherLane: 1,
voucherNonce: 3, voucherNonce: 3,
@ -106,7 +99,6 @@ func TestCheckVoucherValid(t *testing.T) {
name: "passes when nonce for different lane", name: "passes when nonce for different lane",
key: fromKeyPrivate, key: fromKeyPrivate,
actorBalance: big.NewInt(10), actorBalance: big.NewInt(10),
toSend: big.NewInt(0),
voucherAmount: big.NewInt(5), voucherAmount: big.NewInt(5),
voucherLane: 2, voucherLane: 2,
voucherNonce: 2, voucherNonce: 2,
@ -118,32 +110,22 @@ func TestCheckVoucherValid(t *testing.T) {
expectError: true, expectError: true,
key: fromKeyPrivate, key: fromKeyPrivate,
actorBalance: big.NewInt(10), actorBalance: big.NewInt(10),
toSend: big.NewInt(0),
voucherAmount: big.NewInt(5), voucherAmount: big.NewInt(5),
voucherLane: 1, voucherLane: 1,
voucherNonce: 3, voucherNonce: 3,
laneStates: map[uint64]paych.LaneState{ laneStates: map[uint64]paych.LaneState{
1: paychmock.NewMockLaneState(big.NewInt(6), 2), 1: paychmock.NewMockLaneState(big.NewInt(6), 2),
}, },
}, {
name: "fails when voucher + ToSend > balance",
expectError: true,
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
toSend: big.NewInt(9),
voucherAmount: big.NewInt(2),
}, {
// voucher supersedes lane 1 redeemed so
// lane 1 effective redeemed = voucher amount
//
// required balance = toSend + total redeemed // required balance = voucher amt
// = 1 + 6 (lane1)
// = 7
// So required balance: 7 < actor balance: 10
name: "passes when voucher + total redeemed <= balance", name: "passes when voucher total redeemed <= balance",
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
toSend: big.NewInt(1),
voucherAmount: big.NewInt(6),
voucherLane: 1,
voucherNonce: 2,
@ -152,29 +134,68 @@ func TestCheckVoucherValid(t *testing.T) {
1: paychmock.NewMockLaneState(big.NewInt(4), 1),
},
}, {
// required balance = toSend + total redeemed // required balance = total redeemed
// = 1 + 4 (lane 2) + 6 (voucher lane 1) // = 6 (voucher lane 1) + 5 (lane 2)
// = 11
// So required balance: 11 > actor balance: 10
name: "fails when voucher + total redeemed > balance", name: "fails when voucher total redeemed > balance",
expectError: true,
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
toSend: big.NewInt(1),
voucherAmount: big.NewInt(6),
voucherLane: 1,
voucherNonce: 1,
laneStates: map[uint64]paych.LaneState{
// Lane 2 (different from voucher lane 1)
2: paychmock.NewMockLaneState(big.NewInt(4), 1), 2: paychmock.NewMockLaneState(big.NewInt(5), 1),
},
}, {
// voucher supersedes lane 1 redeemed so
// lane 1 effective redeemed = voucher amount
//
// required balance = total redeemed
// = 6 (new voucher lane 1) + 5 (lane 2)
// = 11
// So required balance: 11 > actor balance: 10
name: "fails when voucher total redeemed > balance",
expectError: true,
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
voucherAmount: big.NewInt(6),
voucherLane: 1,
voucherNonce: 2,
laneStates: map[uint64]paych.LaneState{
// Lane 1 (superseded by new voucher in voucher lane 1)
1: paychmock.NewMockLaneState(big.NewInt(5), 1),
// Lane 2 (different from voucher lane 1)
2: paychmock.NewMockLaneState(big.NewInt(5), 1),
},
}, {
// voucher supersedes lane 1 redeemed so
// lane 1 effective redeemed = voucher amount
//
// required balance = total redeemed
// = 5 (new voucher lane 1) + 5 (lane 2)
// = 10
// So required balance: 10 <= actor balance: 10
name: "passes when voucher total redeemed <= balance",
expectError: false,
key: fromKeyPrivate,
actorBalance: big.NewInt(10),
voucherAmount: big.NewInt(5),
voucherLane: 1,
voucherNonce: 2,
laneStates: map[uint64]paych.LaneState{
// Lane 1 (superseded by new voucher in voucher lane 1)
1: paychmock.NewMockLaneState(big.NewInt(4), 1),
// Lane 2 (different from voucher lane 1)
2: paychmock.NewMockLaneState(big.NewInt(5), 1),
}, },
}}
for _, tcase := range tcases {
tcase := tcase
t.Run(tcase.name, func(t *testing.T) {
store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
// Create an actor for the channel with the test case balance
act := &types.Actor{
Code: builtin.AccountActorCodeID,
@ -184,16 +205,17 @@ func TestCheckVoucherValid(t *testing.T) {
}
mock.setPaychState(ch, act, paychmock.NewMockPayChState(
fromAcct, toAcct, abi.ChainEpoch(0), tcase.toSend, tcase.laneStates)) fromAcct, toAcct, abi.ChainEpoch(0), tcase.laneStates))
// Create a manager
store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
mgr, err := newManager(store, mock)
require.NoError(t, err)
// Add channel To address to wallet
mock.addWalletAddress(to)
// Create a signed voucher // Create the test case signed voucher
sv := createTestVoucher(t, ch, tcase.voucherLane, tcase.voucherNonce, tcase.voucherAmount, tcase.key)
// Check the voucher's validity
@ -207,135 +229,11 @@ func TestCheckVoucherValid(t *testing.T) {
}
}
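Taken together, the cases above pin down the updated validity rule now that toSend no longer enters the calculation: a voucher is valid only if the sum of each lane's effective redeemed amount, where the voucher amount supersedes the existing redeemed amount on its own lane, does not exceed the channel actor's balance. Below is a minimal sketch of that rule, assuming the go-state-types big package already imported by this file; the helper name and its map argument are hypothetical, not Lotus API.

    // requiredBalance sketches the check exercised by the table above.
    func requiredBalance(laneRedeemed map[uint64]big.Int, voucherLane uint64, voucherAmount big.Int) big.Int {
        total := voucherAmount // effective redeemed on the voucher's own lane
        for lane, redeemed := range laneRedeemed {
            if lane == voucherLane {
                // Superseded: the new voucher replaces this lane's redeemed amount.
                continue
            }
            total = big.Add(total, redeemed)
        }
        return total
    }

    // The voucher is valid when requiredBalance(...) is at most the actor balance, e.g.
    // 6 (voucher, lane 1) + 5 (lane 2) = 11 > 10 fails, while
    // 5 (voucher, lane 1) + 5 (lane 2) = 10 <= 10 passes.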
func TestCheckVoucherValidCountingAllLanes(t *testing.T) {
ctx := context.Background()
fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t)
ch := tutils.NewIDAddr(t, 100)
from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic))
to := tutils.NewSECP256K1Addr(t, "secpTo")
fromAcct := tutils.NewActorAddr(t, "fromAct")
toAcct := tutils.NewActorAddr(t, "toAct")
minDelta := big.NewInt(0)
mock := newMockManagerAPI()
mock.setAccountAddress(fromAcct, from)
mock.setAccountAddress(toAcct, to)
store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
actorBalance := big.NewInt(10)
toSend := big.NewInt(1)
laneStates := map[uint64]paych.LaneState{
1: paychmock.NewMockLaneState(big.NewInt(3), 1),
2: paychmock.NewMockLaneState(big.NewInt(4), 1),
}
act := &types.Actor{
Code: builtin.AccountActorCodeID,
Head: cid.Cid{},
Nonce: 0,
Balance: actorBalance,
}
mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), toSend, laneStates))
mgr, err := newManager(store, mock)
require.NoError(t, err)
// Add channel To address to wallet
mock.addWalletAddress(to)
//
// Should not be possible to add a voucher with a value such that
// <total lane Redeemed> + toSend > <actor balance>
//
// lane 1 redeemed: 3
// voucher amount (lane 1): 6
// lane 1 redeemed (with voucher): 6
//
// Lane 1: 6
// Lane 2: 4
// toSend: 1
// --
// total: 11
//
// actor balance is 10 so total is too high.
//
voucherLane := uint64(1)
voucherNonce := uint64(2)
voucherAmount := big.NewInt(6)
sv := createTestVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
err = mgr.CheckVoucherValid(ctx, ch, sv)
require.Error(t, err)
//
// lane 1 redeemed: 3
// voucher amount (lane 1): 4
// lane 1 redeemed (with voucher): 4
//
// Lane 1: 4
// Lane 2: 4
// toSend: 1
// --
// total: 9
//
// actor balance is 10 so total is ok.
//
voucherAmount = big.NewInt(4)
sv = createTestVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
err = mgr.CheckVoucherValid(ctx, ch, sv)
require.NoError(t, err)
// Add voucher to lane 1, so Lane 1 effective redeemed
// (with first voucher) is now 4
_, err = mgr.AddVoucherOutbound(ctx, ch, sv, nil, minDelta)
require.NoError(t, err)
//
// lane 1 redeemed: 4
// voucher amount (lane 1): 6
// lane 1 redeemed (with voucher): 6
//
// Lane 1: 6
// Lane 2: 4
// toSend: 1
// --
// total: 11
//
// actor balance is 10 so total is too high.
//
voucherNonce++
voucherAmount = big.NewInt(6)
sv = createTestVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
err = mgr.CheckVoucherValid(ctx, ch, sv)
require.Error(t, err)
//
// lane 1 redeemed: 4
// voucher amount (lane 1): 5
// lane 1 redeemed (with voucher): 5
//
// Lane 1: 5
// Lane 2: 4
// toSend: 1
// --
// total: 10
//
// actor balance is 10 so total is ok.
//
voucherAmount = big.NewInt(5)
sv = createTestVoucher(t, ch, voucherLane, voucherNonce, voucherAmount, fromKeyPrivate)
err = mgr.CheckVoucherValid(ctx, ch, sv)
require.NoError(t, err)
}
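For reference, the walkthrough deleted above applied the old rule, in which toSend still counted toward the required balance. With its numbers (toSend 1, lane 1 redeemed superseded by a voucher for 6, lane 2 redeemed 4, actor balance 10), the two rules compare as follows; the second line applies the updated rule from the retained table and is shown only for contrast.

    old rule:  required = toSend + effective lane redemptions = 1 + 6 (lane 1) + 4 (lane 2) = 11 > 10  -> rejected
    new rule:  required = effective lane redemptions          =     6 (lane 1) + 4 (lane 2) = 10 <= 10 -> accepted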
func TestCreateVoucher(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
s := testSetupMgrWithChannel(ctx, t) s := testSetupMgrWithChannel(t)
// Create a voucher in lane 1
voucherLane1Amt := big.NewInt(5)
@ -400,7 +298,7 @@ func TestAddVoucherDelta(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
s := testSetupMgrWithChannel(ctx, t) s := testSetupMgrWithChannel(t)
voucherLane := uint64(1)
@ -442,7 +340,7 @@ func TestAddVoucherNextLane(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
s := testSetupMgrWithChannel(ctx, t) s := testSetupMgrWithChannel(t)
minDelta := big.NewInt(0)
voucherAmount := big.NewInt(2)
@ -489,10 +387,8 @@ func TestAddVoucherNextLane(t *testing.T) {
}
func TestAllocateLane(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
s := testSetupMgrWithChannel(ctx, t) s := testSetupMgrWithChannel(t)
// First lane should be 0
lane, err := s.mgr.AllocateLane(s.ch)
@ -525,7 +421,6 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) {
// Create a channel that will be retrieved from state
actorBalance := big.NewInt(10)
toSend := big.NewInt(1)
act := &types.Actor{
Code: builtin.AccountActorCodeID,
@ -534,7 +429,7 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) {
Balance: actorBalance,
}
mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), toSend, make(map[uint64]paych.LaneState))) mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
mgr, err := newManager(store, mock)
require.NoError(t, err)
@ -575,10 +470,11 @@ func TestAddVoucherInboundWalletKey(t *testing.T) {
}
mock := newMockManagerAPI()
mock.setAccountAddress(fromAcct, from)
mock.setAccountAddress(toAcct, to)
mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), types.NewInt(0), make(map[uint64]paych.LaneState))) mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
// Create a manager
store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
@ -613,7 +509,7 @@ func TestBestSpendable(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
s := testSetupMgrWithChannel(ctx, t) s := testSetupMgrWithChannel(t)
// Add vouchers to lane 1 with amounts: [1, 2, 3]
voucherLane := uint64(1)
@ -693,7 +589,7 @@ func TestCheckSpendable(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
s := testSetupMgrWithChannel(ctx, t) s := testSetupMgrWithChannel(t)
// Create voucher with Extra
voucherLane := uint64(1)
@ -757,7 +653,7 @@ func TestSubmitVoucher(t *testing.T) {
ctx := context.Background()
// Set up a manager with a single payment channel
s := testSetupMgrWithChannel(ctx, t) s := testSetupMgrWithChannel(t)
// Create voucher with Extra
voucherLane := uint64(1)
@ -819,7 +715,7 @@ type testScaffold struct {
fromKeyPrivate []byte
}
func testSetupMgrWithChannel(ctx context.Context, t *testing.T) *testScaffold { func testSetupMgrWithChannel(t *testing.T) *testScaffold {
fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t)
ch := tutils.NewIDAddr(t, 100)
@ -840,7 +736,7 @@ func testSetupMgrWithChannel(ctx context.Context, t *testing.T) *testScaffold {
Nonce: 0,
Balance: balance,
}
mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), big.NewInt(0), make(map[uint64]paych.LaneState))) mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
mgr, err := newManager(store, mock)
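The recurring change in this file is that paychmock.NewMockPayChState no longer takes a toSend amount. Based solely on the calls shown in this diff, the updated setup reads as below; the lane values are the ones used in the cases above, pulled out only to show the new argument list in isolation.

    // Updated mock channel state: no toSend argument, only the lane states.
    laneStates := map[uint64]paych.LaneState{
        1: paychmock.NewMockLaneState(big.NewInt(4), 1),
    }
    mock.setPaychState(ch, act, paychmock.NewMockPayChState(
        fromAcct, toAcct, abi.ChainEpoch(0), laneStates))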

View File

@ -978,7 +978,7 @@ func TestPaychAvailableFunds(t *testing.T) {
Nonce: 0,
Balance: createAmt,
}
mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), big.NewInt(0), make(map[uint64]paych.LaneState))) mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
// Send create channel response
response := testChannelResponse(t, ch)
mock.receiveMsgResponse(createMsgCid, response)

View File

@ -0,0 +1,103 @@
package paychmgr
import (
"context"
"testing"
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
paychmock "github.com/filecoin-project/lotus/chain/actors/builtin/paych/mock"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/specs-actors/actors/builtin"
tutils "github.com/filecoin-project/specs-actors/support/testing"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
ds_sync "github.com/ipfs/go-datastore/sync"
"github.com/stretchr/testify/require"
)
// TestPaychAddVoucherAfterAddFunds tests adding a voucher to a channel with
// insufficient funds, then adding funds to the channel, then adding the
// voucher again
func TestPaychAddVoucherAfterAddFunds(t *testing.T) {
ctx := context.Background()
store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
fromKeyPrivate, fromKeyPublic := testGenerateKeyPair(t)
ch := tutils.NewIDAddr(t, 100)
from := tutils.NewSECP256K1Addr(t, string(fromKeyPublic))
to := tutils.NewSECP256K1Addr(t, "secpTo")
fromAcct := tutils.NewActorAddr(t, "fromAct")
toAcct := tutils.NewActorAddr(t, "toAct")
mock := newMockManagerAPI()
defer mock.close()
// Add the from signing key to the wallet
mock.setAccountAddress(fromAcct, from)
mock.setAccountAddress(toAcct, to)
mock.addSigningKey(fromKeyPrivate)
mgr, err := newManager(store, mock)
require.NoError(t, err)
// Send create message for a channel with value 10
createAmt := big.NewInt(10)
_, createMsgCid, err := mgr.GetPaych(ctx, from, to, createAmt)
require.NoError(t, err)
// Send create channel response
response := testChannelResponse(t, ch)
mock.receiveMsgResponse(createMsgCid, response)
// Create an actor in state for the channel with the initial channel balance
act := &types.Actor{
Code: builtin.AccountActorCodeID,
Head: cid.Cid{},
Nonce: 0,
Balance: createAmt,
}
mock.setPaychState(ch, act, paychmock.NewMockPayChState(fromAcct, toAcct, abi.ChainEpoch(0), make(map[uint64]paych.LaneState)))
// Wait for create response to be processed by manager
_, err = mgr.GetPaychWaitReady(ctx, createMsgCid)
require.NoError(t, err)
// Create a voucher with a value equal to the channel balance
voucher := paych.SignedVoucher{Amount: createAmt, Lane: 1}
res, err := mgr.CreateVoucher(ctx, ch, voucher)
require.NoError(t, err)
require.NotNil(t, res.Voucher)
// Create a voucher in a different lane with an amount that exceeds the
// channel balance
excessAmt := types.NewInt(5)
voucher = paych.SignedVoucher{Amount: excessAmt, Lane: 2}
res, err = mgr.CreateVoucher(ctx, ch, voucher)
require.NoError(t, err)
require.Nil(t, res.Voucher)
require.Equal(t, res.Shortfall, excessAmt)
// Add enough funds to cover the voucher shortfall
_, addFundsMsgCid, err := mgr.GetPaych(ctx, from, to, excessAmt)
require.NoError(t, err)
// Trigger add funds confirmation
mock.receiveMsgResponse(addFundsMsgCid, types.MessageReceipt{ExitCode: 0})
// Update actor test case balance to reflect added funds
act.Balance = types.BigAdd(createAmt, excessAmt)
// Wait for add funds confirmation to be processed by manager
_, err = mgr.GetPaychWaitReady(ctx, addFundsMsgCid)
require.NoError(t, err)
// Adding the same voucher that previously exceeded the channel balance
// should now succeed, because the channel balance has been increased
res, err = mgr.CreateVoucher(ctx, ch, voucher)
require.NoError(t, err)
require.NotNil(t, res.Voucher)
}
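The test above drives CreateVoucher through its shortfall path. Below is a caller-side sketch of the same flow, assuming it sits in package paychmgr next to this test, that the manager type is the package's Manager, and that ch, from and to are address.Address values as in the test; the helper itself is hypothetical, while the calls (CreateVoucher, GetPaych, GetPaychWaitReady) and the res.Voucher/res.Shortfall fields are the ones the test uses.

    // addVoucherWithTopUp creates a voucher and, if the channel balance falls
    // short, adds the reported shortfall to the channel and retries once.
    func addVoucherWithTopUp(ctx context.Context, mgr *Manager, ch, from, to address.Address, voucher paych.SignedVoucher) (*paych.SignedVoucher, error) {
        res, err := mgr.CreateVoucher(ctx, ch, voucher)
        if err != nil {
            return nil, err
        }
        if res.Voucher != nil {
            return res.Voucher, nil
        }
        // Not enough funds in the channel: top it up by the reported shortfall.
        _, addFundsMsgCid, err := mgr.GetPaych(ctx, from, to, res.Shortfall)
        if err != nil {
            return nil, err
        }
        // Wait for the add-funds message to be processed before retrying.
        if _, err := mgr.GetPaychWaitReady(ctx, addFundsMsgCid); err != nil {
            return nil, err
        }
        res, err = mgr.CreateVoucher(ctx, ch, voucher)
        if err != nil {
            return nil, err
        }
        return res.Voucher, nil
    }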

View File

@ -309,7 +309,7 @@ func (ca *channelAccessor) currentAvailableFunds(channelID string, queuedAmt typ
return nil, err
}
laneStates, err := ca.laneState(ca.chctx, pchState, ch) laneStates, err := ca.laneState(pchState, ch)
if err != nil {
return nil, err
}

View File

@ -199,16 +199,24 @@ func RecordTipsetPoints(ctx context.Context, api api.FullNode, pl *PointList, ti
return nil
}
type apiIpldStore struct { type ApiIpldStore struct {
ctx context.Context
api api.FullNode api apiIpldStoreApi
}
func (ht *apiIpldStore) Context() context.Context { type apiIpldStoreApi interface {
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
}
func NewApiIpldStore(ctx context.Context, api apiIpldStoreApi) *ApiIpldStore {
return &ApiIpldStore{ctx, api}
}
func (ht *ApiIpldStore) Context() context.Context {
return ht.ctx
}
func (ht *apiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { func (ht *ApiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
raw, err := ht.api.ChainReadObj(ctx, c)
if err != nil {
return err
@ -225,8 +233,8 @@ func (ht *apiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) err
return fmt.Errorf("Object does not implement CBORUnmarshaler")
}
func (ht *apiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { func (ht *ApiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
return cid.Undef, fmt.Errorf("Put is not implemented on apiIpldStore") return cid.Undef, fmt.Errorf("Put is not implemented on ApiIpldStore")
}
func RecordTipsetStatePoints(ctx context.Context, api api.FullNode, pl *PointList, tipset *types.TipSet) error {
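The exported constructor and the narrow apiIpldStoreApi interface shown above make the store usable with anything that can serve ChainReadObj, not only a full node API. A small sketch of that, assuming it sits in the same package; fakeChainReader and exampleStore are illustrative names, not part of the change.

    // fakeChainReader serves raw blocks from memory; any type with
    // ChainReadObj(context.Context, cid.Cid) ([]byte, error) satisfies the
    // store's dependency.
    type fakeChainReader struct {
        objects map[cid.Cid][]byte
    }

    func (f *fakeChainReader) ChainReadObj(_ context.Context, c cid.Cid) ([]byte, error) {
        data, ok := f.objects[c]
        if !ok {
            return nil, fmt.Errorf("object %s not found", c)
        }
        return data, nil
    }

    func exampleStore(ctx context.Context) *ApiIpldStore {
        // A real caller would pass an api.FullNode here; a test can pass a fake.
        return NewApiIpldStore(ctx, &fakeChainReader{objects: map[cid.Cid][]byte{}})
    }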