Merge pull request #8781 from filecoin-project/release/v1.15.3

build: release: v1.15.3
Jiaying Wang 2022-06-01 22:46:43 -04:00 committed by GitHub
commit 2f6a383023
112 changed files with 2303 additions and 596 deletions

View File

@ -67,14 +67,14 @@ commands:
- run: |
apt update
apt install -y wget
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512
if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ]
wget https://github.com/ipfs/go-ipfs/releases/download/v0.12.2/go-ipfs_v0.12.2_linux-amd64.tar.gz
wget https://github.com/ipfs/go-ipfs/releases/download/v0.12.2/go-ipfs_v0.12.2_linux-amd64.tar.gz.sha512
if [ "$(sha512sum go-ipfs_v0.12.2_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.12.2_linux-amd64.tar.gz.sha512)" ]
then
echo "ipfs failed checksum check"
exit 1
fi
tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz
tar -xf go-ipfs_v0.12.2_linux-amd64.tar.gz
mv go-ipfs/ipfs /usr/local/bin/ipfs
chmod +x /usr/local/bin/ipfs
git_fetch_all_tags:
@ -863,6 +863,11 @@ workflows:
suite: itest-get_messages_in_ts
target: "./itests/get_messages_in_ts_test.go"
- test:
name: test-itest-lookup_robust_address
suite: itest-lookup_robust_address
target: "./itests/lookup_robust_address_test.go"
- test:
name: test-itest-mempool
suite: itest-mempool
@ -1093,4 +1098,4 @@ workflows:
only:
- master
jobs:
- publish-packer-snap
- publish-packer-snap

View File

@ -67,14 +67,14 @@ commands:
- run: |
apt update
apt install -y wget
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512
if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ]
wget https://github.com/ipfs/go-ipfs/releases/download/v0.12.2/go-ipfs_v0.12.2_linux-amd64.tar.gz
wget https://github.com/ipfs/go-ipfs/releases/download/v0.12.2/go-ipfs_v0.12.2_linux-amd64.tar.gz.sha512
if [ "$(sha512sum go-ipfs_v0.12.2_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.12.2_linux-amd64.tar.gz.sha512)" ]
then
echo "ipfs failed checksum check"
exit 1
fi
tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz
tar -xf go-ipfs_v0.12.2_linux-amd64.tar.gz
mv go-ipfs/ipfs /usr/local/bin/ipfs
chmod +x /usr/local/bin/ipfs
git_fetch_all_tags:
@ -898,4 +898,4 @@ workflows:
only:
- master
jobs:
- publish-packer-snap
- publish-packer-snap

View File

@ -35,27 +35,27 @@ coverage:
markets:
target: auto
threshold: 1%
informational: false
informational: true
paths:
- "markets"
- "paychmgr"
miner:
target: auto
threshold: 1.5%
informational: false
informational: true
paths:
- "miner"
- "storage"
chain:
target: auto
threshold: 1%
informational: false
informational: true
paths:
- "chain"
node:
target: auto
threshold: 1%
informational: false
informational: true
paths:
- "node"
- "blockstore"

View File

@ -1,5 +1,78 @@
# Lotus changelog
# 1.15.3 / 2022-05-31
This is an optional release of Lotus that includes new APIs, some improvements, and bug fixes.
## New Features
- feat: api: add StateGetNetworkParams api ([filecoin-project/lotus#8546](https://github.com/filecoin-project/lotus/pull/8546))
- feat: api: Implement StateLookupRobustAddress ([filecoin-project/lotus#8486](https://github.com/filecoin-project/lotus/pull/8486))
- sealing: DataCid on workers ([filecoin-project/lotus#8557](https://github.com/filecoin-project/lotus/pull/8557))
- feat: FVM: Support exectraces ([filecoin-project/lotus#8514](https://github.com/filecoin-project/lotus/pull/8514))
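For illustration, a minimal sketch of calling the new StateLookupRobustAddress API from Go, assuming an already-connected `api.FullNode` client (the helper name and client setup are assumptions; the method signature is the one added in this release):

```go
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	lapi "github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// resolveRobust is a hypothetical helper: it resolves a numeric actor ID
// (e.g. 1234 for f01234) back to its robust address via the new API.
func resolveRobust(ctx context.Context, node lapi.FullNode, id uint64) (address.Address, error) {
	idAddr, err := address.NewIDAddress(id)
	if err != nil {
		return address.Undef, err
	}
	// types.EmptyTSK asks the node to resolve against its current heaviest tipset.
	robust, err := node.StateLookupRobustAddress(ctx, idAddr, types.EmptyTSK)
	if err != nil {
		return address.Undef, fmt.Errorf("lookup robust address: %w", err)
	}
	return robust, nil
}
```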
## Improvements
- ux: wallet: update delete usage ([filecoin-project/lotus#8442](https://github.com/filecoin-project/lotus/pull/8442))
- chore: config: default-disable kvlog ([filecoin-project/lotus#8477](https://github.com/filecoin-project/lotus/pull/8477))
- chore: cli: Alias cli commands for uniformity ([filecoin-project/lotus#8587](https://github.com/filecoin-project/lotus/pull/8587))
## Bug Fixes
- fix: Make deal making logs much less noisy ([filecoin-project/lotus#8622](https://github.com/filecoin-project/lotus/pull/8622))
- fix: mpool: avoid deadlock on unsubscribe ([filecoin-project/lotus#8635](https://github.com/filecoin-project/lotus/pull/8635))
- fix: update StatApplied when fvm apply message ([filecoin-project/lotus#8545](https://github.com/filecoin-project/lotus/pull/8545))
- fix: ci: build macos and linux assets on tagged releases ([filecoin-project/lotus#8597](https://github.com/filecoin-project/lotus/pull/8597))
- fix: Clean up vanilla proof fetching errors for proper display ([filecoin-project/lotus#8564](https://github.com/filecoin-project/lotus/pull/8564))
- fix: Make markets logger less noisy when doing retrievals ([filecoin-project/lotus#8604](https://github.com/filecoin-project/lotus/pull/8604))
- fix: test: Fix flaky TestGatewayWalletMsig ([filecoin-project/lotus#8601](https://github.com/filecoin-project/lotus/pull/8601))
- fix: lotus-wallet: correct network in version ([filecoin-project/lotus#8563](https://github.com/filecoin-project/lotus/pull/8563))
- fix: worker: Download proofs if PRU2 is enabled ([filecoin-project/lotus#8555](https://github.com/filecoin-project/lotus/pull/8555))
- fix: ux: update `lotus-wallet run` description ([filecoin-project/lotus#8528](https://github.com/filecoin-project/lotus/pull/8528))
- fix: market: Infer index provider topic from network name by default ([filecoin-project/lotus#8526](https://github.com/filecoin-project/lotus/pull/8526))
- fix: storiface: Make FSOverhead numbers more accurate ([filecoin-project/lotus#8481](https://github.com/filecoin-project/lotus/pull/8481))
- fix: node: fix comment for IndexProvider ([filecoin-project/lotus#8480](https://github.com/filecoin-project/lotus/pull/8480))
- fix: sealing: fail to add expired precommits to a batch ([filecoin-project/lotus#8479](https://github.com/filecoin-project/lotus/pull/8479))
- fix: sealing: check index out of bounds against correct param length, not return length ([filecoin-project/lotus#8475](https://github.com/filecoin-project/lotus/pull/8475))
- fix: sealing: Finalize snap sectors before submitting proofs ([filecoin-project/lotus#8582](https://github.com/filecoin-project/lotus/pull/8582))
- chore: fix lint issue ([filecoin-project/lotus#8531](https://github.com/filecoin-project/lotus/pull/8531))
- feat: vm: add actor error backtraces to FVM ([filecoin-project/lotus#8524](https://github.com/filecoin-project/lotus/pull/8524))
## Dependency Updates
- github.com/filecoin-project/specs-storage (v0.2.2 -> v0.2.3-0.20220426183226-1a0a63c5990f):
- deps: update go-libp2p@v0.19 ([filecoin-project/lotus#8511](https://github.com/filecoin-project/lotus/pull/8511))
- chore: deps: Use tagged version of specs-storage #8764
## Others
- chore: merge releases back into master ([filecoin-project/lotus#8638](https://github.com/filecoin-project/lotus/pull/8638))
- chore:ci:make codecov quiet ([filecoin-project/lotus#8600](https://github.com/filecoin-project/lotus/pull/8600))
- chore: version: bump the version to v1.15.3-dev ([filecoin-project/lotus#8473](https://github.com/filecoin-project/lotus/pull/8473))
- chore: .gitignore: ignore built in actor bundles ([filecoin-project/lotus#8590](https://github.com/filecoin-project/lotus/pull/8590))
- ci: deps: macos build deps ([filecoin-project/lotus#8581](https://github.com/filecoin-project/lotus/pull/8581))
- chore:chore:chore remove the wrong TODO ([filecoin-project/lotus#8586](https://github.com/filecoin-project/lotus/pull/8586))
- add 1475 bootstrapper ([filecoin-project/lotus#8495](https://github.com/filecoin-project/lotus/pull/8495))
- Bump version for xtools ([filecoin-project/lotus#8494](https://github.com/filecoin-project/lotus/pull/8494))
- chore: bundle: remove wrongly committed bundle cars #8763
## Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| @stebalien | 4 | +607/-95 | 19 |
| @magik6k | 9 | +550/-37 | 43 |
| @geoff-vball | 5 | +279/-219 | 27 |
| @simlecode | 1 | +306/-39 | 20 |
| @arajasek | 1 | +256/-34 | 10 |
| @zenground0 | 11 | +214/-66 | 31 |
| @arajasek | 2 | +149/-99 | 8 |
| @vyzo | 3 | +125/-81 | 4 |
| @Masih | 1 | +134/-15 | 7 |
| @travisperson | 3 | +24/-32 | 6 |
| @Rjan | 6 | +16/-16 | 9 |
| @jennijuju | 3 | +9/-8 | 15 |
| Rob Quist | 3 | +12/-4 | 3 |
| @Icarus9913 | 1 | +3/-3 | 3 |
| @swift-mx | 1 | +3/-0 | 1 |
| @Phi-rjan | 1 | +1/-1 | 1 |
| @lifei | 1 | +1/-0 | 1 |
# 1.15.2 / 2022-05-06
This is a highly recommended feature release of Lotus, v1.15.2. This feature release introduces many new features for SPs, including PoSt workers, a sealing scheduler, a snap deal queue, and more.

View File

@ -193,7 +193,7 @@ BINS+=lotus-health
lotus-wallet:
rm -f lotus-wallet
$(GOCC) build -o lotus-wallet ./cmd/lotus-wallet
$(GOCC) build $(GOFLAGS) -o lotus-wallet ./cmd/lotus-wallet
.PHONY: lotus-wallet
BINS+=lotus-wallet
@ -362,4 +362,4 @@ print-%:
@echo $*=$($*)
circleci:
go generate -x ./.circleci
go generate -x ./.circleci

View File

@ -10,7 +10,7 @@
<a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
<a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
<a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.16-blue.svg" /></a>
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.17-blue.svg" /></a>
<br>
</p>

View File

@ -522,8 +522,10 @@ type FullNode interface {
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) //perm:read
// StateLookupID retrieves the ID address of the given address
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateAccountKey returns the public key address of the given ID address
// StateAccountKey returns the public key address of the given ID address for secp and bls accounts
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateLookupRobustAddress returns the public key address of the given ID address for non-account addresses (multisig, miners etc)
StateLookupRobustAddress(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateChangedActors returns all the actors whose states change between the two given state CIDs
// TODO: Should this take tipset keys instead?
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read
@ -590,6 +592,9 @@ type FullNode interface {
// StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
// StateGetNetworkParams return current network params
StateGetNetworkParams(ctx context.Context) (*NetworkParams, error) //perm:read
// MethodGroup: Msig
// The Msig methods are used to interact with multisig wallets on the
// filecoin network

View File

@ -54,6 +54,8 @@ type StorageMiner interface {
ComputeWindowPoSt(ctx context.Context, dlIdx uint64, tsk types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) //perm:admin
ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) //perm:admin
// Temp api for testing
PledgeSector(context.Context) (abi.SectorID, error) //perm:write
@ -124,6 +126,7 @@ type StorageMiner interface {
WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin
//storiface.WorkerReturn
ReturnDataCid(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true
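The ComputeDataCid and ReturnDataCid entries above are the miner-side plumbing for the new DataCid-on-workers feature. A minimal sketch of invoking it, assuming a connected `api.StorageMiner` client (buffer size and contents are arbitrary example values; 2032 bytes is one valid unpadded piece size):

```go
package example

import (
	"bytes"
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	lapi "github.com/filecoin-project/lotus/api"
)

// examplePieceCid computes the piece CID of an in-memory buffer using the
// new ComputeDataCid API. Assumes `miner` is a connected StorageMiner client.
func examplePieceCid(ctx context.Context, miner lapi.StorageMiner) error {
	data := make([]byte, 2032) // 2032 unpadded bytes pad up to a 2 KiB piece
	pi, err := miner.ComputeDataCid(ctx, abi.UnpaddedPieceSize(len(data)), bytes.NewReader(data))
	if err != nil {
		return fmt.Errorf("compute data cid: %w", err)
	}
	fmt.Println("piece CID:", pi.PieceCID, "padded size:", pi.Size)
	return nil
}
```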

View File

@ -34,6 +34,7 @@ type Worker interface {
Info(context.Context) (storiface.WorkerInfo, error) //perm:admin
// storiface.WorkerCalls
DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) //perm:admin
AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) //perm:admin
SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) //perm:admin

View File

@ -2406,6 +2406,21 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{})
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
}
// StateGetNetworkParams mocks base method.
func (m *MockFullNode) StateGetNetworkParams(arg0 context.Context) (*api.NetworkParams, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateGetNetworkParams", arg0)
ret0, _ := ret[0].(*api.NetworkParams)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateGetNetworkParams indicates an expected call of StateGetNetworkParams.
func (mr *MockFullNodeMockRecorder) StateGetNetworkParams(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetNetworkParams", reflect.TypeOf((*MockFullNode)(nil).StateGetNetworkParams), arg0)
}
// StateGetRandomnessFromBeacon mocks base method.
func (m *MockFullNode) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) {
m.ctrl.T.Helper()
@ -2496,6 +2511,21 @@ func (mr *MockFullNodeMockRecorder) StateLookupID(arg0, arg1, arg2 interface{})
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupID", reflect.TypeOf((*MockFullNode)(nil).StateLookupID), arg0, arg1, arg2)
}
// StateLookupRobustAddress mocks base method.
func (m *MockFullNode) StateLookupRobustAddress(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateLookupRobustAddress", arg0, arg1, arg2)
ret0, _ := ret[0].(address.Address)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateLookupRobustAddress indicates an expected call of StateLookupRobustAddress.
func (mr *MockFullNodeMockRecorder) StateLookupRobustAddress(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateLookupRobustAddress", reflect.TypeOf((*MockFullNode)(nil).StateLookupRobustAddress), arg0, arg1, arg2)
}
// StateMarketBalance mocks base method.
func (m *MockFullNode) StateMarketBalance(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (api.MarketBalance, error) {
m.ctrl.T.Helper()

View File

@ -352,6 +352,8 @@ type FullNodeStruct struct {
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
StateGetNetworkParams func(p0 context.Context) (*NetworkParams, error) `perm:"read"`
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
StateGetRandomnessFromTickets func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
@ -364,6 +366,8 @@ type FullNodeStruct struct {
StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
StateLookupRobustAddress func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
StateMarketBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) `perm:"read"`
StateMarketDeals func(p0 context.Context, p1 types.TipSetKey) (map[string]MarketDeal, error) `perm:"read"`
@ -639,6 +643,8 @@ type StorageMinerStruct struct {
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
ComputeDataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data) (abi.PieceInfo, error) `perm:"admin"`
ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"`
ComputeWindowPoSt func(p0 context.Context, p1 uint64, p2 types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) `perm:"admin"`
@ -741,6 +747,8 @@ type StorageMinerStruct struct {
ReturnAddPiece func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"`
ReturnDataCid func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"`
ReturnFetch func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
ReturnFinalizeReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
@ -892,6 +900,8 @@ type WorkerStruct struct {
Internal struct {
AddPiece func(p0 context.Context, p1 storage.SectorRef, p2 []abi.UnpaddedPieceSize, p3 abi.UnpaddedPieceSize, p4 storage.Data) (storiface.CallID, error) `perm:"admin"`
DataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data) (storiface.CallID, error) `perm:"admin"`
Enabled func(p0 context.Context) (bool, error) `perm:"admin"`
Fetch func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
@ -2462,6 +2472,17 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2
return nil, ErrNotSupported
}
func (s *FullNodeStruct) StateGetNetworkParams(p0 context.Context) (*NetworkParams, error) {
if s.Internal.StateGetNetworkParams == nil {
return nil, ErrNotSupported
}
return s.Internal.StateGetNetworkParams(p0)
}
func (s *FullNodeStub) StateGetNetworkParams(p0 context.Context) (*NetworkParams, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) StateGetRandomnessFromBeacon(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
if s.Internal.StateGetRandomnessFromBeacon == nil {
return *new(abi.Randomness), ErrNotSupported
@ -2528,6 +2549,17 @@ func (s *FullNodeStub) StateLookupID(p0 context.Context, p1 address.Address, p2
return *new(address.Address), ErrNotSupported
}
func (s *FullNodeStruct) StateLookupRobustAddress(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
if s.Internal.StateLookupRobustAddress == nil {
return *new(address.Address), ErrNotSupported
}
return s.Internal.StateLookupRobustAddress(p0, p1, p2)
}
func (s *FullNodeStub) StateLookupRobustAddress(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
return *new(address.Address), ErrNotSupported
}
func (s *FullNodeStruct) StateMarketBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MarketBalance, error) {
if s.Internal.StateMarketBalance == nil {
return *new(MarketBalance), ErrNotSupported
@ -3848,6 +3880,17 @@ func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPo
return *new(map[abi.SectorNumber]string), ErrNotSupported
}
func (s *StorageMinerStruct) ComputeDataCid(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data) (abi.PieceInfo, error) {
if s.Internal.ComputeDataCid == nil {
return *new(abi.PieceInfo), ErrNotSupported
}
return s.Internal.ComputeDataCid(p0, p1, p2)
}
func (s *StorageMinerStub) ComputeDataCid(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data) (abi.PieceInfo, error) {
return *new(abi.PieceInfo), ErrNotSupported
}
func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) {
if s.Internal.ComputeProof == nil {
return *new([]builtin.PoStProof), ErrNotSupported
@ -4409,6 +4452,17 @@ func (s *StorageMinerStub) ReturnAddPiece(p0 context.Context, p1 storiface.CallI
return ErrNotSupported
}
func (s *StorageMinerStruct) ReturnDataCid(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error {
if s.Internal.ReturnDataCid == nil {
return ErrNotSupported
}
return s.Internal.ReturnDataCid(p0, p1, p2, p3)
}
func (s *StorageMinerStub) ReturnDataCid(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
if s.Internal.ReturnFetch == nil {
return ErrNotSupported
@ -5146,6 +5200,17 @@ func (s *WorkerStub) AddPiece(p0 context.Context, p1 storage.SectorRef, p2 []abi
return *new(storiface.CallID), ErrNotSupported
}
func (s *WorkerStruct) DataCid(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data) (storiface.CallID, error) {
if s.Internal.DataCid == nil {
return *new(storiface.CallID), ErrNotSupported
}
return s.Internal.DataCid(p0, p1, p2)
}
func (s *WorkerStub) DataCid(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storage.Data) (storiface.CallID, error) {
return *new(storiface.CallID), ErrNotSupported
}
func (s *WorkerStruct) Enabled(p0 context.Context) (bool, error) {
if s.Internal.Enabled == nil {
return false, ErrNotSupported

View File

@ -11,6 +11,7 @@ import (
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-graphsync"
@ -19,8 +20,6 @@ import (
ma "github.com/multiformats/go-multiaddr"
)
// TODO: check if this exists anywhere else
type MultiaddrSlice []ma.Multiaddr
func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) {
@ -286,3 +285,35 @@ type ExportRef struct {
FromLocalCAR string // if specified, get data from a local CARv2 file.
DealID retrievalmarket.DealID
}
type NetworkParams struct {
NetworkName dtypes.NetworkName
BlockDelaySecs uint64
ConsensusMinerMinPower abi.StoragePower
SupportedProofTypes []abi.RegisteredSealProof
PreCommitChallengeDelay abi.ChainEpoch
ForkUpgradeParams ForkUpgradeParams
}
type ForkUpgradeParams struct {
UpgradeSmokeHeight abi.ChainEpoch
UpgradeBreezeHeight abi.ChainEpoch
UpgradeIgnitionHeight abi.ChainEpoch
UpgradeLiftoffHeight abi.ChainEpoch
UpgradeAssemblyHeight abi.ChainEpoch
UpgradeRefuelHeight abi.ChainEpoch
UpgradeTapeHeight abi.ChainEpoch
UpgradeKumquatHeight abi.ChainEpoch
UpgradePriceListOopsHeight abi.ChainEpoch
BreezeGasTampingDuration abi.ChainEpoch
UpgradeCalicoHeight abi.ChainEpoch
UpgradePersianHeight abi.ChainEpoch
UpgradeOrangeHeight abi.ChainEpoch
UpgradeClausHeight abi.ChainEpoch
UpgradeTrustHeight abi.ChainEpoch
UpgradeNorwegianHeight abi.ChainEpoch
UpgradeTurboHeight abi.ChainEpoch
UpgradeHyperdriveHeight abi.ChainEpoch
UpgradeChocolateHeight abi.ChainEpoch
UpgradeOhSnapHeight abi.ChainEpoch
}
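The NetworkParams and ForkUpgradeParams structs above are the return type of the new StateGetNetworkParams API. A minimal sketch of reading a few fields, assuming a connected `api.FullNode` client (field selection and formatting are illustrative):

```go
package example

import (
	"context"
	"fmt"

	lapi "github.com/filecoin-project/lotus/api"
)

// printNetworkParams fetches the current network parameters and prints a
// few of them. Assumes `node` is a connected FullNode client.
func printNetworkParams(ctx context.Context, node lapi.FullNode) error {
	params, err := node.StateGetNetworkParams(ctx)
	if err != nil {
		return fmt.Errorf("fetching network params: %w", err)
	}
	fmt.Printf("network: %s, block delay: %ds, precommit challenge delay: %d epochs\n",
		params.NetworkName, params.BlockDelaySecs, params.PreCommitChallengeDelay)
	fmt.Printf("OhSnap upgrade height: %d\n", params.ForkUpgradeParams.UpgradeOhSnapHeight)
	return nil
}
```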

View File

@ -604,6 +604,9 @@ type FullNode interface {
// StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
// StateGetNetworkParams return current network params
StateGetNetworkParams(ctx context.Context) (*api.NetworkParams, error) //perm:read
// MethodGroup: Msig
// The Msig methods are used to interact with multisig wallets on the
// filecoin network

View File

@ -267,6 +267,8 @@ type FullNodeStruct struct {
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
StateGetNetworkParams func(p0 context.Context) (*api.NetworkParams, error) `perm:"read"`
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
StateGetRandomnessFromTickets func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
@ -1746,6 +1748,17 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2
return nil, ErrNotSupported
}
func (s *FullNodeStruct) StateGetNetworkParams(p0 context.Context) (*api.NetworkParams, error) {
if s.Internal.StateGetNetworkParams == nil {
return nil, ErrNotSupported
}
return s.Internal.StateGetNetworkParams(p0)
}
func (s *FullNodeStub) StateGetNetworkParams(p0 context.Context) (*api.NetworkParams, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) StateGetRandomnessFromBeacon(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
if s.Internal.StateGetRandomnessFromBeacon == nil {
return *new(abi.Randomness), ErrNotSupported

View File

@ -2275,6 +2275,21 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{})
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
}
// StateGetNetworkParams mocks base method.
func (m *MockFullNode) StateGetNetworkParams(arg0 context.Context) (*api.NetworkParams, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateGetNetworkParams", arg0)
ret0, _ := ret[0].(*api.NetworkParams)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateGetNetworkParams indicates an expected call of StateGetNetworkParams.
func (mr *MockFullNodeMockRecorder) StateGetNetworkParams(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetNetworkParams", reflect.TypeOf((*MockFullNode)(nil).StateGetNetworkParams), arg0)
}
// StateGetRandomnessFromBeacon mocks base method.
func (m *MockFullNode) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) {
m.ctrl.T.Helper()

View File

@ -13,3 +13,4 @@
/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt
/dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d
/dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP
/dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWQjaNmbz9b1XmheQB3RWsRjKSzuRLfjeiDZHyX7Y5RcBr

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -53,11 +53,19 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg2KiBV1,
abi.RegisteredSealProof_StackedDrg8MiBV1,
}
var ConsensusMinerMinPower = abi.NewStoragePower(2048)
var MinVerifiedDealSize = abi.NewStoragePower(256)
var PreCommitChallengeDelay = abi.ChainEpoch(10)
func init() {
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1)
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10))
policy.SetSupportedProofTypes(SupportedProofTypes...)
policy.SetConsensusMinerMinPower(ConsensusMinerMinPower)
policy.SetMinVerifiedDealSize(MinVerifiedDealSize)
policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay)
getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch {
hs, found := os.LookupEnv(ev)

View File

@ -44,13 +44,20 @@ const UpgradeChocolateHeight = -17
const UpgradeOhSnapHeight = 240
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
}
var ConsensusMinerMinPower = abi.NewStoragePower(2 << 30)
var MinVerifiedDealSize = abi.NewStoragePower(1 << 20)
var PreCommitChallengeDelay = abi.ChainEpoch(150)
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
)
policy.SetSupportedProofTypes(SupportedProofTypes...)
policy.SetConsensusMinerMinPower(ConsensusMinerMinPower)
policy.SetMinVerifiedDealSize(MinVerifiedDealSize)
policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay)
SetAddressNetwork(address.Testnet)

View File

@ -57,12 +57,19 @@ const UpgradeChocolateHeight = 312746
// 2022-02-10T19:23:00Z
const UpgradeOhSnapHeight = 682006
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
}
var ConsensusMinerMinPower = abi.NewStoragePower(32 << 30)
var MinVerifiedDealSize = abi.NewStoragePower(1 << 20)
var PreCommitChallengeDelay = abi.ChainEpoch(150)
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
)
policy.SetSupportedProofTypes(SupportedProofTypes...)
policy.SetConsensusMinerMinPower(ConsensusMinerMinPower)
policy.SetMinVerifiedDealSize(MinVerifiedDealSize)
policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay)
SetAddressNetwork(address.Testnet)

View File

@ -53,15 +53,20 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg2KiBV1,
abi.RegisteredSealProof_StackedDrg8MiBV1,
abi.RegisteredSealProof_StackedDrg512MiBV1,
}
var ConsensusMinerMinPower = abi.NewStoragePower(2048)
var MinVerifiedDealSize = abi.NewStoragePower(256)
var PreCommitChallengeDelay = abi.ChainEpoch(10)
func init() {
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg2KiBV1,
abi.RegisteredSealProof_StackedDrg8MiBV1,
abi.RegisteredSealProof_StackedDrg512MiBV1,
)
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10))
policy.SetSupportedProofTypes(SupportedProofTypes...)
policy.SetConsensusMinerMinPower(ConsensusMinerMinPower)
policy.SetMinVerifiedDealSize(MinVerifiedDealSize)
policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay)
getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch {
hs, found := os.LookupEnv(ev)

View File

@ -70,6 +70,14 @@ const UpgradeChocolateHeight = 1231620
// 2022-03-01T15:00:00Z
var UpgradeOhSnapHeight = abi.ChainEpoch(1594680)
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
}
var ConsensusMinerMinPower = abi.NewStoragePower(10 << 40)
var MinVerifiedDealSize = abi.NewStoragePower(1 << 20)
var PreCommitChallengeDelay = abi.ChainEpoch(150)
func init() {
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet)

View File

@ -50,23 +50,29 @@ const UpgradeHyperdriveHeight = 379178
const UpgradeChocolateHeight = 999999999
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
}
// Minimum block production power is set to 4 TiB
// Rationale is to discourage small-scale miners from trying to take over the network
// One needs to invest in ~2.3x the compute to break consensus, making it not worth it
//
// DOWNSIDE: the fake-seals need to be kept alive/protected, otherwise network will seize
//
var ConsensusMinerMinPower = abi.NewStoragePower(4 << 40)
var MinVerifiedDealSize = abi.NewStoragePower(1 << 20)
// Lower the most time-consuming parts of PoRep
var PreCommitChallengeDelay = abi.ChainEpoch(10)
func init() {
// Minimum block production power is set to 4 TiB
// Rationale is to discourage small-scale miners from trying to take over the network
// One needs to invest in ~2.3x the compute to break consensus, making it not worth it
//
// DOWNSIDE: the fake-seals need to be kept alive/protected, otherwise network will seize
//
policy.SetConsensusMinerMinPower(abi.NewStoragePower(4 << 40))
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
)
// Lower the most time-consuming parts of PoRep
policy.SetPreCommitChallengeDelay(10)
policy.SetSupportedProofTypes(SupportedProofTypes...)
policy.SetConsensusMinerMinPower(ConsensusMinerMinPower)
policy.SetMinVerifiedDealSize(MinVerifiedDealSize)
policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay)
// TODO - make this a variable
//miner.WPoStChallengeLookback = abi.ChainEpoch(2)

View File

@ -33,6 +33,12 @@ var (
MinimumBaseFee = int64(100)
BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
PropagationDelaySecs = uint64(6)
SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
}
ConsensusMinerMinPower = abi.NewStoragePower(10 << 40)
PreCommitChallengeDelay = abi.ChainEpoch(150)
AllowableClockDriftSecs = uint64(1)

View File

@ -37,7 +37,7 @@ func BuildTypeString() string {
}
// BuildVersion is the local build version
const BuildVersion = "1.15.2"
const BuildVersion = "1.15.3"
func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

View File

@ -108,7 +108,7 @@ type PublishStorageDealsParams = market0.PublishStorageDealsParams
type PublishStorageDealsReturn interface {
DealIDs() ([]abi.DealID, error)
// Note that this index is based on the batch of deals that were published, NOT the DealID
IsDealValid(index uint64) (bool, error)
IsDealValid(index uint64) (bool, int, error)
}
func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStorageDealsReturn, error) {

View File

@ -197,7 +197,7 @@ type PublishStorageDealsParams = market0.PublishStorageDealsParams
type PublishStorageDealsReturn interface {
DealIDs() ([]abi.DealID, error)
// Note that this index is based on the batch of deals that were published, NOT the DealID
IsDealValid(index uint64) (bool, error)
IsDealValid(index uint64) (bool, int, error)
}
func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStorageDealsReturn, error) {

View File

@ -8,6 +8,11 @@ import (
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
{{if (ge .v 6)}}
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-bitfield"
{{end}}
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
@ -253,12 +258,29 @@ type publishStorageDealsReturn{{.v}} struct {
market{{.v}}.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn{{.v}}) IsDealValid(index uint64) (bool, error) {
func (r *publishStorageDealsReturn{{.v}}) IsDealValid(index uint64) (bool, int, error) {
{{if (ge .v 6)}}
return r.ValidDeals.IsSet(index)
set, err := r.ValidDeals.IsSet(index)
if err != nil || !set {
return false, -1, err
}
maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}})
if err != nil {
return false, -1, err
}
before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals)
if err != nil {
return false, -1, err
}
outIdx, err := before.Count()
if err != nil {
return false, -1, err
}
return set, int(outIdx), nil
{{else}}
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
return true, nil
return true, int(index), nil
{{end}}
}
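The generated method above returns not only whether the deal at `index` is valid but also its position among the valid deals: it builds a mask of all positions below `index`, intersects it with ValidDeals, and counts the result. A standalone sketch of that counting step with go-bitfield (example values only) follows:

```go
package example

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
)

// validDealIndex returns how many valid deals precede `index`, or -1 if the
// deal at `index` is not marked valid. This mirrors the counting trick used
// in the actors adapter above.
func validDealIndex(validDeals bitfield.BitField, index uint64) (int, error) {
	set, err := validDeals.IsSet(index)
	if err != nil || !set {
		return -1, err
	}
	// Mask covering all positions strictly below `index`.
	maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
		Runs: []rlepluslazy.Run{{Val: true, Len: index}}})
	if err != nil {
		return -1, err
	}
	before, err := bitfield.IntersectBitField(maskBf, validDeals)
	if err != nil {
		return -1, err
	}
	count, err := before.Count()
	if err != nil {
		return -1, err
	}
	return int(count), nil
}

func exampleUsage() {
	// Valid deals at positions {0, 2, 3}; querying index 3 yields 2,
	// because two valid deals precede it.
	valid := bitfield.NewFromSet([]uint64{0, 2, 3})
	idx, _ := validDealIndex(valid, 3)
	fmt.Println(idx) // prints 2
}
```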

View File

@ -246,10 +246,10 @@ type publishStorageDealsReturn0 struct {
market0.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn0) IsDealValid(index uint64) (bool, error) {
func (r *publishStorageDealsReturn0) IsDealValid(index uint64) (bool, int, error) {
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
return true, nil
return true, int(index), nil
}

View File

@ -246,10 +246,10 @@ type publishStorageDealsReturn2 struct {
market2.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn2) IsDealValid(index uint64) (bool, error) {
func (r *publishStorageDealsReturn2) IsDealValid(index uint64) (bool, int, error) {
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
return true, nil
return true, int(index), nil
}

View File

@ -241,10 +241,10 @@ type publishStorageDealsReturn3 struct {
market3.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn3) IsDealValid(index uint64) (bool, error) {
func (r *publishStorageDealsReturn3) IsDealValid(index uint64) (bool, int, error) {
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
return true, nil
return true, int(index), nil
}

View File

@ -241,10 +241,10 @@ type publishStorageDealsReturn4 struct {
market4.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn4) IsDealValid(index uint64) (bool, error) {
func (r *publishStorageDealsReturn4) IsDealValid(index uint64) (bool, int, error) {
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
return true, nil
return true, int(index), nil
}

View File

@ -241,10 +241,10 @@ type publishStorageDealsReturn5 struct {
market5.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn5) IsDealValid(index uint64) (bool, error) {
func (r *publishStorageDealsReturn5) IsDealValid(index uint64) (bool, int, error) {
// PublishStorageDeals only succeeded if all deals were valid in this version of actors
return true, nil
return true, int(index), nil
}

View File

@ -9,6 +9,9 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-bitfield"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
@ -241,9 +244,26 @@ type publishStorageDealsReturn6 struct {
market6.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn6) IsDealValid(index uint64) (bool, error) {
func (r *publishStorageDealsReturn6) IsDealValid(index uint64) (bool, int, error) {
return r.ValidDeals.IsSet(index)
set, err := r.ValidDeals.IsSet(index)
if err != nil || !set {
return false, -1, err
}
maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}})
if err != nil {
return false, -1, err
}
before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals)
if err != nil {
return false, -1, err
}
outIdx, err := before.Count()
if err != nil {
return false, -1, err
}
return set, int(outIdx), nil
}

View File

@ -9,6 +9,9 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-bitfield"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
@ -241,9 +244,26 @@ type publishStorageDealsReturn7 struct {
market7.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn7) IsDealValid(index uint64) (bool, error) {
func (r *publishStorageDealsReturn7) IsDealValid(index uint64) (bool, int, error) {
return r.ValidDeals.IsSet(index)
set, err := r.ValidDeals.IsSet(index)
if err != nil || !set {
return false, -1, err
}
maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}})
if err != nil {
return false, -1, err
}
before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals)
if err != nil {
return false, -1, err
}
outIdx, err := before.Count()
if err != nil {
return false, -1, err
}
return set, int(outIdx), nil
}

View File

@ -467,7 +467,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
}
nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height)
pl := vm.PricelistByEpochAndNetworkVersion(b.Header.Height, nv)
pl := vm.PricelistByEpoch(b.Header.Height)
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()

View File

@ -281,11 +281,12 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
// gas checks
// 4. Min Gas
minGas := vm.PricelistByEpochAndNetworkVersion(epoch, nv).OnChainMessage(m.ChainLength())
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
check = api.MessageCheckStatus{
Cid: m.Cid(),
CheckStatus: api.CheckStatus{Code: api.CheckStatusMessageMinGas,
CheckStatus: api.CheckStatus{
Code: api.CheckStatusMessageMinGas,
Hint: map[string]interface{}{
"minGas": minGas,
},

View File

@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
lps "github.com/filecoin-project/pubsub"
"github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru"
"github.com/ipfs/go-cid"
@ -27,7 +28,6 @@ import (
"github.com/ipfs/go-datastore/query"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
lps "github.com/whyrusleeping/pubsub"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@ -629,11 +629,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
// a (soft) validation error.
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
epoch := curTs.Height() + 1
nv, err := mp.getNtwkVersion(epoch)
if err != nil {
return false, xerrors.Errorf("getting network version: %w", err)
}
minGas := vm.PricelistByEpochAndNetworkVersion(epoch, nv).OnChainMessage(m.ChainLength())
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
return false, xerrors.Errorf("message will not be included in a block: %w", err)
@ -1477,7 +1473,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err
sub := mp.changes.Sub(localUpdates)
go func() {
defer mp.changes.Unsub(sub, localUpdates)
defer mp.changes.Unsub(sub)
defer close(out)
for {

View File

@ -781,7 +781,6 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
// cannot exceed the block limit; drop all messages that exceed the limit
// - the total gasReward cannot exceed the actor's balance; drop all messages that exceed
// the balance
a, err := mp.api.GetActorAfter(actor, ts)
if err != nil {
log.Errorf("failed to load actor state, not building chain for %s: %v", actor, err)
@ -794,12 +793,6 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
skip := 0
i := 0
rewards := make([]*big.Int, 0, len(msgs))
nv, err := mp.getNtwkVersion(ts.Height())
if err != nil {
log.Errorf("getting network version: %v", err)
return nil
}
for i = 0; i < len(msgs); i++ {
m := msgs[i]
@ -815,7 +808,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
}
curNonce++
minGas := vm.PricelistByEpochAndNetworkVersion(ts.Height(), nv).OnChainMessage(m.ChainLength()).Total()
minGas := vm.PricelistByEpoch(ts.Height()).OnChainMessage(m.ChainLength()).Total()
if m.Message.GasLimit < minGas {
break
}

View File

@ -4,6 +4,8 @@ import (
"context"
"sync"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"
"github.com/filecoin-project/lotus/chain/rand"
@ -22,6 +24,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
_init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/state"
@ -318,6 +321,48 @@ func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *
return state.LookupID(addr)
}
func (sm *StateManager) LookupRobustAddress(ctx context.Context, idAddr address.Address, ts *types.TipSet) (address.Address, error) {
idAddrDecoded, err := address.IDFromAddress(idAddr)
if err != nil {
return address.Undef, xerrors.Errorf("failed to decode provided address as id addr: %w", err)
}
cst := cbor.NewCborStore(sm.cs.StateBlockstore())
wrapStore := adt.WrapStore(ctx, cst)
stateTree, err := state.LoadStateTree(cst, sm.parentState(ts))
if err != nil {
return address.Undef, xerrors.Errorf("load state tree: %w", err)
}
initActor, err := stateTree.GetActor(_init.Address)
if err != nil {
return address.Undef, xerrors.Errorf("load init actor: %w", err)
}
initState, err := _init.Load(wrapStore, initActor)
if err != nil {
return address.Undef, xerrors.Errorf("load init state: %w", err)
}
robustAddr := address.Undef
err = initState.ForEachActor(func(id abi.ActorID, addr address.Address) error {
if uint64(id) == idAddrDecoded {
robustAddr = addr
// Hacky way to early return from ForEach
return xerrors.New("robust address found")
}
return nil
})
if robustAddr == address.Undef {
if err == nil {
return address.Undef, xerrors.Errorf("Address %s not found", idAddr.String())
}
return address.Undef, xerrors.Errorf("finding address: %w", err)
}
return robustAddr, nil
}
func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) error {
tschain := []*types.TipSet{ts}
for ts.Height() != 0 {

View File

@ -28,6 +28,7 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/pubsub"
lru "github.com/hashicorp/golang-lru"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
@ -35,7 +36,6 @@ import (
"github.com/ipfs/go-datastore/query"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
"github.com/whyrusleeping/pubsub"
"golang.org/x/xerrors"
)

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/Gurpartap/async"
"github.com/filecoin-project/pubsub"
"github.com/hashicorp/go-multierror"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
@ -22,7 +23,6 @@ import (
"github.com/libp2p/go-libp2p-core/connmgr"
"github.com/libp2p/go-libp2p-core/peer"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/whyrusleeping/pubsub"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
@ -260,7 +260,7 @@ func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHe
out := make(chan *types.BlockHeader, 10)
go func() {
defer syncer.incoming.Unsub(sub, LocalIncoming)
defer syncer.incoming.Unsub(sub)
for {
select {

chain/vm/cbor_gen.go (new file, 169 lines)
View File

@ -0,0 +1,169 @@
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
package vm
import (
"fmt"
"io"
"math"
"sort"
types "github.com/filecoin-project/lotus/chain/types"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = math.E
var _ = sort.Sort
var lengthBufFvmExecutionTrace = []byte{132}
func (t *FvmExecutionTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
if _, err := w.Write(lengthBufFvmExecutionTrace); err != nil {
return err
}
scratch := make([]byte, 9)
// t.Msg (types.Message) (struct)
if err := t.Msg.MarshalCBOR(w); err != nil {
return err
}
// t.MsgRct (types.MessageReceipt) (struct)
if err := t.MsgRct.MarshalCBOR(w); err != nil {
return err
}
// t.Error (string) (string)
if len(t.Error) > cbg.MaxLength {
return xerrors.Errorf("Value in field t.Error was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Error))); err != nil {
return err
}
if _, err := io.WriteString(w, string(t.Error)); err != nil {
return err
}
// t.Subcalls ([]vm.FvmExecutionTrace) (slice)
if len(t.Subcalls) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.Subcalls was too long")
}
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Subcalls))); err != nil {
return err
}
for _, v := range t.Subcalls {
if err := v.MarshalCBOR(w); err != nil {
return err
}
}
return nil
}
func (t *FvmExecutionTrace) UnmarshalCBOR(r io.Reader) error {
*t = FvmExecutionTrace{}
br := cbg.GetPeeker(r)
scratch := make([]byte, 8)
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 4 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Msg (types.Message) (struct)
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
t.Msg = new(types.Message)
if err := t.Msg.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.Msg pointer: %w", err)
}
}
}
// t.MsgRct (types.MessageReceipt) (struct)
{
b, err := br.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := br.UnreadByte(); err != nil {
return err
}
t.MsgRct = new(types.MessageReceipt)
if err := t.MsgRct.UnmarshalCBOR(br); err != nil {
return xerrors.Errorf("unmarshaling t.MsgRct pointer: %w", err)
}
}
}
// t.Error (string) (string)
{
sval, err := cbg.ReadStringBuf(br, scratch)
if err != nil {
return err
}
t.Error = string(sval)
}
// t.Subcalls ([]vm.FvmExecutionTrace) (slice)
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
if err != nil {
return err
}
if extra > cbg.MaxLength {
return fmt.Errorf("t.Subcalls: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Subcalls = make([]FvmExecutionTrace, extra)
}
for i := 0; i < int(extra); i++ {
var v FvmExecutionTrace
if err := v.UnmarshalCBOR(br); err != nil {
return err
}
t.Subcalls[i] = v
}
return nil
}

View File

@ -3,11 +3,13 @@ package vm
import (
"bytes"
"context"
"sync/atomic"
"time"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/go-state-types/network"
@ -41,12 +43,41 @@ type FvmExtern struct {
Rand
blockstore.Blockstore
epoch abi.ChainEpoch
nv network.Version
lbState LookbackStateGetter
base cid.Cid
}
// VerifyConsensusFault is similar to the one in syscalls.go used by the LegacyVM, except it never errors
// This may eventually become identical to ExecutionTrace, but we can make incremental progress towards that
type FvmExecutionTrace struct {
Msg *types.Message
MsgRct *types.MessageReceipt
Error string
Subcalls []FvmExecutionTrace
}
func (t *FvmExecutionTrace) ToExecutionTrace() types.ExecutionTrace {
if t == nil {
return types.ExecutionTrace{}
}
ret := types.ExecutionTrace{
Msg: t.Msg,
MsgRct: t.MsgRct,
Error: t.Error,
Duration: 0,
GasCharges: nil,
Subcalls: make([]types.ExecutionTrace, len(t.Subcalls)),
}
for i, v := range t.Subcalls {
ret.Subcalls[i] = v.ToExecutionTrace()
}
return ret
}
// VerifyConsensusFault is similar to the one in syscalls.go used by the Lotus VM, except it never errors
// Errors are logged and "no fault" is returned, which is functionally what go-actors does anyway
func (x *FvmExtern) VerifyConsensusFault(ctx context.Context, a, b, extra []byte) (*ffi_cgo.ConsensusFault, int64) {
totalGas := int64(0)
@ -183,7 +214,7 @@ func (x *FvmExtern) workerKeyAtLookback(ctx context.Context, minerId address.Add
}
cstWithoutGas := cbor.NewCborStore(x.Blockstore)
cbb := &gasChargingBlocks{gasAdder, PricelistByEpochAndNetworkVersion(x.epoch, x.nv), x.Blockstore}
cbb := &gasChargingBlocks{gasAdder, PricelistByEpoch(x.epoch), x.Blockstore}
cstWithGas := cbor.NewCborStore(cbb)
lbState, err := x.lbState(ctx, height)
@ -243,15 +274,17 @@ func NewFVM(ctx context.Context, opts *VMOpts) (*FVM, error) {
fvmOpts := ffi.FVMOpts{
FVMVersion: 0,
Externs: &FvmExtern{Rand: opts.Rand, Blockstore: opts.Bstore, lbState: opts.LookbackState, base: opts.StateBase, epoch: opts.Epoch, nv: opts.NetworkVersion},
Externs: &FvmExtern{Rand: opts.Rand, Blockstore: opts.Bstore, lbState: opts.LookbackState, base: opts.StateBase, epoch: opts.Epoch},
Epoch: opts.Epoch,
BaseFee: opts.BaseFee,
BaseCircSupply: circToReport,
NetworkVersion: opts.NetworkVersion,
StateBase: opts.StateBase,
Tracing: EnableDetailedTracing,
}
fvm, err := ffi.CreateFVM(&fvmOpts)
if err != nil {
return nil, err
}
@ -263,6 +296,7 @@ func NewFVM(ctx context.Context, opts *VMOpts) (*FVM, error) {
func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
start := build.Clock.Now()
defer atomic.AddUint64(&StatApplied, 1)
msgBytes, err := cmsg.VMMessage().Serialize()
if err != nil {
return nil, xerrors.Errorf("serializing msg: %w", err)
@ -273,6 +307,22 @@ func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet
return nil, xerrors.Errorf("applying msg: %w", err)
}
var et FvmExecutionTrace
if len(ret.ExecTraceBytes) != 0 {
if err = et.UnmarshalCBOR(bytes.NewReader(ret.ExecTraceBytes)); err != nil {
return nil, xerrors.Errorf("failed to unmarshal exectrace: %w", err)
}
}
var aerr aerrors.ActorError
if ret.ExitCode != 0 {
amsg := ret.FailureInfo
if amsg == "" {
amsg = "unknown error"
}
aerr = aerrors.New(exitcode.ExitCode(ret.ExitCode), amsg)
}
return &ApplyRet{
MessageReceipt: types.MessageReceipt{
Return: ret.Return,
@ -289,16 +339,15 @@ func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet
GasRefund: 0,
GasBurned: 0,
},
// TODO: do these eventually, not consensus critical
// https://github.com/filecoin-project/ref-fvm/issues/318
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{},
ActorErr: aerr,
ExecutionTrace: et.ToExecutionTrace(),
Duration: time.Since(start),
}, nil
}
func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*ApplyRet, error) {
start := build.Clock.Now()
defer atomic.AddUint64(&StatApplied, 1)
msgBytes, err := cmsg.VMMessage().Serialize()
if err != nil {
return nil, xerrors.Errorf("serializing msg: %w", err)
@ -308,17 +357,30 @@ func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*
return nil, xerrors.Errorf("applying msg: %w", err)
}
var et FvmExecutionTrace
if len(ret.ExecTraceBytes) != 0 {
if err = et.UnmarshalCBOR(bytes.NewReader(ret.ExecTraceBytes)); err != nil {
return nil, xerrors.Errorf("failed to unmarshal exectrace: %w", err)
}
}
var aerr aerrors.ActorError
if ret.ExitCode != 0 {
amsg := ret.FailureInfo
if amsg == "" {
amsg = "unknown error"
}
aerr = aerrors.New(exitcode.ExitCode(ret.ExitCode), amsg)
}
return &ApplyRet{
MessageReceipt: types.MessageReceipt{
Return: ret.Return,
ExitCode: exitcode.ExitCode(ret.ExitCode),
GasUsed: ret.GasUsed,
},
GasCosts: nil,
// TODO: do these eventually, not consensus critical
// https://github.com/filecoin-project/ref-fvm/issues/318
ActorErr: nil,
ExecutionTrace: types.ExecutionTrace{},
ActorErr: aerr,
ExecutionTrace: et.ToExecutionTrace(),
Duration: time.Since(start),
}, nil
}
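With this change, ApplyRet.ExecutionTrace is populated from the FVM's CBOR-encoded trace instead of being left empty. A small sketch of walking such a trace, assuming the types.ExecutionTrace shape used above (the output format is illustrative):

```go
package example

import (
	"fmt"
	"strings"

	"github.com/filecoin-project/lotus/chain/types"
)

// printTrace walks an execution trace depth-first, indenting one level per
// subcall, and prints the message, exit code, and gas used at each step.
func printTrace(t types.ExecutionTrace, depth int) {
	indent := strings.Repeat("  ", depth)
	if t.Msg != nil && t.MsgRct != nil {
		fmt.Printf("%s%s -> %s (method %d): exit %d, gas %d\n",
			indent, t.Msg.From, t.Msg.To, t.Msg.Method, t.MsgRct.ExitCode, t.MsgRct.GasUsed)
	}
	if t.Error != "" {
		fmt.Printf("%serror: %s\n", indent, t.Error)
	}
	for _, sub := range t.Subcalls {
		printTrace(sub, depth+1)
	}
}
```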

View File

@ -3,8 +3,6 @@ package vm
import (
"fmt"
"github.com/filecoin-project/go-state-types/network"
vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
@ -84,153 +82,145 @@ type Pricelist interface {
OnVerifyConsensusFault() GasCharge
}
var priceListGenesis = pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1000,
// Prices are the price lists per starting epoch. Public for testing purposes
// (concretely to allow the test vector runner to rebase prices).
var Prices = map[abi.ChainEpoch]Pricelist{
abi.ChainEpoch(0): &pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1000,
onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,
onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,
onChainReturnValuePerByte: 1,
onChainReturnValuePerByte: 1,
sendBase: 29233,
sendTransferFunds: 27500,
sendTransferOnlyPremium: 159672,
sendInvokeMethod: -5377,
sendBase: 29233,
sendTransferFunds: 27500,
sendTransferOnlyPremium: 159672,
sendInvokeMethod: -5377,
ipldGetBase: 75242,
ipldPutBase: 84070,
ipldPutPerByte: 1,
ipldGetBase: 75242,
ipldPutBase: 84070,
ipldPutPerByte: 1,
createActorCompute: 1108454,
createActorStorage: 36 + 40,
deleteActor: -(36 + 40), // -createActorStorage
createActorCompute: 1108454,
createActorStorage: 36 + 40,
deleteActor: -(36 + 40), // -createActorStorage
verifySignature: map[crypto.SigType]int64{
crypto.SigTypeBLS: 16598605,
crypto.SigTypeSecp256k1: 1637292,
verifySignature: map[crypto.SigType]int64{
crypto.SigTypeBLS: 16598605,
crypto.SigTypeSecp256k1: 1637292,
},
hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used
verifyAggregateSealBase: 0,
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 123861062,
scale: 9226981,
},
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
flat: 748593537,
scale: 85639,
},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
flat: 748593537,
scale: 85639,
},
},
verifyPostDiscount: true,
verifyConsensusFault: 495422,
},
abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1300,
hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas , it VerifySeal syscall is not used
verifyAggregateSealBase: 0,
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 123861062,
scale: 9226981,
onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,
onChainReturnValuePerByte: 1,
sendBase: 29233,
sendTransferFunds: 27500,
sendTransferOnlyPremium: 159672,
sendInvokeMethod: -5377,
ipldGetBase: 114617,
ipldPutBase: 353640,
ipldPutPerByte: 1,
createActorCompute: 1108454,
createActorStorage: 36 + 40,
deleteActor: -(36 + 40), // -createActorStorage
verifySignature: map[crypto.SigType]int64{
crypto.SigTypeBLS: 16598605,
crypto.SigTypeSecp256k1: 1637292,
},
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
flat: 748593537,
scale: 85639,
hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas, it VerifySeal syscall is not used
verifyAggregateSealPer: map[abi.RegisteredSealProof]int64{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 449900,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 359272,
},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
flat: 748593537,
scale: 85639,
verifyAggregateSealSteps: map[abi.RegisteredSealProof]stepCost{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {
{4, 103994170},
{7, 112356810},
{13, 122912610},
{26, 137559930},
{52, 162039100},
{103, 210960780},
{205, 318351180},
{410, 528274980},
},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {
{4, 102581240},
{7, 110803030},
{13, 120803700},
{26, 134642130},
{52, 157357890},
{103, 203017690},
{205, 304253590},
{410, 509880640},
},
},
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
flat: 117680921,
scale: 43780,
},
},
verifyPostDiscount: false,
verifyConsensusFault: 495422,
verifyReplicaUpdate: 36316136,
},
verifyPostDiscount: true,
verifyConsensusFault: 495422,
}
var priceListCalico = pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1300,
onChainMessageComputeBase: 38863,
onChainMessageStorageBase: 36,
onChainMessageStoragePerByte: 1,
onChainReturnValuePerByte: 1,
sendBase: 29233,
sendTransferFunds: 27500,
sendTransferOnlyPremium: 159672,
sendInvokeMethod: -5377,
ipldGetBase: 114617,
ipldPutBase: 353640,
ipldPutPerByte: 1,
createActorCompute: 1108454,
createActorStorage: 36 + 40,
deleteActor: -(36 + 40), // -createActorStorage
verifySignature: map[crypto.SigType]int64{
crypto.SigTypeBLS: 16598605,
crypto.SigTypeSecp256k1: 1637292,
},
hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas, it VerifySeal syscall is not used
verifyAggregateSealPer: map[abi.RegisteredSealProof]int64{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 449900,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 359272,
},
verifyAggregateSealSteps: map[abi.RegisteredSealProof]stepCost{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {
{4, 103994170},
{7, 112356810},
{13, 122912610},
{26, 137559930},
{52, 162039100},
{103, 210960780},
{205, 318351180},
{410, 528274980},
},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {
{4, 102581240},
{7, 110803030},
{13, 120803700},
{26, 134642130},
{52, 157357890},
{103, 203017690},
{205, 304253590},
{410, 509880640},
},
},
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {
flat: 117680921,
scale: 43780,
},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {
flat: 117680921,
scale: 43780,
},
},
verifyPostDiscount: false,
verifyConsensusFault: 495422,
verifyReplicaUpdate: 36316136,
}
// Prices are the price lists per starting epoch.
// For network v8 and onwards, this is disregarded; the pricelist is selected by network version.
var pricesByEpoch = map[abi.ChainEpoch]Pricelist{
abi.ChainEpoch(0): &priceListGenesis,
abi.ChainEpoch(build.UpgradeCalicoHeight): &priceListCalico,
}
// PricelistByEpochAndNetworkVersion finds the latest prices for the given epoch
func PricelistByEpochAndNetworkVersion(epoch abi.ChainEpoch, nv network.Version) Pricelist {
if nv >= network.Version8 {
return &priceListCalico
}
// PricelistByEpoch finds the latest prices for the given epoch
func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
// since we are storing the prices as a map of epoch to price
// we need to get the price with the highest epoch that is lower than or equal to the `epoch` arg
bestEpoch := abi.ChainEpoch(0)
bestPrice := pricesByEpoch[bestEpoch]
for e, pl := range pricesByEpoch {
bestPrice := Prices[bestEpoch]
for e, pl := range Prices {
// if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch`
if e > bestEpoch && e <= epoch {
bestEpoch = e

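The `PricelistByEpoch` lookup above selects, from the `Prices` table, the entry whose starting epoch is the highest one not exceeding the requested epoch. A minimal, self-contained sketch of that selection rule, using a plain `map[int64]string` and illustrative epoch numbers instead of the real `Prices` map and `Pricelist` interface:

```go
package main

import "fmt"

// bestEntry mirrors the rule described above: among all starting epochs in
// the table, pick the highest one that is <= the target epoch.
func bestEntry(table map[int64]string, target int64) string {
	bestEpoch := int64(0)
	best := table[bestEpoch]
	for e, entry := range table {
		// a later starting epoch wins, but only if it has already taken effect
		if e > bestEpoch && e <= target {
			bestEpoch = e
			best = entry
		}
	}
	return best
}

func main() {
	// illustrative epochs only, not the real upgrade heights
	table := map[int64]string{0: "genesis pricelist", 1000: "calico pricelist"}
	fmt.Println(bestEntry(table, 500))  // genesis pricelist
	fmt.Println(bestEntry(table, 2000)) // calico pricelist
}
```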
View File

@ -51,7 +51,7 @@ var EmptyObjectCid cid.Cid
// TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses.
func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, address.Address, aerrors.ActorError) {
if err := rt.chargeGasSafe(PricelistByEpochAndNetworkVersion(rt.height, rt.NetworkVersion()).OnCreateActor()); err != nil {
if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil {
return nil, address.Undef, err
}

View File

@ -56,8 +56,8 @@ func (m *Message) ValueReceived() abi.TokenAmount {
return m.msg.Value
}
// EnableGasTracing, if true, outputs gas tracing in execution traces.
var EnableGasTracing = os.Getenv("LOTUS_VM_ENABLE_GAS_TRACING_VERY_SLOW") == "1"
// EnableDetailedTracing, if true, outputs gas tracing in execution traces.
var EnableDetailedTracing = os.Getenv("LOTUS_VM_ENABLE_GAS_TRACING_VERY_SLOW") == "1"
type Runtime struct {
rt7.Message
@ -516,7 +516,7 @@ func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError {
}
func (rt *Runtime) finilizeGasTracing() {
if EnableGasTracing {
if EnableDetailedTracing {
if rt.lastGasCharge != nil {
rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime)
}
@ -550,7 +550,7 @@ func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) {
func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError {
toUse := gas.Total()
if EnableGasTracing {
if EnableDetailedTracing {
var callers [10]uintptr
cout := gruntime.Callers(2+skip, callers[:])

View File

@ -56,14 +56,14 @@ func BenchmarkRuntime_CreateRuntimeChargeGas_TracingDisabled(b *testing.B) {
b.ResetTimer()
EnableGasTracing = false
noop := func() bool { return EnableGasTracing }
EnableDetailedTracing = false
noop := func() bool { return EnableDetailedTracing }
for n := 0; n < b.N; n++ {
// flip the value and access it to make sure
// the compiler doesn't optimize away
EnableGasTracing = true
EnableDetailedTracing = true
_ = noop()
EnableGasTracing = false
EnableDetailedTracing = false
_ = (&Runtime{cst: cst}).chargeGasInternal(gch, 0)
}
}

View File

@ -135,7 +135,7 @@ func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent
gasAvailable: msg.GasLimit,
depth: 0,
numActorsCreated: 0,
pricelist: PricelistByEpochAndNetworkVersion(vm.blockHeight, vm.networkVersion),
pricelist: PricelistByEpoch(vm.blockHeight),
allowInternal: true,
callerValidated: false,
executionTrace: types.ExecutionTrace{Msg: msg},
@ -283,7 +283,7 @@ func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtim
st := vm.cstate
rt := vm.makeRuntime(ctx, msg, parent)
if EnableGasTracing {
if EnableDetailedTracing {
rt.lastGasChargeTime = start
if parent != nil {
rt.lastGasChargeTime = parent.lastGasChargeTime
@ -431,7 +431,7 @@ func (vm *LegacyVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*App
return nil, err
}
pl := PricelistByEpochAndNetworkVersion(vm.blockHeight, vm.networkVersion)
pl := PricelistByEpoch(vm.blockHeight)
msgGas := pl.OnChainMessage(cmsg.ChainLength())
msgGasCost := msgGas.Total()

View File

@ -90,7 +90,8 @@ var ChainHeadCmd = &cli.Command{
}
var ChainGetBlock = &cli.Command{
Name: "getblock",
Name: "get-block",
Aliases: []string{"getblock"},
Usage: "Get a block and print its details",
ArgsUsage: "[blockCid]",
Flags: []cli.Flag{
@ -299,6 +300,7 @@ var ChainStatObjCmd = &cli.Command{
var ChainGetMsgCmd = &cli.Command{
Name: "getmessage",
Aliases: []string{"get-message", "get-msg"},
Usage: "Get and print a message by its cid",
ArgsUsage: "[messageCid]",
Action: func(cctx *cli.Context) error {
@ -349,6 +351,7 @@ var ChainGetMsgCmd = &cli.Command{
var ChainSetHeadCmd = &cli.Command{
Name: "sethead",
Aliases: []string{"set-head"},
Usage: "manually set the local nodes head tipset (Caution: normally only used for recovery)",
ArgsUsage: "[tipsetkey]",
Flags: []cli.Flag{

View File

@ -358,7 +358,8 @@ var NetId = &cli.Command{
}
var NetFindPeer = &cli.Command{
Name: "findpeer",
Name: "find-peer",
Aliases: []string{"findpeer"},
Usage: "Find the addresses of a given peerID",
ArgsUsage: "[peerId]",
Action: func(cctx *cli.Context) error {

View File

@ -1428,6 +1428,7 @@ func jsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error)
var StateWaitMsgCmd = &cli.Command{
Name: "wait-msg",
Aliases: []string{"wait-message"},
Usage: "Wait for a message to appear on chain",
ArgsUsage: "[messageCid]",
Flags: []cli.Flag{
@ -1470,6 +1471,7 @@ var StateWaitMsgCmd = &cli.Command{
var StateSearchMsgCmd = &cli.Command{
Name: "search-msg",
Aliases: []string{"search-message"},
Usage: "Search to see whether a message has appeared on chain",
ArgsUsage: "[messageCid]",
Action: func(cctx *cli.Context) error {
@ -1727,6 +1729,7 @@ var StateCircSupplyCmd = &cli.Command{
var StateSectorCmd = &cli.Command{
Name: "sector",
Aliases: []string{"sector-info"},
Usage: "Get miner sector info",
ArgsUsage: "[minerAddress] [sectorNumber]",
Action: func(cctx *cli.Context) error {

View File

@ -500,7 +500,7 @@ var walletVerify = &cli.Command{
var walletDelete = &cli.Command{
Name: "delete",
Usage: "Delete an account from the wallet",
Usage: "Soft delete an address from the wallet - hard deletion needed for permanent removal",
ArgsUsage: "<address> ",
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)

View File

@ -51,8 +51,9 @@ var actorCmd = &cli.Command{
}
var actorSetAddrsCmd = &cli.Command{
Name: "set-addrs",
Usage: "set addresses that your miner can be publicly dialed on",
Name: "set-addresses",
Aliases: []string{"set-addrs"},
Usage: "set addresses that your miner can be publicly dialed on",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "gas-limit",

View File

@ -522,8 +522,9 @@ var provingComputeCmd = &cli.Command{
}
var provingComputeWindowPoStCmd = &cli.Command{
Name: "window-post",
Usage: "Compute WindowPoSt for a specific deadline",
Name: "windowed-post",
Aliases: []string{"window-post"},
Usage: "Compute WindowPoSt for a specific deadline",
Description: `Note: This command is intended to be used to verify PoSt compute performance.
It will not send any messages to the chain.`,
ArgsUsage: "[deadline index]",

View File

@ -14,7 +14,8 @@ import (
)
var blockmsgidCmd = &cli.Command{
Name: "blockmsgid",
Name: "block-message-id",
Aliases: []string{"blockmsgid"},
Usage: "Print a block's pubsub message ID",
ArgsUsage: "<blockCid> ...",
Action: func(cctx *cli.Context) error {

View File

@ -41,8 +41,9 @@ type keyInfoOutput struct {
}
var keyinfoCmd = &cli.Command{
Name: "keyinfo",
Usage: "work with lotus keyinfo files (wallets and libp2p host keys)",
Name: "key-info",
Aliases: []string{"keyinfo"},
Usage: "work with lotus keyinfo files (wallets and libp2p host keys)",
Description: `The subcommands of keyinfo provide helpful tools for working with keyinfo files without
having to run the lotus daemon.`,
Subcommands: []*cli.Command{

View File

@ -119,7 +119,7 @@ var minerFaultsCmd = &cli.Command{
var minerCreateCmd = &cli.Command{
Name: "create",
Usage: "sends a create miner msg",
Usage: "sends a create miner message",
ArgsUsage: "[sender] [owner] [worker] [sector size]",
Action: func(cctx *cli.Context) error {
wapi, closer, err := lcli.GetFullNodeAPI(cctx)

View File

@ -20,7 +20,8 @@ var mpoolCmd = &cli.Command{
}
var minerSelectMsgsCmd = &cli.Command{
Name: "miner-select-msgs",
Name: "miner-select-messages",
Aliases: []string{"miner-select-msgs"},
Flags: []cli.Flag{
&cli.Float64Flag{
Name: "ticket-quality",

View File

@ -22,7 +22,8 @@ import (
)
var msgCmd = &cli.Command{
Name: "msg",
Name: "message",
Aliases: []string{"msg"},
Usage: "Translate message between various formats",
ArgsUsage: "Message in any form",
Action: func(cctx *cli.Context) error {

View File

@ -16,6 +16,7 @@ import (
var infoMessageSizeSimCommand = &cli.Command{
Name: "message-size",
Aliases: []string{"msg-size"},
Description: "Output information about message size distribution",
Flags: []cli.Flag{
&cli.Int64Flag{

View File

@ -144,7 +144,7 @@ var runCmd = &cli.Command{
Hidden: true,
},
},
Description: "For setup instructions see 'lotus-wallet --help'",
Description: "Needs FULLNODE_API_INFO env-var to be set before running (see lotus-wallet --help for setup instructions)",
Action: func(cctx *cli.Context) error {
log.Info("Starting lotus wallet")

View File

@ -289,7 +289,7 @@ var runCmd = &cli.Command{
return err
}
if cctx.Bool("commit") {
if cctx.Bool("commit") || cctx.Bool("prove-replica-update2") {
if err := paramfetch.GetParams(ctx, build.ParametersJSON(), build.SrsJSON(), uint64(ssize)); err != nil {
return xerrors.Errorf("get params: %w", err)
}
@ -313,7 +313,7 @@ var runCmd = &cli.Command{
}
if (workerType == sealtasks.WorkerSealing || cctx.IsSet("addpiece")) && cctx.Bool("addpiece") {
taskTypes = append(taskTypes, sealtasks.TTAddPiece)
taskTypes = append(taskTypes, sealtasks.TTAddPiece, sealtasks.TTDataCid)
}
if (workerType == sealtasks.WorkerSealing || cctx.IsSet("precommit1")) && cctx.Bool("precommit1") {
taskTypes = append(taskTypes, sealtasks.TTPreCommit1)

View File

@ -23,6 +23,7 @@ var tasksCmd = &cli.Command{
var allowSetting = map[sealtasks.TaskType]struct{}{
sealtasks.TTAddPiece: {},
sealtasks.TTDataCid: {},
sealtasks.TTPreCommit1: {},
sealtasks.TTPreCommit2: {},
sealtasks.TTCommit2: {},

View File

@ -7,6 +7,7 @@ import (
"encoding/base64"
"fmt"
"io/ioutil"
"math"
"os"
"os/exec"
"strconv"
@ -28,6 +29,7 @@ import (
"github.com/filecoin-project/test-vectors/schema"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
)
@ -51,6 +53,52 @@ var TipsetVectorOpts struct {
OnTipsetApplied []func(bs blockstore.Blockstore, params *ExecuteTipsetParams, res *ExecuteTipsetResult)
}
type GasPricingRestoreFn func()
// adjustGasPricing adjusts the global gas price mapping to make sure that the
// gas pricelist for vector's network version is used at the vector's epoch.
// Because it manipulates a global, it returns a function that reverts the
// change. The caller MUST invoke this function or the test vector runner will
// become invalid.
func adjustGasPricing(vectorEpoch abi.ChainEpoch, vectorNv network.Version) GasPricingRestoreFn {
// Stash the current pricing mapping.
// Ok to take a reference instead of a copy, because we override the map
// with a new one below.
var old = vm.Prices
// Resolve the epoch at which the vector network version kicks in.
var epoch abi.ChainEpoch = math.MaxInt64
if vectorNv == network.Version0 {
// genesis is not an upgrade.
epoch = 0
} else {
for _, u := range filcns.DefaultUpgradeSchedule() {
if u.Network == vectorNv {
epoch = u.Height
break
}
}
}
if epoch == math.MaxInt64 {
panic(fmt.Sprintf("could not resolve network version %d to height", vectorNv))
}
// Find the right pricelist for this network version.
pricelist := vm.PricelistByEpoch(epoch)
// Override the pricing mapping by setting the relevant pricelist for the
// network version at the epoch where the vector runs.
vm.Prices = map[abi.ChainEpoch]vm.Pricelist{
vectorEpoch: pricelist,
}
// Return a function to restore the original mapping.
return func() {
vm.Prices = old
}
}
// ExecuteMessageVector executes a message-class test vector.
func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) {
var (
@ -69,6 +117,10 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema
// Create a new Driver.
driver := NewDriver(ctx, vector.Selector, DriverOpts{DisableVMFlush: true})
// Monkey patch the gas pricing.
revertFn := adjustGasPricing(baseEpoch, nv)
defer revertFn()
// Apply every message.
for i, m := range vector.ApplyMessages {
msg, err := types.DecodeMessage(m.Bytes)

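The `adjustGasPricing` helper above is a stash-and-restore wrapper around the package-level `vm.Prices` map, returned as a closure so the caller can `defer` the revert (as `ExecuteMessageVector` does). A generic sketch of the same pattern, using a hypothetical `prices` global and `override` helper rather than the lotus symbols:

```go
package main

import "fmt"

var prices = map[int64]int{0: 1, 1000: 2}

// override swaps the global table and returns a closure that restores it.
// The caller must invoke the returned function, typically via defer.
func override(next map[int64]int) (restore func()) {
	old := prices
	prices = next
	return func() { prices = old }
}

func main() {
	restore := override(map[int64]int{500: 9})
	fmt.Println(prices) // map[500:9]
	restore()
	fmt.Println(prices) // map[0:1 1000:2]
}
```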
View File

@ -15,6 +15,7 @@
* [Check](#Check)
* [CheckProvable](#CheckProvable)
* [Compute](#Compute)
* [ComputeDataCid](#ComputeDataCid)
* [ComputeProof](#ComputeProof)
* [ComputeWindowPoSt](#ComputeWindowPoSt)
* [Create](#Create)
@ -105,6 +106,7 @@
* [PledgeSector](#PledgeSector)
* [Return](#Return)
* [ReturnAddPiece](#ReturnAddPiece)
* [ReturnDataCid](#ReturnDataCid)
* [ReturnFetch](#ReturnFetch)
* [ReturnFinalizeReplicaUpdate](#ReturnFinalizeReplicaUpdate)
* [ReturnFinalizeSector](#ReturnFinalizeSector)
@ -361,6 +363,29 @@ Response:
## Compute
### ComputeDataCid
Perms: admin
Inputs:
```json
[
1024,
{}
]
```
Response:
```json
{
"Size": 1032,
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
}
```
### ComputeProof
@ -2195,6 +2220,36 @@ Response:
### ReturnAddPiece
Perms: admin
Inputs:
```json
[
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
},
{
"Size": 1032,
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
},
{
"Code": 0,
"Message": "string value"
}
]
```
Response: `{}`
### ReturnDataCid
storiface.WorkerReturn
@ -4020,6 +4075,88 @@ Response:
"BaseMinMemory": 68719476736
}
},
"seal/v0/datacid": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"3": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"4": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"8": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"9": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
}
},
"seal/v0/fetch": {
"0": {
"MinMemory": 1048576,

View File

@ -9,6 +9,8 @@
* [Version](#Version)
* [Add](#Add)
* [AddPiece](#AddPiece)
* [Data](#Data)
* [DataCid](#DataCid)
* [Finalize](#Finalize)
* [FinalizeReplicaUpdate](#FinalizeReplicaUpdate)
* [FinalizeSector](#FinalizeSector)
@ -520,6 +522,88 @@ Response:
"BaseMinMemory": 68719476736
}
},
"seal/v0/datacid": {
"0": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"1": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"2": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"3": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"4": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"5": {
"MinMemory": 2048,
"MaxMemory": 2048,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 2048
},
"6": {
"MinMemory": 8388608,
"MaxMemory": 8388608,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 8388608
},
"7": {
"MinMemory": 1073741824,
"MaxMemory": 1073741824,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"8": {
"MinMemory": 4294967296,
"MaxMemory": 4294967296,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
},
"9": {
"MinMemory": 8589934592,
"MaxMemory": 8589934592,
"GPUUtilization": 0,
"MaxParallelism": 1,
"MaxParallelismGPU": 0,
"BaseMinMemory": 1073741824
}
},
"seal/v0/fetch": {
"0": {
"MinMemory": 1048576,
@ -1242,7 +1326,6 @@ Response: `131584`
### AddPiece
storiface.WorkerCalls
Perms: admin
@ -1276,6 +1359,34 @@ Response:
}
```
## Data
### DataCid
storiface.WorkerCalls
Perms: admin
Inputs:
```json
[
1024,
{}
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Finalize

View File

@ -165,6 +165,7 @@
* [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds)
* [StateDecodeParams](#StateDecodeParams)
* [StateGetActor](#StateGetActor)
* [StateGetNetworkParams](#StateGetNetworkParams)
* [StateGetRandomnessFromBeacon](#StateGetRandomnessFromBeacon)
* [StateGetRandomnessFromTickets](#StateGetRandomnessFromTickets)
* [StateGetReceipt](#StateGetReceipt)
@ -5185,6 +5186,49 @@ Response:
}
```
### StateGetNetworkParams
StateGetNetworkParams returns the current network params
Perms: read
Inputs: `null`
Response:
```json
{
"NetworkName": "lotus",
"BlockDelaySecs": 42,
"ConsensusMinerMinPower": "0",
"SupportedProofTypes": [
8
],
"PreCommitChallengeDelay": 10101,
"ForkUpgradeParams": {
"UpgradeSmokeHeight": 10101,
"UpgradeBreezeHeight": 10101,
"UpgradeIgnitionHeight": 10101,
"UpgradeLiftoffHeight": 10101,
"UpgradeAssemblyHeight": 10101,
"UpgradeRefuelHeight": 10101,
"UpgradeTapeHeight": 10101,
"UpgradeKumquatHeight": 10101,
"UpgradePriceListOopsHeight": 10101,
"BreezeGasTampingDuration": 10101,
"UpgradeCalicoHeight": 10101,
"UpgradePersianHeight": 10101,
"UpgradeOrangeHeight": 10101,
"UpgradeClausHeight": 10101,
"UpgradeTrustHeight": 10101,
"UpgradeNorwegianHeight": 10101,
"UpgradeTurboHeight": 10101,
"UpgradeHyperdriveHeight": 10101,
"UpgradeChocolateHeight": 10101,
"UpgradeOhSnapHeight": 10101
}
}
```
### StateGetRandomnessFromBeacon
StateGetRandomnessFromBeacon is used to sample the beacon for randomness.

View File

@ -175,12 +175,14 @@
* [StateDecodeParams](#StateDecodeParams)
* [StateEncodeParams](#StateEncodeParams)
* [StateGetActor](#StateGetActor)
* [StateGetNetworkParams](#StateGetNetworkParams)
* [StateGetRandomnessFromBeacon](#StateGetRandomnessFromBeacon)
* [StateGetRandomnessFromTickets](#StateGetRandomnessFromTickets)
* [StateListActors](#StateListActors)
* [StateListMessages](#StateListMessages)
* [StateListMiners](#StateListMiners)
* [StateLookupID](#StateLookupID)
* [StateLookupRobustAddress](#StateLookupRobustAddress)
* [StateMarketBalance](#StateMarketBalance)
* [StateMarketDeals](#StateMarketDeals)
* [StateMarketParticipants](#StateMarketParticipants)
@ -5037,7 +5039,7 @@ A nil TipSetKey can be provided as a param, this will cause the heaviest tipset
### StateAccountKey
StateAccountKey returns the public key address of the given ID address
StateAccountKey returns the public key address of the given ID address for secp and bls accounts
Perms: read
@ -5641,6 +5643,49 @@ Response:
}
```
### StateGetNetworkParams
StateGetNetworkParams returns the current network params
Perms: read
Inputs: `null`
Response:
```json
{
"NetworkName": "lotus",
"BlockDelaySecs": 42,
"ConsensusMinerMinPower": "0",
"SupportedProofTypes": [
8
],
"PreCommitChallengeDelay": 10101,
"ForkUpgradeParams": {
"UpgradeSmokeHeight": 10101,
"UpgradeBreezeHeight": 10101,
"UpgradeIgnitionHeight": 10101,
"UpgradeLiftoffHeight": 10101,
"UpgradeAssemblyHeight": 10101,
"UpgradeRefuelHeight": 10101,
"UpgradeTapeHeight": 10101,
"UpgradeKumquatHeight": 10101,
"UpgradePriceListOopsHeight": 10101,
"BreezeGasTampingDuration": 10101,
"UpgradeCalicoHeight": 10101,
"UpgradePersianHeight": 10101,
"UpgradeOrangeHeight": 10101,
"UpgradeClausHeight": 10101,
"UpgradeTrustHeight": 10101,
"UpgradeNorwegianHeight": 10101,
"UpgradeTurboHeight": 10101,
"UpgradeHyperdriveHeight": 10101,
"UpgradeChocolateHeight": 10101,
"UpgradeOhSnapHeight": 10101
}
}
```
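A minimal Go sketch of calling this method over the v1 RPC endpoint. It assumes the stock `client.NewFullNodeRPCV1` constructor, a node listening on the default `127.0.0.1:1234` address, and an auth token supplied out of band (a real caller would parse `FULLNODE_API_INFO` and set the `Authorization` header); the field accesses match the example response above.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Assumed endpoint; add an Authorization header for authenticated setups.
	api, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", http.Header{})
	if err != nil {
		panic(err)
	}
	defer closer()

	params, err := api.StateGetNetworkParams(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(params.NetworkName, params.BlockDelaySecs)
}
```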
### StateGetRandomnessFromBeacon
StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
@ -5783,6 +5828,29 @@ Response:
StateLookupID retrieves the ID address of the given address
Perms: read
Inputs:
```json
[
"f01234",
[
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
}
]
]
```
Response: `"f01234"`
### StateLookupRobustAddress
StateLookupRobustAddress returns the public key address of the given ID address for non-account addresses (multisig, miners, etc.)
Perms: read
Inputs:

View File

@ -7,7 +7,7 @@ USAGE:
lotus-miner [global options] command [command options] [arguments...]
VERSION:
1.15.2
1.15.3
COMMANDS:
init Initialize a lotus miner repo
@ -236,35 +236,24 @@ USAGE:
lotus-miner actor command [command options] [arguments...]
COMMANDS:
set-addrs set addresses that your miner can be publicly dialed on
withdraw withdraw available balance
repay-debt pay down a miner's debt
set-peer-id set the peer id of your miner
set-owner Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)
control Manage control addresses
propose-change-worker Propose a worker address change
confirm-change-worker Confirm a worker address change
compact-allocated compact allocated sectors bitfield
help, h Shows a list of commands or help for one command
set-addresses, set-addrs set addresses that your miner can be publicly dialed on
withdraw withdraw available balance
repay-debt pay down a miner's debt
set-peer-id set the peer id of your miner
set-owner Set owner address (this command should be invoked twice, first with the old owner as the senderAddress, and then with the new owner)
control Manage control addresses
propose-change-worker Propose a worker address change
confirm-change-worker Confirm a worker address change
compact-allocated compact allocated sectors bitfield
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help (default: false)
```
### lotus-miner actor set-addrs
#### lotus-miner actor set-addresses, set-addrs
```
NAME:
lotus-miner actor set-addrs - set addresses that your miner can be publicly dialed on
USAGE:
lotus-miner actor set-addrs [command options] [arguments...]
OPTIONS:
--gas-limit value set gas limit (default: 0)
--unset unset address (default: false)
--help, -h show help (default: false)
```
### lotus-miner actor withdraw
@ -1199,22 +1188,22 @@ USAGE:
lotus-miner net command [command options] [arguments...]
COMMANDS:
peers Print peers
ping Ping peers
connect Connect to a peer
listen List listen addresses
id Get node identity
findpeer Find the addresses of a given peerID
scores Print peers' pubsub scores
reachability Print information about reachability from the internet
bandwidth Print bandwidth usage information
block Manage network connection gating rules
stat Report resource usage for a scope
limit Get or set resource limits for a scope
protect Add one or more peer IDs to the list of protected peer connections
unprotect Remove one or more peer IDs from the list of protected peer connections.
list-protected List the peer IDs with protected connection.
help, h Shows a list of commands or help for one command
peers Print peers
ping Ping peers
connect Connect to a peer
listen List listen addresses
id Get node identity
find-peer, findpeer Find the addresses of a given peerID
scores Print peers' pubsub scores
reachability Print information about reachability from the internet
bandwidth Print bandwidth usage information
block Manage network connection gating rules
stat Report resource usage for a scope
limit Get or set resource limits for a scope
protect Add one or more peer IDs to the list of protected peer connections
unprotect Remove one or more peer IDs from the list of protected peer connections.
list-protected List the peer IDs with protected connection.
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help (default: false)
@ -1290,17 +1279,8 @@ OPTIONS:
```
### lotus-miner net findpeer
#### lotus-miner net find-peer, findpeer
```
NAME:
lotus-miner net findpeer - Find the addresses of a given peerID
USAGE:
lotus-miner net findpeer [command options] [peerId]
OPTIONS:
--help, -h show help (default: false)
```
### lotus-miner net scores
@ -2141,29 +2121,16 @@ USAGE:
lotus-miner proving compute command [command options] [arguments...]
COMMANDS:
window-post Compute WindowPoSt for a specific deadline
help, h Shows a list of commands or help for one command
windowed-post, window-post Compute WindowPoSt for a specific deadline
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help (default: false)
```
#### lotus-miner proving compute window-post
##### lotus-miner proving compute windowed-post, window-post
```
NAME:
lotus-miner proving compute window-post - Compute WindowPoSt for a specific deadline
USAGE:
lotus-miner proving compute window-post [command options] [deadline index]
DESCRIPTION:
Note: This command is intended to be used to verify PoSt compute performance.
It will not send any messages to the chain.
OPTIONS:
--help, -h show help (default: false)
```
## lotus-miner storage

View File

@ -7,7 +7,7 @@ USAGE:
lotus-worker [global options] command [command options] [arguments...]
VERSION:
1.15.2
1.15.3
COMMANDS:
run Start lotus worker
@ -173,7 +173,7 @@ NAME:
lotus-worker tasks enable - Enable a task type
USAGE:
lotus-worker tasks enable [command options] [UNS|C2|PC2|PC1|PR2|RU|AP|GSK]
lotus-worker tasks enable [command options] [UNS|C2|PC2|PC1|PR2|RU|AP|DC|GSK]
OPTIONS:
--help, -h show help (default: false)
@ -186,7 +186,7 @@ NAME:
lotus-worker tasks disable - Disable a task type
USAGE:
lotus-worker tasks disable [command options] [UNS|C2|PC2|PC1|PR2|RU|AP|GSK]
lotus-worker tasks disable [command options] [UNS|C2|PC2|PC1|PR2|RU|AP|DC|GSK]
OPTIONS:
--help, -h show help (default: false)

View File

@ -7,7 +7,7 @@ USAGE:
lotus [global options] command [command options] [arguments...]
VERSION:
1.15.2
1.15.3
COMMANDS:
daemon Start a lotus daemon process
@ -212,7 +212,7 @@ COMMANDS:
set-default Set default wallet address
sign sign a message
verify verify the signature of a message
delete Delete an account from the wallet
delete Soft delete an address from the wallet - hard deletion needed for permanent removal
market Interact with market balances
help, h Shows a list of commands or help for one command
@ -346,7 +346,7 @@ OPTIONS:
### lotus wallet delete
```
NAME:
lotus wallet delete - Delete an account from the wallet
lotus wallet delete - Soft delete an address from the wallet - hard deletion needed for permanent removal
USAGE:
lotus wallet delete [command options] <address>
@ -1726,30 +1726,30 @@ USAGE:
lotus state command [command options] [arguments...]
COMMANDS:
power Query network or miner power
sectors Query the sector set of a miner
active-sectors Query the active sector set of a miner
list-actors list all actors in the network
list-miners list all miners in the network
circulating-supply Get the exact current circulating supply of Filecoin
sector Get miner sector info
get-actor Print actor information
lookup Find corresponding ID address
replay Replay a particular message
sector-size Look up miners sector size
read-state View a json representation of an actors state
list-messages list messages on chain matching given criteria
compute-state Perform state computations
call Invoke a method on an actor locally
get-deal View on-chain deal info
wait-msg Wait for a message to appear on chain
search-msg Search to see whether a message has appeared on chain
miner-info Retrieve miner information
market Inspect the storage market actor
exec-trace Get the execution trace of a given message
network-version Returns the network version
miner-proving-deadline Retrieve information about a given miner's proving deadline
help, h Shows a list of commands or help for one command
power Query network or miner power
sectors Query the sector set of a miner
active-sectors Query the active sector set of a miner
list-actors list all actors in the network
list-miners list all miners in the network
circulating-supply Get the exact current circulating supply of Filecoin
sector, sector-info Get miner sector info
get-actor Print actor information
lookup Find corresponding ID address
replay Replay a particular message
sector-size Look up miners sector size
read-state View a json representation of an actors state
list-messages list messages on chain matching given criteria
compute-state Perform state computations
call Invoke a method on an actor locally
get-deal View on-chain deal info
wait-msg, wait-message Wait for a message to appear on chain
search-msg, search-message Search to see whether a message has appeared on chain
miner-info Retrieve miner information
market Inspect the storage market actor
exec-trace Get the execution trace of a given message
network-version Returns the network version
miner-proving-deadline Retrieve information about a given miner's proving deadline
help, h Shows a list of commands or help for one command
OPTIONS:
--tipset value specify tipset to call method on (pass comma separated array of cids)
@ -1837,17 +1837,8 @@ OPTIONS:
```
### lotus state sector
#### lotus state sector, sector-info
```
NAME:
lotus state sector - Get miner sector info
USAGE:
lotus state sector [command options] [minerAddress] [sectorNumber]
OPTIONS:
--help, -h show help (default: false)
```
### lotus state get-actor
@ -1985,31 +1976,12 @@ OPTIONS:
```
### lotus state wait-msg
#### lotus state wait-msg, wait-message
```
NAME:
lotus state wait-msg - Wait for a message to appear on chain
USAGE:
lotus state wait-msg [command options] [messageCid]
OPTIONS:
--timeout value (default: "10m")
--help, -h show help (default: false)
```
### lotus state search-msg
#### lotus state search-msg, search-message
```
NAME:
lotus state search-msg - Search to see whether a message has appeared on chain
USAGE:
lotus state search-msg [command options] [messageCid]
OPTIONS:
--help, -h show help (default: false)
```
### lotus state miner-info
@ -2103,24 +2075,24 @@ USAGE:
lotus chain command [command options] [arguments...]
COMMANDS:
head Print chain head
getblock Get a block and print its details
read-obj Read the raw bytes of an object
delete-obj Delete an object from the chain blockstore
stat-obj Collect size and ipld link counts for objs
getmessage Get and print a message by its cid
sethead manually set the local nodes head tipset (Caution: normally only used for recovery)
list, love View a segment of the chain
get Get chain DAG node by path
bisect bisect chain for an event
export export chain to a car file
slash-consensus Report consensus fault
gas-price Estimate gas prices
inspect-usage Inspect block space usage of a given tipset
decode decode various types
encode encode various types
disputer interact with the window post disputer
help, h Shows a list of commands or help for one command
head Print chain head
get-block, getblock Get a block and print its details
read-obj Read the raw bytes of an object
delete-obj Delete an object from the chain blockstore
stat-obj Collect size and ipld link counts for objs
getmessage, get-message, get-msg Get and print a message by its cid
sethead, set-head manually set the local nodes head tipset (Caution: normally only used for recovery)
list, love View a segment of the chain
get Get chain DAG node by path
bisect bisect chain for an event
export export chain to a car file
slash-consensus Report consensus fault
gas-price Estimate gas prices
inspect-usage Inspect block space usage of a given tipset
decode decode various types
encode encode various types
disputer interact with the window post disputer
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help (default: false)
@ -2140,18 +2112,8 @@ OPTIONS:
```
### lotus chain getblock
#### lotus chain get-block, getblock
```
NAME:
lotus chain getblock - Get a block and print its details
USAGE:
lotus chain getblock [command options] [blockCid]
OPTIONS:
--raw print just the raw block header (default: false)
--help, -h show help (default: false)
```
### lotus chain read-obj
@ -2204,32 +2166,12 @@ OPTIONS:
```
### lotus chain getmessage
##### lotus chain getmessage, get-message, get-msg
```
NAME:
lotus chain getmessage - Get and print a message by its cid
USAGE:
lotus chain getmessage [command options] [messageCid]
OPTIONS:
--help, -h show help (default: false)
```
### lotus chain sethead
#### lotus chain sethead, set-head
```
NAME:
lotus chain sethead - manually set the local nodes head tipset (Caution: normally only used for recovery)
USAGE:
lotus chain sethead [command options] [tipsetkey]
OPTIONS:
--genesis reset head to genesis (default: false)
--epoch value reset head to given epoch (default: 0)
--help, -h show help (default: false)
```
#### lotus chain list, love
@ -2602,22 +2544,22 @@ USAGE:
lotus net command [command options] [arguments...]
COMMANDS:
peers Print peers
ping Ping peers
connect Connect to a peer
listen List listen addresses
id Get node identity
findpeer Find the addresses of a given peerID
scores Print peers' pubsub scores
reachability Print information about reachability from the internet
bandwidth Print bandwidth usage information
block Manage network connection gating rules
stat Report resource usage for a scope
limit Get or set resource limits for a scope
protect Add one or more peer IDs to the list of protected peer connections
unprotect Remove one or more peer IDs from the list of protected peer connections.
list-protected List the peer IDs with protected connection.
help, h Shows a list of commands or help for one command
peers Print peers
ping Ping peers
connect Connect to a peer
listen List listen addresses
id Get node identity
find-peer, findpeer Find the addresses of a given peerID
scores Print peers' pubsub scores
reachability Print information about reachability from the internet
bandwidth Print bandwidth usage information
block Manage network connection gating rules
stat Report resource usage for a scope
limit Get or set resource limits for a scope
protect Add one or more peer IDs to the list of protected peer connections
unprotect Remove one or more peer IDs from the list of protected peer connections.
list-protected List the peer IDs with protected connection.
help, h Shows a list of commands or help for one command
OPTIONS:
--help, -h show help (default: false)
@ -2693,17 +2635,8 @@ OPTIONS:
```
### lotus net findpeer
#### lotus net find-peer, findpeer
```
NAME:
lotus net findpeer - Find the addresses of a given peerID
USAGE:
lotus net findpeer [command options] [peerId]
OPTIONS:
--help, -h show help (default: false)
```
### lotus net scores

View File

@ -20,7 +20,7 @@
#
# type: bool
# env var: LOTUS_BACKUP_DISABLEMETADATALOG
#DisableMetadataLog = false
#DisableMetadataLog = true
[Logging]

View File

@ -20,7 +20,7 @@
#
# type: bool
# env var: LOTUS_BACKUP_DISABLEMETADATALOG
#DisableMetadataLog = false
#DisableMetadataLog = true
[Logging]
@ -265,7 +265,7 @@
[IndexProvider]
# Enable set whether to enable indexing announcement to the network and expose endpoints that
# allow indexer nodes to process announcements. Disabled by default.
# allow indexer nodes to process announcements. Enabled by default.
#
# type: bool
# env var: LOTUS_INDEXPROVIDER_ENABLE

View File

@ -15,7 +15,7 @@ may be subject to change.
Complete gas balancing is performed using `lotus-bench`; the process is based on importing a chain export
and collecting gas traces, which are later aggregated.
Before building `lotus-bench` make sure `EnableGasTracing` in `chain/vm/runtime.go` is set to `true`.
Before building `lotus-bench` make sure `EnableDetailedTracing` in `chain/vm/runtime.go` is set to `true`.
The process can be started using `./lotus-bench import` with the `--car` flag set to the location of
a CAR chain export. `--start-epoch` and `--end-epoch` can be used to limit the range of epochs to run

extern/filecoin-ffi vendored

@ -1 +1 @@
Subproject commit 3f9ecec25017a871eaa1e673d869ce2efd109881
Subproject commit cd52de8dbcf117109c8d6f02c17e855ccdc0497c

View File

@ -51,6 +51,120 @@ func (sb *Sealer) NewSector(ctx context.Context, sector storage.SectorRef) error
return nil
}
func (sb *Sealer) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
// TODO: allow tuning those:
chunk := abi.PaddedPieceSize(4 << 20)
parallel := runtime.NumCPU()
maxSizeSpt := abi.RegisteredSealProof_StackedDrg64GiBV1_1
throttle := make(chan []byte, parallel)
piecePromises := make([]func() (abi.PieceInfo, error), 0)
buf := make([]byte, chunk.Unpadded())
for i := 0; i < parallel; i++ {
if abi.UnpaddedPieceSize(i)*chunk.Unpadded() >= pieceSize {
break // won't use this many buffers
}
throttle <- make([]byte, chunk.Unpadded())
}
for {
var read int
for rbuf := buf; len(rbuf) > 0; {
n, err := pieceData.Read(rbuf)
if err != nil && err != io.EOF {
return abi.PieceInfo{}, xerrors.Errorf("pr read error: %w", err)
}
rbuf = rbuf[n:]
read += n
if err == io.EOF {
break
}
}
if read == 0 {
break
}
done := make(chan struct {
cid.Cid
error
}, 1)
pbuf := <-throttle
copy(pbuf, buf[:read])
go func(read int) {
defer func() {
throttle <- pbuf
}()
c, err := sb.pieceCid(maxSizeSpt, pbuf[:read])
done <- struct {
cid.Cid
error
}{c, err}
}(read)
piecePromises = append(piecePromises, func() (abi.PieceInfo, error) {
select {
case e := <-done:
if e.error != nil {
return abi.PieceInfo{}, e.error
}
return abi.PieceInfo{
Size: abi.UnpaddedPieceSize(read).Padded(),
PieceCID: e.Cid,
}, nil
case <-ctx.Done():
return abi.PieceInfo{}, ctx.Err()
}
})
}
if len(piecePromises) == 1 {
return piecePromises[0]()
}
var payloadRoundedBytes abi.PaddedPieceSize
pieceCids := make([]abi.PieceInfo, len(piecePromises))
for i, promise := range piecePromises {
pinfo, err := promise()
if err != nil {
return abi.PieceInfo{}, err
}
pieceCids[i] = pinfo
payloadRoundedBytes += pinfo.Size
}
pieceCID, err := ffi.GenerateUnsealedCID(maxSizeSpt, pieceCids)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("generate unsealed CID: %w", err)
}
// validate that the pieceCID was properly formed
if _, err := commcid.CIDToPieceCommitmentV1(pieceCID); err != nil {
return abi.PieceInfo{}, err
}
if payloadRoundedBytes < pieceSize.Padded() {
paddedCid, err := commpffi.ZeroPadPieceCommitment(pieceCID, payloadRoundedBytes.Unpadded(), pieceSize)
if err != nil {
return abi.PieceInfo{}, xerrors.Errorf("failed to pad data: %w", err)
}
pieceCID = paddedCid
}
return abi.PieceInfo{
Size: pieceSize.Padded(),
PieceCID: pieceCID,
}, nil
}
func (sb *Sealer) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieceSizes []abi.UnpaddedPieceSize, pieceSize abi.UnpaddedPieceSize, file storage.Data) (abi.PieceInfo, error) {
// TODO: allow tuning those:
chunk := abi.PaddedPieceSize(4 << 20)

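The `DataCid` implementation above bounds concurrency with a channel that doubles as a semaphore and a pool of reusable buffers, collecting per-chunk results through promise closures. A stripped-down, self-contained sketch of that throttling pattern, with a stand-in `hashChunk` instead of the real piece-CID computation:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"runtime"
)

// hashChunk stands in for the per-chunk piece-CID computation.
func hashChunk(b []byte) [32]byte { return sha256.Sum256(b) }

func main() {
	data := make([]byte, 10<<20) // pretend this is the piece payload
	chunk := 4 << 20
	parallel := runtime.NumCPU()

	// The channel doubles as a semaphore and a pool of reusable buffers.
	throttle := make(chan []byte, parallel)
	for i := 0; i < parallel; i++ {
		throttle <- make([]byte, chunk)
	}

	var promises []func() [32]byte
	for off := 0; off < len(data); off += chunk {
		end := off + chunk
		if end > len(data) {
			end = len(data)
		}

		buf := <-throttle // blocks until a buffer (and a worker slot) is free
		n := copy(buf, data[off:end])

		done := make(chan [32]byte, 1)
		go func(b []byte, n int) {
			defer func() { throttle <- b }() // return the buffer to the pool
			done <- hashChunk(b[:n])
		}(buf, n)

		promises = append(promises, func() [32]byte { return <-done })
	}

	// Collect results in submission order.
	for i, p := range promises {
		fmt.Printf("chunk %d: %x\n", i, p())
	}
}
```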
View File

@ -165,7 +165,7 @@ func New(ctx context.Context, lstor *stores.Local, stor stores.Store, ls stores.
sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFetch, sealtasks.TTFinalizeReplicaUpdate,
}
if sc.AllowAddPiece {
localTasks = append(localTasks, sealtasks.TTAddPiece)
localTasks = append(localTasks, sealtasks.TTAddPiece, sealtasks.TTDataCid)
}
if sc.AllowPreCommit1 {
localTasks = append(localTasks, sealtasks.TTPreCommit1)
@ -327,6 +327,27 @@ func (m *Manager) NewSector(ctx context.Context, sector storage.SectorRef) error
return nil
}
func (m *Manager) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
selector := newTaskSelector()
var out abi.PieceInfo
err := m.sched.Schedule(ctx, storage.NoSectorRef, sealtasks.TTDataCid, selector, schedNop, func(ctx context.Context, w Worker) error {
p, err := m.waitSimpleCall(ctx)(w.DataCid(ctx, pieceSize, pieceData))
if err != nil {
return err
}
if p != nil {
out = p.(abi.PieceInfo)
}
return nil
})
return out, err
}
func (m *Manager) AddPiece(ctx context.Context, sector storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@ -975,6 +996,10 @@ func (m *Manager) ProveReplicaUpdate2(ctx context.Context, sector storage.Sector
return out, waitErr
}
func (m *Manager) ReturnDataCid(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
return m.returnResult(ctx, callID, pi, err)
}
func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
return m.returnResult(ctx, callID, pi, err)
}

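With the new `seal/v0/datacid` task type, `Manager.DataCid` above schedules piece-CID computation on any worker that accepts the task, without binding it to a sector. A hedged sketch of a caller: `computePieceCID` is a hypothetical helper, it assumes an already-constructed `*Manager` (wired up the way lotus-miner builds one), that the scheduler routes the task to a worker advertising `TTDataCid`, and that `storage.Data` is the usual `io.Reader` alias from specs-storage.

```go
package sectorstorage

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"
)

// computePieceCID is a sketch of driving the new DataCid path.
func computePieceCID(ctx context.Context, mgr *Manager, data storage.Data, size abi.UnpaddedPieceSize) (cid.Cid, error) {
	pi, err := mgr.DataCid(ctx, size, data)
	if err != nil {
		return cid.Undef, xerrors.Errorf("computing data cid: %w", err)
	}
	// pi.Size is the padded piece size; pi.PieceCID is the piece commitment (commP).
	return pi.PieceCID, nil
}
```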
View File

@ -80,6 +80,10 @@ func (mgr *SectorMgr) SectorsUnsealPiece(ctx context.Context, sector storage.Sec
panic("SectorMgr: unsealing piece: implement me")
}
func (mgr *SectorMgr) DataCid(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
panic("todo")
}
func (mgr *SectorMgr) AddPiece(ctx context.Context, sectorID storage.SectorRef, existingPieces []abi.UnpaddedPieceSize, size abi.UnpaddedPieceSize, r io.Reader) (abi.PieceInfo, error) {
log.Warn("Add piece: ", sectorID, size, sectorID.ProofType)
@ -537,6 +541,10 @@ func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStPr
var _ storiface.WorkerReturn = &SectorMgr{}
func (mgr *SectorMgr) ReturnDataCid(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
panic("not supported")
}
func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
panic("not supported")
}

View File

@ -68,6 +68,10 @@ type schedTestWorker struct {
ignoreResources bool
}
func (s *schedTestWorker) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
panic("implement me")
}
func (s *schedTestWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
panic("implement me")
}

View File

@ -3,6 +3,7 @@ package sealtasks
type TaskType string
const (
TTDataCid TaskType = "seal/v0/datacid"
TTAddPiece TaskType = "seal/v0/addpiece"
TTPreCommit1 TaskType = "seal/v0/precommit/1"
TTPreCommit2 TaskType = "seal/v0/precommit/2"
@ -25,7 +26,8 @@ const (
)
var order = map[TaskType]int{
TTRegenSectorKey: 10, // least priority
TTRegenSectorKey: 11, // least priority
TTDataCid: 10,
TTAddPiece: 9,
TTReplicaUpdate: 8,
TTProveReplicaUpdate2: 7,
@ -44,6 +46,7 @@ var order = map[TaskType]int{
}
var shortNames = map[TaskType]string{
TTDataCid: "DC",
TTAddPiece: "AP",
TTPreCommit1: "PC1",

View File

@ -827,7 +827,7 @@ func (r *Remote) GenerateSingleVanillaProof(ctx context.Context, minerID abi.Act
log.Error("response close: ", err)
}
return nil, xerrors.Errorf("non-200 code from %s: '%s'", url, string(body))
return nil, xerrors.Errorf("non-200 code from %s: '%s'", url, strings.TrimSpace(string(body)))
}
body, err := ioutil.ReadAll(resp.Body)

View File

@ -30,7 +30,7 @@ var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads
FTUnsealed: FSOverheadDen,
FTSealed: FSOverheadDen,
FTUpdate: FSOverheadDen,
FTUpdateCache: FSOverheadDen * 2,
FTUpdateCache: FSOverheadDen*2 + 1,
FTCache: 141, // 11 layers + D(2x ssize) + C + R'
}
@ -39,9 +39,9 @@ var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads
var FsOverheadFinalized = map[SectorFileType]int{
FTUnsealed: FSOverheadDen,
FTSealed: FSOverheadDen,
FTUpdate: FSOverheadDen * 2, // XXX: we should clear the update cache on Finalize???
FTUpdateCache: FSOverheadDen,
FTCache: 2,
FTUpdate: FSOverheadDen,
FTUpdateCache: 1,
FTCache: 1,
}
type SectorFileType int

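The overhead tables above express per-file-type space estimates in units of `FSOverheadDen`-ths of the sector size; the `// 10x overheads` comment suggests that denominator is 10, which is treated here as an assumption. A quick worked check of what the `FTCache` weights imply for a 32 GiB sector:

```go
package main

import "fmt"

func main() {
	const ssize = int64(32) << 30 // 32 GiB sector
	const den = 10                // assumed value of FSOverheadDen ("10x overheads")

	// While sealing, FTCache is weighted 141, i.e. 14.1x the sector size.
	fmt.Println("sealing cache estimate:", ssize*141/den) // ≈ 451 GiB

	// After finalization the new weight is 1, i.e. 0.1x the sector size.
	fmt.Println("finalized cache estimate:", ssize*1/den) // ≈ 3.2 GiB
}
```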
View File

@ -569,6 +569,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources
func init() {
ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately
ResourceTable[sealtasks.TTRegenSectorKey] = ResourceTable[sealtasks.TTReplicaUpdate]
ResourceTable[sealtasks.TTDataCid] = ResourceTable[sealtasks.TTAddPiece]
// V1_1 is the same as V1
for _, m := range ResourceTable {

View File

@ -117,6 +117,7 @@ var UndefCall CallID
type WorkerCalls interface {
// async
DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error)
AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (CallID, error)
SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (CallID, error)
SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (CallID, error)
@ -197,6 +198,7 @@ func Err(code ErrorCode, sub error) *CallError {
}
type WorkerReturn interface {
ReturnDataCid(ctx context.Context, callID CallID, pi abi.PieceInfo, err *CallError) error
ReturnAddPiece(ctx context.Context, callID CallID, pi abi.PieceInfo, err *CallError) error
ReturnSealPreCommit1(ctx context.Context, callID CallID, p1o storage.PreCommit1Out, err *CallError) error
ReturnSealPreCommit2(ctx context.Context, callID CallID, sealed storage.SectorCids, err *CallError) error

View File

@ -23,6 +23,10 @@ type testExec struct {
apch chan chan apres
}
func (t *testExec) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error) {
panic("implement me")
}
func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) {
panic("implement me")
}

View File

@ -180,6 +180,7 @@ func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) {
type ReturnType string
const (
DataCid ReturnType = "DataCid"
AddPiece ReturnType = "AddPiece"
SealPreCommit1 ReturnType = "SealPreCommit1"
SealPreCommit2 ReturnType = "SealPreCommit2"
@ -232,6 +233,7 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor
}
var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{
DataCid: rfunc(storiface.WorkerReturn.ReturnDataCid),
AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece),
SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1),
SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2),
@ -341,6 +343,17 @@ func (l *LocalWorker) NewSector(ctx context.Context, sector storage.SectorRef) e
return sb.NewSector(ctx, sector)
}
func (l *LocalWorker) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
sb, err := l.executor()
if err != nil {
return storiface.UndefCall, err
}
return l.asyncCall(ctx, storage.NoSectorRef, DataCid, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
return sb.DataCid(ctx, pieceSize, pieceData)
})
}
func (l *LocalWorker) AddPiece(ctx context.Context, sector storage.SectorRef, epcs []abi.UnpaddedPieceSize, sz abi.UnpaddedPieceSize, r io.Reader) (storiface.CallID, error) {
sb, err := l.executor()
if err != nil {

View File

@ -186,6 +186,12 @@ func (t *trackedWorker) FinalizeSector(ctx context.Context, sector storage.Secto
return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTFinalize, func() (storiface.CallID, error) { return t.Worker.FinalizeSector(ctx, sector, keepUnsealed) })
}
func (t *trackedWorker) DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, storage.NoSectorRef, sealtasks.TTDataCid, func() (storiface.CallID, error) {
return t.Worker.DataCid(ctx, pieceSize, pieceData)
})
}
func (t *trackedWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) {
return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTAddPiece, func() (storiface.CallID, error) {
return t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)

View File

@ -3,6 +3,7 @@ package sealing
import (
"bytes"
"context"
"fmt"
"github.com/filecoin-project/go-state-types/network"
@ -138,18 +139,19 @@ func (mgr *CurrentDealInfoManager) dealIDFromPublishDealsMsg(ctx context.Context
break
}
}
fmt.Printf("found dealIdx %d\n", dealIdx)
if dealIdx == -1 {
return dealID, nil, xerrors.Errorf("could not find deal in publish deals message %s", publishCid)
}
if dealIdx >= len(dealIDs) {
if dealIdx >= len(pubDealsParams.Deals) {
return dealID, nil, xerrors.Errorf(
"deal index %d out of bounds of deals (len %d) in publish deals message %s",
"deal index %d out of bounds of deal proposals (len %d) in publish deals message %s",
dealIdx, len(dealIDs), publishCid)
}
valid, err := retval.IsDealValid(uint64(dealIdx))
valid, outIdx, err := retval.IsDealValid(uint64(dealIdx))
if err != nil {
return dealID, nil, xerrors.Errorf("determining deal validity: %w", err)
}
@ -158,7 +160,12 @@ func (mgr *CurrentDealInfoManager) dealIDFromPublishDealsMsg(ctx context.Context
return dealID, nil, xerrors.New("deal was invalid at publication")
}
return dealIDs[dealIdx], lookup.TipSetTok, nil
// final check for invalid return value output
// should not be reachable from onchain output, only pathological test cases
if outIdx >= len(dealIDs) {
return dealID, nil, xerrors.Errorf("invalid publish storage deals ret marking %d as valid while only returning %d valid deals in publish deal message %s", outIdx, len(dealIDs), publishCid)
}
return dealIDs[outIdx], lookup.TipSetTok, nil
}
func (mgr *CurrentDealInfoManager) CheckDealEquality(ctx context.Context, tok TipSetToken, p1, p2 market.DealProposal) (bool, error) {

View File

@ -8,9 +8,11 @@ import (
"testing"
"time"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/network"
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
"golang.org/x/net/context"
"golang.org/x/xerrors"
@ -36,6 +38,7 @@ func TestGetCurrentDealInfo(t *testing.T) {
dummyCid, _ := cid.Parse("bafkqaaa")
dummyCid2, _ := cid.Parse("bafkqaab")
zeroDealID := abi.DealID(0)
anotherDealID := abi.DealID(8)
earlierDealID := abi.DealID(9)
successDealID := abi.DealID(10)
proposal := market.DealProposal{
@ -58,6 +61,16 @@ func TestGetCurrentDealInfo(t *testing.T) {
ClientCollateral: abi.NewTokenAmount(1),
Label: "other",
}
anotherProposal := market.DealProposal{
PieceCID: dummyCid2,
PieceSize: abi.PaddedPieceSize(100),
Client: tutils.NewActorAddr(t, "client"),
Provider: tutils.NewActorAddr(t, "provider"),
StoragePricePerEpoch: abi.NewTokenAmount(1),
ProviderCollateral: abi.NewTokenAmount(1),
ClientCollateral: abi.NewTokenAmount(1),
Label: "another",
}
successDeal := &api.MarketDeal{
Proposal: proposal,
State: market.DealState{
@ -72,6 +85,13 @@ func TestGetCurrentDealInfo(t *testing.T) {
LastUpdatedEpoch: 2,
},
}
anotherDeal := &api.MarketDeal{
Proposal: anotherProposal,
State: market.DealState{
SectorStartEpoch: 1,
LastUpdatedEpoch: 2,
},
}
type testCaseData struct {
searchMessageLookup *MsgLookup
@ -82,6 +102,7 @@ func TestGetCurrentDealInfo(t *testing.T) {
expectedDealID abi.DealID
expectedMarketDeal *api.MarketDeal
expectedError error
networkVersion network.Version
}
testCases := map[string]testCaseData{
"deal lookup succeeds": {
@ -89,7 +110,7 @@ func TestGetCurrentDealInfo(t *testing.T) {
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{successDealID}),
Return: makePublishDealsReturnBytesOldVersion(t, []abi.DealID{successDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
@ -104,7 +125,7 @@ func TestGetCurrentDealInfo(t *testing.T) {
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID, successDealID}),
Return: makePublishDealsReturnBytesOldVersion(t, []abi.DealID{earlierDealID, successDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
@ -120,7 +141,7 @@ func TestGetCurrentDealInfo(t *testing.T) {
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID}),
Return: makePublishDealsReturnBytesOldVersion(t, []abi.DealID{earlierDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
@ -130,12 +151,12 @@ func TestGetCurrentDealInfo(t *testing.T) {
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("could not find deal in publish deals message %s", dummyCid),
},
"deal lookup fails mismatch count of deals and return values": {
"deal lookup handles invalid actor output with mismatched count of deals and return values": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID}),
Return: makePublishDealsReturnBytesOldVersion(t, []abi.DealID{earlierDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
@ -144,14 +165,52 @@ func TestGetCurrentDealInfo(t *testing.T) {
},
targetProposal: &proposal,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("deal index 1 out of bounds of deals (len 1) in publish deals message %s", dummyCid),
expectedError: xerrors.Errorf("invalid publish storage deals ret marking 1 as valid while only returning 1 valid deals in publish deal message %s", dummyCid),
},
"deal lookup fails when deal was not valid and index exceeds output array": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturn(t, []abi.DealID{earlierDealID}, []uint64{0}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
earlierDealID: earlierDeal,
successDealID: successDeal,
},
targetProposal: &proposal,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("deal was invalid at publication"),
networkVersion: network.Version14,
},
"deal lookup succeeds when theres a separate deal failure": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturn(t, []abi.DealID{anotherDealID, successDealID}, []uint64{0, 2}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
anotherDealID: anotherDeal,
earlierDealID: earlierDeal,
successDealID: successDeal,
},
targetProposal: &proposal,
expectedDealID: successDealID,
expectedMarketDeal: successDeal,
networkVersion: network.Version14,
},
"deal lookup succeeds, target proposal nil, single deal in message": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{successDealID}),
Return: makePublishDealsReturnBytesOldVersion(t, []abi.DealID{successDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
@ -166,7 +225,7 @@ func TestGetCurrentDealInfo(t *testing.T) {
searchMessageLookup: &MsgLookup{
Receipt: MessageReceipt{
ExitCode: exitcode.Ok,
Return: makePublishDealsReturnBytes(t, []abi.DealID{earlierDealID, successDealID}),
Return: makePublishDealsReturnBytesOldVersion(t, []abi.DealID{earlierDealID, successDealID}),
},
},
marketDeals: map[abi.DealID]*api.MarketDeal{
@ -228,6 +287,7 @@ func TestGetCurrentDealInfo(t *testing.T) {
SearchMessageLookup: data.searchMessageLookup,
SearchMessageErr: data.searchMessageErr,
MarketDeals: marketDeals,
Version: data.networkVersion,
}
dealInfoMgr := CurrentDealInfoManager{mockApi}
@ -256,13 +316,21 @@ type CurrentDealInfoMockAPI struct {
SearchMessageErr error
MarketDeals map[marketDealKey]*api.MarketDeal
Version network.Version
}
func (mapi *CurrentDealInfoMockAPI) ChainGetMessage(ctx context.Context, c cid.Cid) (*types.Message, error) {
var dealIDs []abi.DealID
var keys []marketDealKey
for k := range mapi.MarketDeals {
keys = append(keys, k)
}
sort.SliceStable(keys, func(i, j int) bool {
return keys[i].DealID < keys[j].DealID
})
var deals []market2.ClientDealProposal
for k, dl := range mapi.MarketDeals {
dealIDs = append(dealIDs, k.DealID)
for _, k := range keys {
dl := mapi.MarketDeals[k]
deals = append(deals, market2.ClientDealProposal{
Proposal: market2.DealProposal(dl.Proposal),
ClientSignature: crypto.Signature{
@ -271,15 +339,14 @@ func (mapi *CurrentDealInfoMockAPI) ChainGetMessage(ctx context.Context, c cid.C
},
})
}
sort.SliceStable(deals, func(i, j int) bool {
return dealIDs[i] < dealIDs[j]
})
buf := new(bytes.Buffer)
params := market2.PublishStorageDealsParams{Deals: deals}
err := params.MarshalCBOR(buf)
if err != nil {
panic(err)
}
return &types.Message{
Params: buf.Bytes(),
}, nil
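The reshuffled mock sorts its map keys before encoding because Go randomizes map iteration order; without the sort, the CBOR-encoded params, and with them the dealIdx computed by the code under test, could differ between runs. A minimal illustration of the pattern:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// A fixture encoded straight from a map would serialize in a different
	// order on every run; sorting the keys first makes the encoding deterministic.
	deals := map[uint64]string{10: "success", 9: "earlier", 8: "another"}

	keys := make([]uint64, 0, len(deals))
	for k := range deals {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })

	for _, k := range keys {
		fmt.Println(k, deals[k])
	}
}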
@ -310,15 +377,28 @@ func (mapi *CurrentDealInfoMockAPI) StateSearchMsg(ctx context.Context, c cid.Ci
}
func (mapi *CurrentDealInfoMockAPI) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) {
return network.Version0, nil
return mapi.Version, nil
}
func makePublishDealsReturnBytes(t *testing.T, dealIDs []abi.DealID) []byte {
func makePublishDealsReturnBytesOldVersion(t *testing.T, dealIDs []abi.DealID) []byte {
buf := new(bytes.Buffer)
dealsReturn := market0.PublishStorageDealsReturn{
IDs: dealIDs,
}
err := dealsReturn.MarshalCBOR(buf)
require.NoError(t, err)
return buf.Bytes()
}
func makePublishDealsReturn(t *testing.T, dealIDs []abi.DealID, validIdxs []uint64) []byte {
buf := new(bytes.Buffer)
dealsReturn := market7.PublishStorageDealsReturn{
IDs: dealIDs,
ValidDeals: bitfield.NewFromSet(validIdxs),
}
err := dealsReturn.MarshalCBOR(buf)
require.NoError(t, err)
return buf.Bytes()
}

View File

@ -446,7 +446,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e
}
if !ok {
exp, _, _ := expF(sector.number)
log.Infof("CC update sector %d cannot fit deal, expiration %d before deal end epoch %d", id, exp, piece.deal.DealProposal.EndEpoch)
log.Debugf("CC update sector %d cannot fit deal, expiration %d before deal end epoch %d", id, exp, piece.deal.DealProposal.EndEpoch)
continue
}

View File

@ -386,10 +386,15 @@ func (b *PreCommitBatcher) AddPreCommit(ctx context.Context, s SectorInfo, depos
return sealiface.PreCommitBatchRes{}, err
}
cutoff, err := getPreCommitCutoff(curEpoch, s)
if err != nil {
return sealiface.PreCommitBatchRes{}, xerrors.Errorf("failed to calculate cutoff: %w", err)
}
sn := s.SectorNumber
b.lk.Lock()
b.cutoffs[sn] = getPreCommitCutoff(curEpoch, s)
b.cutoffs[sn] = cutoff
b.todo[sn] = &preCommitEntry{
deposit: deposit,
pci: in,
@ -467,7 +472,7 @@ func (b *PreCommitBatcher) Stop(ctx context.Context) error {
}
// TODO: If this returned epochs, it would make testing much easier
func getPreCommitCutoff(curEpoch abi.ChainEpoch, si SectorInfo) time.Time {
func getPreCommitCutoff(curEpoch abi.ChainEpoch, si SectorInfo) (time.Time, error) {
cutoffEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback
for _, p := range si.Pieces {
if p.DealInfo == nil {
@ -481,8 +486,8 @@ func getPreCommitCutoff(curEpoch abi.ChainEpoch, si SectorInfo) time.Time {
}
if cutoffEpoch <= curEpoch {
return time.Now()
return time.Now(), xerrors.Errorf("cutoff has already passed (cutoff %d <= curEpoch %d)", cutoffEpoch, curEpoch)
}
return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second)
return time.Now().Add(time.Duration(cutoffEpoch-curEpoch) * time.Duration(build.BlockDelaySecs) * time.Second), nil
}
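getPreCommitCutoff converts the remaining epochs into wall-clock time by multiplying by the network block delay, and now reports an error instead of returning time.Now() when the cutoff has already passed. A hedged sketch of that conversion, assuming a 30-second block delay (mainnet's build.BlockDelaySecs; other networks configure a different value):

package main

import (
	"fmt"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
)

// blockDelaySecs stands in for build.BlockDelaySecs; 30s is the mainnet value.
const blockDelaySecs = 30

// cutoffTime mirrors the tail of getPreCommitCutoff: remaining epochs are
// converted to seconds and added to the current wall-clock time, and a cutoff
// that has already passed is now an error rather than a zero-delay deadline.
func cutoffTime(cur, cutoff abi.ChainEpoch) (time.Time, error) {
	if cutoff <= cur {
		return time.Time{}, fmt.Errorf("cutoff %d has already passed (current epoch %d)", cutoff, cur)
	}
	return time.Now().Add(time.Duration(cutoff-cur) * blockDelaySecs * time.Second), nil
}

func main() {
	// 20 epochs out at 30s per epoch: the batch must land within roughly 10 minutes.
	deadline, err := cutoffTime(1000, 1020)
	fmt.Println(deadline, err)
}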

View File

@ -4,6 +4,8 @@ import (
"fmt"
"os"
"github.com/filecoin-project/lotus/chain/vm"
gen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/api"
@ -38,6 +40,14 @@ func main() {
os.Exit(1)
}
err = gen.WriteTupleEncodersToFile("./chain/vm/cbor_gen.go", "vm",
vm.FvmExecutionTrace{},
)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
err = gen.WriteMapEncodersToFile("./paychmgr/cbor_gen.go", "paychmgr",
paychmgr.VoucherInfo{},
paychmgr.ChannelInfo{},
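This hunk adds vm.FvmExecutionTrace to the existing cbor-gen step, which writes MarshalCBOR/UnmarshalCBOR methods for the listed types. A hedged sketch of the same wiring for a standalone project; the example package, its Point type, and the module path are hypothetical:

package main

import (
	"fmt"
	"os"

	gen "github.com/whyrusleeping/cbor-gen"

	"github.com/example-org/example-mod/example" // hypothetical package declaring Point
)

func main() {
	// Tuple encoders serialize struct fields as a fixed-length CBOR array
	// (positional), while WriteMapEncodersToFile (used for the paychmgr types
	// above) keys fields by name.
	err := gen.WriteTupleEncodersToFile("./example/cbor_gen.go", "example",
		example.Point{},
	)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}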

go.mod
View File

@ -46,6 +46,7 @@ require (
github.com/filecoin-project/go-statestore v0.2.0
github.com/filecoin-project/go-storedcounter v0.1.0
github.com/filecoin-project/index-provider v0.5.0
github.com/filecoin-project/pubsub v1.0.0
github.com/filecoin-project/specs-actors v0.9.14
github.com/filecoin-project/specs-actors/v2 v2.3.6
github.com/filecoin-project/specs-actors/v3 v3.1.1
@ -53,7 +54,7 @@ require (
github.com/filecoin-project/specs-actors/v5 v5.0.4
github.com/filecoin-project/specs-actors/v6 v6.0.1
github.com/filecoin-project/specs-actors/v7 v7.0.0
github.com/filecoin-project/specs-storage v0.2.2
github.com/filecoin-project/specs-storage v0.2.4
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gdamore/tcell/v2 v2.2.0
@ -108,7 +109,7 @@ require (
github.com/koalacxr/quantile v0.0.1
github.com/libp2p/go-buffer-pool v0.0.2
github.com/libp2p/go-eventbus v0.2.1
github.com/libp2p/go-libp2p v0.19.0
github.com/libp2p/go-libp2p v0.19.3
github.com/libp2p/go-libp2p-connmgr v0.3.1
github.com/libp2p/go-libp2p-core v0.15.1
github.com/libp2p/go-libp2p-discovery v0.6.0
@ -140,14 +141,13 @@ require (
github.com/prometheus/client_golang v1.12.1
github.com/raulk/clock v1.1.0
github.com/raulk/go-watchdog v1.2.0
github.com/stretchr/testify v1.7.0
github.com/stretchr/testify v1.7.1
github.com/syndtr/goleveldb v1.0.0
github.com/urfave/cli/v2 v2.3.0
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba
github.com/whyrusleeping/cbor-gen v0.0.0-20220302191723-37c43cae8e14
github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
go.opencensus.io v0.23.0
go.opentelemetry.io/otel v1.3.0
@ -277,7 +277,7 @@ require (
github.com/libp2p/go-tcp-transport v0.5.1 // indirect
github.com/libp2p/go-ws-transport v0.6.0 // indirect
github.com/libp2p/go-yamux/v3 v3.1.1 // indirect
github.com/lucas-clemente/quic-go v0.27.0 // indirect
github.com/lucas-clemente/quic-go v0.27.1 // indirect
github.com/lucasb-eyer/go-colorful v1.0.3 // indirect
github.com/magefile/mage v1.9.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect

go.sum
View File

@ -382,6 +382,8 @@ github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5
github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8=
github.com/filecoin-project/index-provider v0.5.0 h1:k2C1RFvOvxmA2i8bhmkb3b4qun7RDRDzzs/y25/TwQg=
github.com/filecoin-project/index-provider v0.5.0/go.mod h1:KHVrP2vU3YuScb+fawObwTFoR882up9U07kk0ZrfP0c=
github.com/filecoin-project/pubsub v1.0.0 h1:ZTmT27U07e54qV1mMiQo4HDr0buo8I1LDHBYLXlsNXM=
github.com/filecoin-project/pubsub v1.0.0/go.mod h1:GkpB33CcUtUNrLPhJgfdy4FDx4OMNR9k+46DHx/Lqrg=
github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY=
github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao=
@ -402,8 +404,8 @@ github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVi
github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1.0.20220118005651-2470cb39827e/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M=
github.com/filecoin-project/specs-actors/v7 v7.0.0 h1:FQN7tjt3o68hfb3qLFSJBoLMuOFY0REkFVLO/zXj8RU=
github.com/filecoin-project/specs-actors/v7 v7.0.0/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M=
github.com/filecoin-project/specs-storage v0.2.2 h1:6ugbtKQ6LTcTEnEIX9HkeCtTp1PCYO497P/bokF5tF4=
github.com/filecoin-project/specs-storage v0.2.2/go.mod h1:6cc/lncmAxMUocPi0z1EPCX63beIX7F7UnlmUZ3hLQo=
github.com/filecoin-project/specs-storage v0.2.4 h1:acymZwLVQm6bLKPtS5OqmVBuG2GDLna+pECDVMN1vto=
github.com/filecoin-project/specs-storage v0.2.4/go.mod h1:6cc/lncmAxMUocPi0z1EPCX63beIX7F7UnlmUZ3hLQo=
github.com/filecoin-project/storetheindex v0.3.5 h1:KoS9TvjPm6zIZfUH8atAHJbVHOO7GTP1MdTG+v0eE+Q=
github.com/filecoin-project/storetheindex v0.3.5/go.mod h1:0r3d0kSpK63O6AvLr1CjAINLi+nWD49clzcnKV+GLpI=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
@ -1084,8 +1086,8 @@ github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76f
github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw=
github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8=
github.com/libp2p/go-libp2p v0.18.0-rc3/go.mod h1:WYL+Xw1iuwi6rdfzw5VIEpD+HqzYucHZ6fcUuumbI3M=
github.com/libp2p/go-libp2p v0.19.0 h1:zosskMbaobL7UDCVLEe1m5CGs1TaFNFoN/M5XLiKg0U=
github.com/libp2p/go-libp2p v0.19.0/go.mod h1:Ki9jJXLO2YqrTIFxofV7Twyd3INWPT97+r8hGt7XPjI=
github.com/libp2p/go-libp2p v0.19.3 h1:LqjvuBWdyYSqvkH4VVYxA78Fkphzg2Pq86VMnilqgkw=
github.com/libp2p/go-libp2p v0.19.3/go.mod h1:AGlPVLjh0+6jvEtf+a2gZEux7yHJrYXnG9IC7wcQ2NY=
github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo=
github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E=
github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I=
@ -1455,8 +1457,9 @@ github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0
github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0=
github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0=
github.com/lucas-clemente/quic-go v0.25.0/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg=
github.com/lucas-clemente/quic-go v0.27.0 h1:v6WY87q9zD4dKASbG8hy/LpzAVNzEQzw8sEIeloJsc4=
github.com/lucas-clemente/quic-go v0.27.0/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI=
github.com/lucas-clemente/quic-go v0.27.1 h1:sOw+4kFSVrdWOYmUjufQ9GBVPqZ+tu+jMtXxXNmRJyk=
github.com/lucas-clemente/quic-go v0.27.1/go.mod h1:AzgQoPda7N+3IqMMMkywBKggIFo2KT6pfnlrQ2QieeI=
github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac=
github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
@ -1906,8 +1909,9 @@ github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
@ -1996,8 +2000,6 @@ github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84
github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4=
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds=
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI=
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 h1:++Zf4xQ7YrkE81gNHIjVqx5JZsn0nbMeHOkY1ILAIME=
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=

View File

@ -138,8 +138,8 @@ func TestGatewayWalletMsig(t *testing.T) {
//stm: @CHAIN_STATE_MINER_AVAILABLE_BALANCE_001
msigBalance, err := lite.MsigGetAvailableBalance(ctx, msig, types.EmptyTSK)
require.NoError(t, err)
require.Greater(t, msigBalance.Int64(), int64(0))
require.Less(t, msigBalance.Int64(), amt.Int64())
require.GreaterOrEqual(t, msigBalance.Int64(), int64(0))
require.LessOrEqual(t, msigBalance.Int64(), amt.Int64())
// Propose to add a new address to the msig
proto, err = lite.MsigAddPropose(ctx, msig, walletAddrs[0], walletAddrs[3], false)

View File

@ -0,0 +1,32 @@
package itests
import (
"context"
"testing"
"time"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
)
func TestStateLookupRobustAddress(t *testing.T) {
ctx := context.Background()
kit.QuietMiningLogs()
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version15))
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
addr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
// Look up the robust address
robAddr, err := client.StateLookupRobustAddress(ctx, addr, types.EmptyTSK)
require.NoError(t, err)
// Check the id address for the given robust address and make sure it matches
idAddr, err := client.StateLookupID(ctx, robAddr, types.EmptyTSK)
require.NoError(t, err)
require.Equal(t, addr, idAddr)
}
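The new itest exercises StateLookupRobustAddress end to end against an ensemble node. A hedged sketch of calling the same API from an external program over JSON-RPC; the node URL, the empty auth header, and the f01000 address are placeholders, and it assumes the v1 client surface exposes the new method:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/api/client"
	"github.com/filecoin-project/lotus/chain/types"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint; a real caller would derive the address and an
	// auth header from FULLNODE_API_INFO.
	api, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", http.Header{})
	if err != nil {
		panic(err)
	}
	defer closer()

	idAddr, err := address.NewFromString("f01000") // hypothetical ID address
	if err != nil {
		panic(err)
	}

	// Resolve the f0 ID address to its robust (key or actor) form at chain head.
	robust, err := api.StateLookupRobustAddress(ctx, idAddr, types.EmptyTSK)
	if err != nil {
		panic(err)
	}
	fmt.Println("robust address:", robust)
}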

Some files were not shown because too many files have changed in this diff.