Merge remote-tracking branch 'origin/master' into deps/update-ctx-dsbs

Łukasz Magiera 2021-12-17 12:59:09 +01:00
commit bc384c01e3
101 changed files with 4934 additions and 588 deletions


@ -895,6 +895,11 @@ workflows:
suite: itest-sector_terminate
target: "./itests/sector_terminate_test.go"
- test:
name: test-itest-self_sent_txn
suite: itest-self_sent_txn
target: "./itests/self_sent_txn_test.go"
- test:
name: test-itest-tape
suite: itest-tape

.github/pull_request_template.md (new file, 21 lines)

@ -0,0 +1,21 @@
## Related Issues
<!-- link all issues that this PR might resolve/fix. If an issue doesn't exist, include a brief motivation for the change being made.-->
## Proposed Changes
<!-- provide a clear list of the changes being made-->
## Additional Info
<!-- callouts, links to documentation, etc. -->
## Checklist
Before you mark the PR ready for review, please make sure that:
- [ ] All commits have a clear commit message.
- [ ] The PR title is in the form of `<PR type>: <#issue number> <area>: <change being made>`
- example: `fix: #1234 mempool: Introduce a cache for valid signatures`
- `PR type`: _fix_, _feat_, _INTERFACE BREAKING CHANGE_, _CONSENSUS BREAKING_, _build_, _chore_, _ci_, _docs_, _misc_, _perf_, _refactor_, _revert_, _style_, _test_
- `area`: _api_, _chain_, _state_, _vm_, _data transfer_, _market_, _mempool_, _message_, _block production_, _multisig_, _networking_, _paychan_, _proving_, _sealing_, _wallet_
- [ ] This PR has tests for new functionality or changes in behaviour
- [ ] If new user-facing features are introduced, clear usage guidelines and/or documentation updates should be included in https://lotus.filecoin.io or [Discussion Tutorials](https://github.com/filecoin-project/lotus/discussions/categories/tutorials).
- [ ] CI is green


@ -118,17 +118,21 @@ type StorageMiner interface {
WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin
//storiface.WorkerReturn
ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true
ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true
ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true
ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error //perm:admin retry:true
ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true
ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true
ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true
ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
// SealingSchedDiag dumps internal sealing scheduler state
SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin


@ -39,6 +39,10 @@ type Worker interface {
SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin
SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin
FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin
ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin
GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) //perm:admin
ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin
MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin
UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
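
The new Worker entries cover the three asynchronous stages of a snap-deals sector upgrade: ReplicaUpdate encodes the deal pieces into the existing replica, ProveReplicaUpdate1 produces vanilla proofs over the old and new commitments, and ProveReplicaUpdate2 aggregates them into the final replica-update proof, with GenerateSectorKeyFromData available to regenerate the original sector key from unsealed data. A minimal sketch of the call shapes, assuming this era's import paths and an illustrative helper name; each call only returns a CallID, and the real outputs arrive later through the matching StorageMiner Return* endpoints above, so the cross-stage inputs are simply taken as parameters here:

package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
	"github.com/filecoin-project/specs-storage/storage"
)

// snapDealsCalls issues the three upgrade stages against a worker. In the real
// scheduler each stage waits for the previous stage's result (delivered via
// ReturnReplicaUpdate / ReturnProveReplicaUpdate1) before the next is started.
func snapDealsCalls(ctx context.Context, w api.Worker, sector storage.SectorRef, pieces []abi.PieceInfo,
	sectorKey, newSealed, newUnsealed cid.Cid, vanilla storage.ReplicaVanillaProofs) ([]storiface.CallID, error) {

	var calls []storiface.CallID

	// 1. Encode the new deal pieces into the existing (CC) replica.
	c, err := w.ReplicaUpdate(ctx, sector, pieces)
	if err != nil {
		return nil, err
	}
	calls = append(calls, c)

	// 2. Produce vanilla proofs over the old sector key and the new commitments.
	c, err = w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)
	if err != nil {
		return nil, err
	}
	calls = append(calls, c)

	// 3. Aggregate the vanilla proofs into the final replica-update proof.
	c, err = w.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanilla)
	if err != nil {
		return nil, err
	}
	calls = append(calls, c)

	return calls, nil
}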


@ -716,12 +716,20 @@ type StorageMinerStruct struct {
ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
ReturnMoveStorage func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
ReturnProveReplicaUpdate1 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error `perm:"admin"`
ReturnProveReplicaUpdate2 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error `perm:"admin"`
ReturnReadPiece func(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error `perm:"admin"`
ReturnReleaseUnsealed func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
ReturnReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error `perm:"admin"`
ReturnSealCommit1 func(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error `perm:"admin"`
ReturnSealCommit2 func(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error `perm:"admin"`
@ -855,6 +863,8 @@ type WorkerStruct struct {
FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"`
Info func(p0 context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
MoveStorage func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
@ -863,10 +873,16 @@ type WorkerStruct struct {
ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"`
ProveReplicaUpdate1 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) `perm:"admin"`
ProveReplicaUpdate2 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) `perm:"admin"`
ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"`
ReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"`
SealCommit1 func(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) `perm:"admin"`
SealCommit2 func(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) `perm:"admin"`
@ -4220,6 +4236,17 @@ func (s *StorageMinerStub) ReturnFinalizeSector(p0 context.Context, p1 storiface
return ErrNotSupported
}
func (s *StorageMinerStruct) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
if s.Internal.ReturnGenerateSectorKeyFromData == nil {
return ErrNotSupported
}
return s.Internal.ReturnGenerateSectorKeyFromData(p0, p1, p2)
}
func (s *StorageMinerStub) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
if s.Internal.ReturnMoveStorage == nil {
return ErrNotSupported
@ -4231,6 +4258,28 @@ func (s *StorageMinerStub) ReturnMoveStorage(p0 context.Context, p1 storiface.Ca
return ErrNotSupported
}
func (s *StorageMinerStruct) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error {
if s.Internal.ReturnProveReplicaUpdate1 == nil {
return ErrNotSupported
}
return s.Internal.ReturnProveReplicaUpdate1(p0, p1, p2, p3)
}
func (s *StorageMinerStub) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) ReturnProveReplicaUpdate2(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error {
if s.Internal.ReturnProveReplicaUpdate2 == nil {
return ErrNotSupported
}
return s.Internal.ReturnProveReplicaUpdate2(p0, p1, p2, p3)
}
func (s *StorageMinerStub) ReturnProveReplicaUpdate2(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error {
if s.Internal.ReturnReadPiece == nil {
return ErrNotSupported
@ -4253,6 +4302,17 @@ func (s *StorageMinerStub) ReturnReleaseUnsealed(p0 context.Context, p1 storifac
return ErrNotSupported
}
func (s *StorageMinerStruct) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error {
if s.Internal.ReturnReplicaUpdate == nil {
return ErrNotSupported
}
return s.Internal.ReturnReplicaUpdate(p0, p1, p2, p3)
}
func (s *StorageMinerStub) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error {
return ErrNotSupported
}
func (s *StorageMinerStruct) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error {
if s.Internal.ReturnSealCommit1 == nil {
return ErrNotSupported
@ -4891,6 +4951,17 @@ func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2
return *new(storiface.CallID), ErrNotSupported
}
func (s *WorkerStruct) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) {
if s.Internal.GenerateSectorKeyFromData == nil {
return *new(storiface.CallID), ErrNotSupported
}
return s.Internal.GenerateSectorKeyFromData(p0, p1, p2)
}
func (s *WorkerStub) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) {
return *new(storiface.CallID), ErrNotSupported
}
func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) {
if s.Internal.Info == nil {
return *new(storiface.WorkerInfo), ErrNotSupported
@ -4935,6 +5006,28 @@ func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) {
return *new(uuid.UUID), ErrNotSupported
}
func (s *WorkerStruct) ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) {
if s.Internal.ProveReplicaUpdate1 == nil {
return *new(storiface.CallID), ErrNotSupported
}
return s.Internal.ProveReplicaUpdate1(p0, p1, p2, p3, p4)
}
func (s *WorkerStub) ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) {
return *new(storiface.CallID), ErrNotSupported
}
func (s *WorkerStruct) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) {
if s.Internal.ProveReplicaUpdate2 == nil {
return *new(storiface.CallID), ErrNotSupported
}
return s.Internal.ProveReplicaUpdate2(p0, p1, p2, p3, p4, p5)
}
func (s *WorkerStub) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) {
return *new(storiface.CallID), ErrNotSupported
}
func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
if s.Internal.ReleaseUnsealed == nil {
return *new(storiface.CallID), ErrNotSupported
@ -4957,6 +5050,17 @@ func (s *WorkerStub) Remove(p0 context.Context, p1 abi.SectorID) error {
return ErrNotSupported
}
func (s *WorkerStruct) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) {
if s.Internal.ReplicaUpdate == nil {
return *new(storiface.CallID), ErrNotSupported
}
return s.Internal.ReplicaUpdate(p0, p1, p2)
}
func (s *WorkerStub) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) {
return *new(storiface.CallID), ErrNotSupported
}
func (s *WorkerStruct) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) {
if s.Internal.SealCommit1 == nil {
return *new(storiface.CallID), ErrNotSupported

3 binary files not shown.


@ -47,6 +47,8 @@ var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
var UpgradeSnapDealsHeight = abi.ChainEpoch(-18)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}


@ -41,6 +41,7 @@ const UpgradeNorwegianHeight = -14
const UpgradeTurboHeight = -15
const UpgradeHyperdriveHeight = -16
const UpgradeChocolateHeight = 6360
const UpgradeSnapDealsHeight = 99999999
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))


@ -54,6 +54,8 @@ const UpgradeHyperdriveHeight = 420
const UpgradeChocolateHeight = 312746
const UpgradeSnapDealsHeight = 99999999
func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))
policy.SetSupportedProofTypes(


@ -47,6 +47,7 @@ var UpgradeTurboHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
var UpgradeSnapDealsHeight = abi.ChainEpoch(-18)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,


@ -62,18 +62,20 @@ const UpgradeNorwegianHeight = 665280
const UpgradeTurboHeight = 712320
// 2021-06-30T22:00:00Z
var UpgradeHyperdriveHeight = abi.ChainEpoch(892800)
const UpgradeHyperdriveHeight = 892800
// 2021-10-26T13:30:00Z
var UpgradeChocolateHeight = abi.ChainEpoch(1231620)
const UpgradeChocolateHeight = 1231620
var UpgradeSnapDealsHeight = abi.ChainEpoch(999999999999)
func init() {
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet)
}
if os.Getenv("LOTUS_DISABLE_CHOCOLATE") == "1" {
UpgradeChocolateHeight = math.MaxInt64
if os.Getenv("LOTUS_DISABLE_SNAPDEALS") == "1" {
UpgradeSnapDealsHeight = math.MaxInt64
}
Devnet = false


@ -34,7 +34,7 @@ const NewestNetworkVersion = network.Version{{.latestNetworkVersion}}
/* inline-gen start */
const NewestNetworkVersion = network.Version14
const NewestNetworkVersion = network.Version15
/* inline-gen end */


@ -99,6 +99,7 @@ var (
UpgradeTurboHeight abi.ChainEpoch = -14
UpgradeHyperdriveHeight abi.ChainEpoch = -15
UpgradeChocolateHeight abi.ChainEpoch = -16
UpgradeSnapDealsHeight abi.ChainEpoch = -17
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,


@ -23,6 +23,8 @@ import (
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
)
func init() {
@ -50,6 +52,10 @@ func init() {
builtin.RegisterActorState(builtin6.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load6(store, root)
})
builtin.RegisterActorState(builtin7.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load7(store, root)
})
}
var Methods = builtin4.MethodsAccount
@ -75,6 +81,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin6.AccountActorCodeID:
return load6(store, act.Head)
case builtin7.AccountActorCodeID:
return load7(store, act.Head)
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -100,6 +109,9 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State,
case actors.Version6:
return make6(store, addr)
case actors.Version7:
return make7(store, addr)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -125,6 +137,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.AccountActorCodeID, nil
case actors.Version7:
return builtin7.AccountActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)


@ -0,0 +1,40 @@
package account
import (
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
account7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/account"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store, addr address.Address) (State, error) {
out := state7{store: store}
out.State = account7.State{Address: addr}
return &out, nil
}
type state7 struct {
account7.State
store adt.Store
}
func (s *state7) PubkeyAddress() (address.Address, error) {
return s.Address, nil
}
func (s *state7) GetState() interface{} {
return &s.State
}
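
Callers do not use state7 directly; the package-level Load dispatches on the actor's code CID, so existing call sites gain v7 support without changes. A small usage sketch (the helper name is illustrative):

package example

import (
	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/account"
	"github.com/filecoin-project/lotus/chain/types"
)

// pubkeyAddress resolves an account actor's key address regardless of which
// actors version (v0 through v7) its code CID points at.
func pubkeyAddress(store adt.Store, act *types.Actor) (address.Address, error) {
	st, err := account.Load(store, act) // dispatches on act.Code, now including builtin7
	if err != nil {
		return address.Undef, err
	}
	return st.PubkeyAddress()
}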


@ -23,46 +23,49 @@ import (
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
smoothing6 "github.com/filecoin-project/specs-actors/v6/actors/util/smoothing"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner"
proof6 "github.com/filecoin-project/specs-actors/v6/actors/runtime/proof"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
)
var SystemActorAddr = builtin6.SystemActorAddr
var BurntFundsActorAddr = builtin6.BurntFundsActorAddr
var CronActorAddr = builtin6.CronActorAddr
var SystemActorAddr = builtin7.SystemActorAddr
var BurntFundsActorAddr = builtin7.BurntFundsActorAddr
var CronActorAddr = builtin7.CronActorAddr
var SaftAddress = makeAddress("t0122")
var ReserveAddress = makeAddress("t090")
var RootVerifierAddress = makeAddress("t080")
var (
ExpectedLeadersPerEpoch = builtin6.ExpectedLeadersPerEpoch
ExpectedLeadersPerEpoch = builtin7.ExpectedLeadersPerEpoch
)
const (
EpochDurationSeconds = builtin6.EpochDurationSeconds
EpochsInDay = builtin6.EpochsInDay
SecondsInDay = builtin6.SecondsInDay
EpochDurationSeconds = builtin7.EpochDurationSeconds
EpochsInDay = builtin7.EpochsInDay
SecondsInDay = builtin7.SecondsInDay
)
const (
MethodSend = builtin6.MethodSend
MethodConstructor = builtin6.MethodConstructor
MethodSend = builtin7.MethodSend
MethodConstructor = builtin7.MethodConstructor
)
// These are all just type aliases across actor versions. In the future, that might change
// and we might need to do something fancier.
type SectorInfo = proof6.SectorInfo
type PoStProof = proof6.PoStProof
type SectorInfo = proof7.SectorInfo
type PoStProof = proof7.PoStProof
type FilterEstimate = smoothing0.FilterEstimate
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
return miner6.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
return miner7.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
}
func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
@ -101,6 +104,12 @@ func FromV6FilterEstimate(v6 smoothing6.FilterEstimate) FilterEstimate {
}
func FromV7FilterEstimate(v7 smoothing7.FilterEstimate) FilterEstimate {
return (FilterEstimate)(v7)
}
type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)
var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader)
@ -138,6 +147,9 @@ func ActorNameByCode(c cid.Cid) string {
case builtin6.IsBuiltinActor(c):
return builtin6.ActorNameByCode(c)
case builtin7.IsBuiltinActor(c):
return builtin7.ActorNameByCode(c)
default:
return "<unknown>"
}
@ -169,6 +181,10 @@ func IsBuiltinActor(c cid.Cid) bool {
return true
}
if builtin7.IsBuiltinActor(c) {
return true
}
return false
}
@ -198,6 +214,10 @@ func IsAccountActor(c cid.Cid) bool {
return true
}
if c == builtin7.AccountActorCodeID {
return true
}
return false
}
@ -227,6 +247,10 @@ func IsStorageMinerActor(c cid.Cid) bool {
return true
}
if c == builtin7.StorageMinerActorCodeID {
return true
}
return false
}
@ -256,6 +280,10 @@ func IsMultisigActor(c cid.Cid) bool {
return true
}
if c == builtin7.MultisigActorCodeID {
return true
}
return false
}
@ -285,6 +313,10 @@ func IsPaymentChannelActor(c cid.Cid) bool {
return true
}
if c == builtin7.PaymentChannelActorCodeID {
return true
}
return false
}


@ -17,6 +17,8 @@ import (
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
)
func MakeState(store adt.Store, av actors.Version) (State, error) {
@ -40,6 +42,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version6:
return make6(store)
case actors.Version7:
return make7(store)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -65,14 +70,17 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.CronActorCodeID, nil
case actors.Version7:
return builtin7.CronActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
var (
Address = builtin6.CronActorAddr
Methods = builtin6.MethodsCron
Address = builtin7.CronActorAddr
Methods = builtin7.MethodsCron
)
type State interface {


@ -0,0 +1,35 @@
package cron
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
cron7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/cron"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store) (State, error) {
out := state7{store: store}
out.State = *cron7.ConstructState(cron7.BuiltInEntries())
return &out, nil
}
type state7 struct {
cron7.State
store adt.Store
}
func (s *state7) GetState() interface{} {
return &s.State
}


@ -25,6 +25,8 @@ import (
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
)
func init() {
@ -52,11 +54,15 @@ func init() {
builtin.RegisterActorState(builtin6.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load6(store, root)
})
builtin.RegisterActorState(builtin7.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load7(store, root)
})
}
var (
Address = builtin6.InitActorAddr
Methods = builtin6.MethodsInit
Address = builtin7.InitActorAddr
Methods = builtin7.MethodsInit
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@ -80,6 +86,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin6.InitActorCodeID:
return load6(store, act.Head)
case builtin7.InitActorCodeID:
return load7(store, act.Head)
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -105,6 +114,9 @@ func MakeState(store adt.Store, av actors.Version, networkName string) (State, e
case actors.Version6:
return make6(store, networkName)
case actors.Version7:
return make7(store, networkName)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -130,6 +142,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.InitActorCodeID, nil
case actors.Version7:
return builtin7.InitActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)


@ -0,0 +1,114 @@
package init
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/node/modules/dtypes"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init"
adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store, networkName string) (State, error) {
out := state7{store: store}
s, err := init7.ConstructState(store, networkName)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state7 struct {
init7.State
store adt.Store
}
func (s *state7) ResolveAddress(address address.Address) (address.Address, bool, error) {
return s.State.ResolveAddress(s.store, address)
}
func (s *state7) MapAddressToNewID(address address.Address) (address.Address, error) {
return s.State.MapAddressToNewID(s.store, address)
}
func (s *state7) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
addrs, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth)
if err != nil {
return err
}
var actorID cbg.CborInt
return addrs.ForEach(&actorID, func(key string) error {
addr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
return cb(abi.ActorID(actorID), addr)
})
}
func (s *state7) NetworkName() (dtypes.NetworkName, error) {
return dtypes.NetworkName(s.State.NetworkName), nil
}
func (s *state7) SetNetworkName(name string) error {
s.State.NetworkName = name
return nil
}
func (s *state7) SetNextID(id abi.ActorID) error {
s.State.NextID = id
return nil
}
func (s *state7) Remove(addrs ...address.Address) (err error) {
m, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth)
if err != nil {
return err
}
for _, addr := range addrs {
if err = m.Delete(abi.AddrKey(addr)); err != nil {
return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
}
}
amr, err := m.Root()
if err != nil {
return xerrors.Errorf("failed to get address map root: %w", err)
}
s.State.AddressMap = amr
return nil
}
func (s *state7) SetAddressMap(mcid cid.Cid) error {
s.State.AddressMap = mcid
return nil
}
func (s *state7) AddressMap() (adt.Map, error) {
return adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth)
}
func (s *state7) GetState() interface{} {
return &s.State
}


@ -25,6 +25,8 @@ import (
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
@ -56,11 +58,15 @@ func init() {
builtin.RegisterActorState(builtin6.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load6(store, root)
})
builtin.RegisterActorState(builtin7.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load7(store, root)
})
}
var (
Address = builtin6.StorageMarketActorAddr
Methods = builtin6.MethodsMarket
Address = builtin7.StorageMarketActorAddr
Methods = builtin7.MethodsMarket
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@ -84,6 +90,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin6.StorageMarketActorCodeID:
return load6(store, act.Head)
case builtin7.StorageMarketActorCodeID:
return load7(store, act.Head)
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -109,6 +118,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version6:
return make6(store)
case actors.Version7:
return make7(store)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -134,6 +146,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.StorageMarketActorCodeID, nil
case actors.Version7:
return builtin7.StorageMarketActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -211,6 +226,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora
case actors.Version6:
return decodePublishStorageDealsReturn6(b)
case actors.Version7:
return decodePublishStorageDealsReturn7(b)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}


@ -0,0 +1,252 @@
package market
import (
"bytes"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store) (State, error) {
out := state7{store: store}
s, err := market7.ConstructState(store)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state7 struct {
market7.State
store adt.Store
}
func (s *state7) TotalLocked() (abi.TokenAmount, error) {
fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
fml = types.BigAdd(fml, s.TotalClientStorageFee)
return fml, nil
}
func (s *state7) BalancesChanged(otherState State) (bool, error) {
otherState7, ok := otherState.(*state7)
if !ok {
// there's no way to compare different versions of the state, so let's
// just say that means the state of balances has changed
return true, nil
}
return !s.State.EscrowTable.Equals(otherState7.State.EscrowTable) || !s.State.LockedTable.Equals(otherState7.State.LockedTable), nil
}
func (s *state7) StatesChanged(otherState State) (bool, error) {
otherState7, ok := otherState.(*state7)
if !ok {
// there's no way to compare different versions of the state, so let's
// just say that means the state of balances has changed
return true, nil
}
return !s.State.States.Equals(otherState7.State.States), nil
}
func (s *state7) States() (DealStates, error) {
stateArray, err := adt7.AsArray(s.store, s.State.States, market7.StatesAmtBitwidth)
if err != nil {
return nil, err
}
return &dealStates7{stateArray}, nil
}
func (s *state7) ProposalsChanged(otherState State) (bool, error) {
otherState7, ok := otherState.(*state7)
if !ok {
// there's no way to compare different versions of the state, so let's
// just say that means the state of balances has changed
return true, nil
}
return !s.State.Proposals.Equals(otherState7.State.Proposals), nil
}
func (s *state7) Proposals() (DealProposals, error) {
proposalArray, err := adt7.AsArray(s.store, s.State.Proposals, market7.ProposalsAmtBitwidth)
if err != nil {
return nil, err
}
return &dealProposals7{proposalArray}, nil
}
func (s *state7) EscrowTable() (BalanceTable, error) {
bt, err := adt7.AsBalanceTable(s.store, s.State.EscrowTable)
if err != nil {
return nil, err
}
return &balanceTable7{bt}, nil
}
func (s *state7) LockedTable() (BalanceTable, error) {
bt, err := adt7.AsBalanceTable(s.store, s.State.LockedTable)
if err != nil {
return nil, err
}
return &balanceTable7{bt}, nil
}
func (s *state7) VerifyDealsForActivation(
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error) {
w, vw, _, err := market7.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
return w, vw, err
}
func (s *state7) NextID() (abi.DealID, error) {
return s.State.NextID, nil
}
type balanceTable7 struct {
*adt7.BalanceTable
}
func (bt *balanceTable7) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
asMap := (*adt7.Map)(bt.BalanceTable)
var ta abi.TokenAmount
return asMap.ForEach(&ta, func(key string) error {
a, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
return cb(a, ta)
})
}
type dealStates7 struct {
adt.Array
}
func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal7 market7.DealState
found, err := s.Array.Get(uint64(dealID), &deal7)
if err != nil {
return nil, false, err
}
if !found {
return nil, false, nil
}
deal := fromV7DealState(deal7)
return &deal, true, nil
}
func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
var ds7 market7.DealState
return s.Array.ForEach(&ds7, func(idx int64) error {
return cb(abi.DealID(idx), fromV7DealState(ds7))
})
}
func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) {
var ds7 market7.DealState
if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err
}
ds := fromV7DealState(ds7)
return &ds, nil
}
func (s *dealStates7) array() adt.Array {
return s.Array
}
func fromV7DealState(v7 market7.DealState) DealState {
return (DealState)(v7)
}
type dealProposals7 struct {
adt.Array
}
func (s *dealProposals7) Get(dealID abi.DealID) (*DealProposal, bool, error) {
var proposal7 market7.DealProposal
found, err := s.Array.Get(uint64(dealID), &proposal7)
if err != nil {
return nil, false, err
}
if !found {
return nil, false, nil
}
proposal := fromV7DealProposal(proposal7)
return &proposal, true, nil
}
func (s *dealProposals7) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
var dp7 market7.DealProposal
return s.Array.ForEach(&dp7, func(idx int64) error {
return cb(abi.DealID(idx), fromV7DealProposal(dp7))
})
}
func (s *dealProposals7) decode(val *cbg.Deferred) (*DealProposal, error) {
var dp7 market7.DealProposal
if err := dp7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err
}
dp := fromV7DealProposal(dp7)
return &dp, nil
}
func (s *dealProposals7) array() adt.Array {
return s.Array
}
func fromV7DealProposal(v7 market7.DealProposal) DealProposal {
return (DealProposal)(v7)
}
func (s *state7) GetState() interface{} {
return &s.State
}
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn7)(nil)
func decodePublishStorageDealsReturn7(b []byte) (PublishStorageDealsReturn, error) {
var retval market7.PublishStorageDealsReturn
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
}
return &publishStorageDealsReturn7{retval}, nil
}
type publishStorageDealsReturn7 struct {
market7.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn7) IsDealValid(index uint64) (bool, error) {
return r.ValidDeals.IsSet(index)
}
func (r *publishStorageDealsReturn7) DealIDs() ([]abi.DealID, error) {
return r.IDs, nil
}
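
Consumers go through the version-aware helper rather than the state7 type: DecodePublishStorageDealsReturn selects the right decoder from the network version, and the returned interface exposes IsDealValid (indexed by the proposal's position in the original publish message) and DealIDs (the IDs assigned to the accepted deals). A rough usage sketch with an illustrative helper name:

package example

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
)

// publishResult decodes the CBOR return of a PublishStorageDeals message and
// reports whether the proposal at proposalIdx was accepted, along with the
// deal IDs assigned to all accepted proposals.
func publishResult(ret []byte, nv network.Version, proposalIdx uint64) (bool, []abi.DealID, error) {
	res, err := market.DecodePublishStorageDealsReturn(ret, nv)
	if err != nil {
		return false, nil, err
	}
	ok, err := res.IsDealValid(proposalIdx)
	if err != nil {
		return false, nil, err
	}
	ids, err := res.DealIDs()
	if err != nil {
		return false, nil, err
	}
	return ok, ids, nil
}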


@ -177,6 +177,7 @@ type SectorOnChainInfo struct {
InitialPledge abi.TokenAmount
ExpectedDayReward abi.TokenAmount
ExpectedStoragePledge abi.TokenAmount
SectorKeyCID *cid.Cid
}
type SectorPreCommitInfo = miner0.SectorPreCommitInfo


@ -35,6 +35,8 @@ import (
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
)
func init() {
@ -63,9 +65,13 @@ func init() {
return load6(store, root)
})
builtin.RegisterActorState(builtin7.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load7(store, root)
})
}
var Methods = builtin6.MethodsMiner
var Methods = builtin7.MethodsMiner
// Unchanged between v0, v2, v3, v4, and v5 actors
var WPoStProvingPeriod = miner0.WPoStProvingPeriod
@ -102,6 +108,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin6.StorageMinerActorCodeID:
return load6(store, act.Head)
case builtin7.StorageMinerActorCodeID:
return load7(store, act.Head)
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -127,6 +136,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version6:
return make6(store)
case actors.Version7:
return make7(store)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -152,6 +164,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.StorageMinerActorCodeID, nil
case actors.Version7:
return builtin7.StorageMinerActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -251,6 +266,7 @@ type SectorOnChainInfo struct {
InitialPledge abi.TokenAmount
ExpectedDayReward abi.TokenAmount
ExpectedStoragePledge abi.TokenAmount
SectorKeyCID *cid.Cid
}
type SectorPreCommitInfo = miner0.SectorPreCommitInfo
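
The only chain-facing change to SectorOnChainInfo is the new SectorKeyCID pointer: it stays nil for sectors that still hold their original replica and records the original sealed CID once the sector has gone through a replica update. A small sketch of how a consumer might read it (helper name illustrative):

package example

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// sectorKey reports whether a sector has been upgraded via a replica update
// and, if so, returns the sealed CID of the original (pre-update) replica.
func sectorKey(si *miner.SectorOnChainInfo) (cid.Cid, bool) {
	if si.SectorKeyCID == nil {
		return cid.Undef, false
	}
	return *si.SectorKeyCID, true
}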


@ -138,11 +138,22 @@ func (s *state{{.v}}) GetSectorExpiration(num abi.SectorNumber) (*SectorExpirati
return nil, err
}
// NOTE: this can be optimized significantly.
// 1. If the sector is non-faulty, it will either expire on-time (can be
{{if (ge .v 7) -}}
// 1. If the sector is non-faulty, it will expire on-time (can be
// learned from the sector info).
{{- else -}}
// 1. If the sector is non-faulty, it will either expire on-time (can be
// learned from the sector info), or in the next quantized expiration
// epoch (i.e., the first element in the partition's expiration queue.
{{- end}}
{{if (ge .v 6) -}}
// 2. If it's faulty, it will expire early within the first 42 entries
// of the expiration queue.
{{- else -}}
// 2. If it's faulty, it will expire early within the first 14 entries
// of the expiration queue.
{{- end}}
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error {
@ -554,8 +565,7 @@ func (p *partition{{.v}}) UnprovenSectors() (bitfield.BitField, error) {
}
func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo {
{{if (ge .v 2)}}
return SectorOnChainInfo{
info := SectorOnChainInfo{
SectorNumber: v{{.v}}.SectorNumber,
SealProof: v{{.v}}.SealProof,
SealedCID: v{{.v}}.SealedCID,
@ -567,10 +577,11 @@ func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorO
InitialPledge: v{{.v}}.InitialPledge,
ExpectedDayReward: v{{.v}}.ExpectedDayReward,
ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge,
{{if (ge .v 7)}}
SectorKeyCID: v{{.v}}.SectorKeyCID,
{{end}}
}
{{else}}
return (SectorOnChainInfo)(v0)
{{end}}
return info
}
func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {


@ -140,6 +140,7 @@ func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
// epoch (i.e., the first element in the partition's expiration queue.
// 2. If it's faulty, it will expire early within the first 14 entries
// of the expiration queue.
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error {
@ -505,9 +506,20 @@ func (p *partition0) UnprovenSectors() (bitfield.BitField, error) {
}
func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo {
return (SectorOnChainInfo)(v0)
info := SectorOnChainInfo{
SectorNumber: v0.SectorNumber,
SealProof: v0.SealProof,
SealedCID: v0.SealedCID,
DealIDs: v0.DealIDs,
Activation: v0.Activation,
Expiration: v0.Expiration,
DealWeight: v0.DealWeight,
VerifiedDealWeight: v0.VerifiedDealWeight,
InitialPledge: v0.InitialPledge,
ExpectedDayReward: v0.ExpectedDayReward,
ExpectedStoragePledge: v0.ExpectedStoragePledge,
}
return info
}
func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {


@ -138,6 +138,7 @@ func (s *state2) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
// epoch (i.e., the first element in the partition's expiration queue.
// 2. If it's faulty, it will expire early within the first 14 entries
// of the expiration queue.
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner2.Deadline) error {
@ -535,8 +536,7 @@ func (p *partition2) UnprovenSectors() (bitfield.BitField, error) {
}
func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
return SectorOnChainInfo{
info := SectorOnChainInfo{
SectorNumber: v2.SectorNumber,
SealProof: v2.SealProof,
SealedCID: v2.SealedCID,
@ -549,7 +549,7 @@ func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo {
ExpectedDayReward: v2.ExpectedDayReward,
ExpectedStoragePledge: v2.ExpectedStoragePledge,
}
return info
}
func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {


@ -140,6 +140,7 @@ func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
// epoch (i.e., the first element in the partition's expiration queue.
// 2. If it's faulty, it will expire early within the first 14 entries
// of the expiration queue.
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error {
@ -536,8 +537,7 @@ func (p *partition3) UnprovenSectors() (bitfield.BitField, error) {
}
func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo {
return SectorOnChainInfo{
info := SectorOnChainInfo{
SectorNumber: v3.SectorNumber,
SealProof: v3.SealProof,
SealedCID: v3.SealedCID,
@ -550,7 +550,7 @@ func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo {
ExpectedDayReward: v3.ExpectedDayReward,
ExpectedStoragePledge: v3.ExpectedStoragePledge,
}
return info
}
func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {


@ -140,6 +140,7 @@ func (s *state4) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
// epoch (i.e., the first element in the partition's expiration queue.
// 2. If it's faulty, it will expire early within the first 14 entries
// of the expiration queue.
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error {
@ -536,8 +537,7 @@ func (p *partition4) UnprovenSectors() (bitfield.BitField, error) {
}
func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo {
return SectorOnChainInfo{
info := SectorOnChainInfo{
SectorNumber: v4.SectorNumber,
SealProof: v4.SealProof,
SealedCID: v4.SealedCID,
@ -550,7 +550,7 @@ func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo {
ExpectedDayReward: v4.ExpectedDayReward,
ExpectedStoragePledge: v4.ExpectedStoragePledge,
}
return info
}
func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {


@ -140,6 +140,7 @@ func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
// epoch (i.e., the first element in the partition's expiration queue.
// 2. If it's faulty, it will expire early within the first 14 entries
// of the expiration queue.
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error {
@ -536,8 +537,7 @@ func (p *partition5) UnprovenSectors() (bitfield.BitField, error) {
}
func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo {
return SectorOnChainInfo{
info := SectorOnChainInfo{
SectorNumber: v5.SectorNumber,
SealProof: v5.SealProof,
SealedCID: v5.SealedCID,
@ -550,7 +550,7 @@ func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo {
ExpectedDayReward: v5.ExpectedDayReward,
ExpectedStoragePledge: v5.ExpectedStoragePledge,
}
return info
}
func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {


@ -138,8 +138,9 @@ func (s *state6) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e
// 1. If the sector is non-faulty, it will either expire on-time (can be
// learned from the sector info), or in the next quantized expiration
// epoch (i.e., the first element in the partition's expiration queue.
// 2. If it's faulty, it will expire early within the first 14 entries
// 2. If it's faulty, it will expire early within the first 42 entries
// of the expiration queue.
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner6.Deadline) error {
@ -536,8 +537,7 @@ func (p *partition6) UnprovenSectors() (bitfield.BitField, error) {
}
func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo {
return SectorOnChainInfo{
info := SectorOnChainInfo{
SectorNumber: v6.SectorNumber,
SealProof: v6.SealProof,
SealedCID: v6.SealedCID,
@ -550,7 +550,7 @@ func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo {
ExpectedDayReward: v6.ExpectedDayReward,
ExpectedStoragePledge: v6.ExpectedStoragePledge,
}
return info
}
func fromV6SectorPreCommitOnChainInfo(v6 miner6.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {


@ -0,0 +1,571 @@
package miner
import (
"bytes"
"errors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors/adt"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store) (State, error) {
out := state7{store: store}
out.State = miner7.State{}
return &out, nil
}
type state7 struct {
miner7.State
store adt.Store
}
type deadline7 struct {
miner7.Deadline
store adt.Store
}
type partition7 struct {
miner7.Partition
store adt.Store
}
func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
defer func() {
if r := recover(); r != nil {
err = xerrors.Errorf("failed to get available balance: %w", r)
available = abi.NewTokenAmount(0)
}
}()
// this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal)
return available, err
}
func (s *state7) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
return s.CheckVestedFunds(s.store, epoch)
}
func (s *state7) LockedFunds() (LockedFunds, error) {
return LockedFunds{
VestingFunds: s.State.LockedFunds,
InitialPledgeRequirement: s.State.InitialPledge,
PreCommitDeposits: s.State.PreCommitDeposits,
}, nil
}
func (s *state7) FeeDebt() (abi.TokenAmount, error) {
return s.State.FeeDebt, nil
}
func (s *state7) InitialPledge() (abi.TokenAmount, error) {
return s.State.InitialPledge, nil
}
func (s *state7) PreCommitDeposits() (abi.TokenAmount, error) {
return s.State.PreCommitDeposits, nil
}
func (s *state7) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
info, ok, err := s.State.GetSector(s.store, num)
if !ok || err != nil {
return nil, err
}
ret := fromV7SectorOnChainInfo(*info)
return &ret, nil
}
func (s *state7) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
dlIdx, partIdx, err := s.State.FindSector(s.store, num)
if err != nil {
return nil, err
}
return &SectorLocation{
Deadline: dlIdx,
Partition: partIdx,
}, nil
}
func (s *state7) NumLiveSectors() (uint64, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return 0, err
}
var total uint64
if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error {
total += dl.LiveSectors
return nil
}); err != nil {
return 0, err
}
return total, nil
}
// GetSectorExpiration returns the effective expiration of the given sector.
//
// If the sector does not expire early, the Early expiration field is 0.
func (s *state7) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return nil, err
}
// NOTE: this can be optimized significantly.
// 1. If the sector is non-faulty, it will expire on-time (can be
// learned from the sector info).
// 2. If it's faulty, it will expire early within the first 42 entries
// of the expiration queue.
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error {
partitions, err := dl.PartitionsArray(s.store)
if err != nil {
return err
}
quant := s.State.QuantSpecForDeadline(dlIdx)
var part miner7.Partition
return partitions.ForEach(&part, func(partIdx int64) error {
if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
return err
} else if !found {
return nil
}
if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
return err
} else if found {
// already terminated
return stopErr
}
q, err := miner7.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner7.PartitionExpirationAmtBitwidth)
if err != nil {
return err
}
var exp miner7.ExpirationSet
return q.ForEach(&exp, func(epoch int64) error {
if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
return err
} else if early {
out.Early = abi.ChainEpoch(epoch)
return nil
}
if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
return err
} else if onTime {
out.OnTime = abi.ChainEpoch(epoch)
return stopErr
}
return nil
})
})
})
if err == stopErr {
err = nil
}
if err != nil {
return nil, err
}
if out.Early == 0 && out.OnTime == 0 {
return nil, xerrors.Errorf("failed to find sector %d", num)
}
return &out, nil
}
func (s *state7) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
info, ok, err := s.State.GetPrecommittedSector(s.store, num)
if !ok || err != nil {
return nil, err
}
ret := fromV7SectorPreCommitOnChainInfo(*info)
return &ret, nil
}
func (s *state7) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt7.AsMap(s.store, s.State.PreCommittedSectors, builtin7.DefaultHamtBitwidth)
if err != nil {
return err
}
var info miner7.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV7SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state7) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner7.LoadSectors(s.store, s.State.Sectors)
if err != nil {
return nil, err
}
// If no sector numbers are specified, load all.
if snos == nil {
infos := make([]*SectorOnChainInfo, 0, sectors.Length())
var info7 miner7.SectorOnChainInfo
if err := sectors.ForEach(&info7, func(_ int64) error {
info := fromV7SectorOnChainInfo(info7)
infos = append(infos, &info)
return nil
}); err != nil {
return nil, err
}
return infos, nil
}
// Otherwise, load selected.
infos7, err := sectors.Load(*snos)
if err != nil {
return nil, err
}
infos := make([]*SectorOnChainInfo, len(infos7))
for i, info7 := range infos7 {
info := fromV7SectorOnChainInfo(*info7)
infos[i] = &info
}
return infos, nil
}
func (s *state7) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state7) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err
}
return allocatedSectors.IsSet(uint64(num))
}
func (s *state7) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state7) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state7) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state7) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return nil, err
}
dl, err := dls.LoadDeadline(s.store, idx)
if err != nil {
return nil, err
}
return &deadline7{*dl, s.store}, nil
}
func (s *state7) ForEachDeadline(cb func(uint64, Deadline) error) error {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return err
}
return dls.ForEach(s.store, func(i uint64, dl *miner7.Deadline) error {
return cb(i, &deadline7{*dl, s.store})
})
}
func (s *state7) NumDeadlines() (uint64, error) {
return miner7.WPoStPeriodDeadlines, nil
}
func (s *state7) DeadlinesChanged(other State) (bool, error) {
other7, ok := other.(*state7)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Deadlines.Equals(other7.Deadlines), nil
}
func (s *state7) MinerInfoChanged(other State) (bool, error) {
other7, ok := other.(*state7)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Info.Equals(other7.State.Info), nil
}
func (s *state7) Info() (MinerInfo, error) {
info, err := s.State.GetInfo(s.store)
if err != nil {
return MinerInfo{}, err
}
var pid *peer.ID
if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
pid = &peerID
}
mi := MinerInfo{
Owner: info.Owner,
Worker: info.Worker,
ControlAddresses: info.ControlAddresses,
NewWorker: address.Undef,
WorkerChangeEpoch: -1,
PeerId: pid,
Multiaddrs: info.Multiaddrs,
WindowPoStProofType: info.WindowPoStProofType,
SectorSize: info.SectorSize,
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
}
if info.PendingWorkerKey != nil {
mi.NewWorker = info.PendingWorkerKey.NewWorker
mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
}
return mi, nil
}
func (s *state7) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
return s.State.RecordedDeadlineInfo(epoch), nil
}
func (s *state7) DeadlineCronActive() (bool, error) {
return s.State.DeadlineCronActive, nil
}
func (s *state7) sectors() (adt.Array, error) {
return adt7.AsArray(s.store, s.Sectors, miner7.SectorsAmtBitwidth)
}
func (s *state7) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
var si miner7.SectorOnChainInfo
err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return SectorOnChainInfo{}, err
}
return fromV7SectorOnChainInfo(si), nil
}
func (s *state7) precommits() (adt.Map, error) {
return adt7.AsMap(s.store, s.PreCommittedSectors, builtin7.DefaultHamtBitwidth)
}
func (s *state7) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
var sp miner7.SectorPreCommitOnChainInfo
err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return SectorPreCommitOnChainInfo{}, err
}
return fromV7SectorPreCommitOnChainInfo(sp), nil
}
func (s *state7) EraseAllUnproven() error {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return err
}
err = dls.ForEach(s.store, func(dindx uint64, dl *miner7.Deadline) error {
ps, err := dl.PartitionsArray(s.store)
if err != nil {
return err
}
var part miner7.Partition
err = ps.ForEach(&part, func(pindx int64) error {
_ = part.ActivateUnproven()
return ps.Set(uint64(pindx), &part)
})
if err != nil {
return err
}
dl.Partitions, err = ps.Root()
if err != nil {
return err
}
return dls.UpdateDeadline(s.store, dindx, dl)
})
if err != nil {
return err
}
return s.State.SaveDeadlines(s.store, dls)
}
func (d *deadline7) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil {
return nil, err
}
return &partition7{*p, d.store}, nil
}
func (d *deadline7) ForEachPartition(cb func(uint64, Partition) error) error {
ps, err := d.Deadline.PartitionsArray(d.store)
if err != nil {
return err
}
var part miner7.Partition
return ps.ForEach(&part, func(i int64) error {
return cb(uint64(i), &partition7{part, d.store})
})
}
func (d *deadline7) PartitionsChanged(other Deadline) (bool, error) {
other7, ok := other.(*deadline7)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !d.Deadline.Partitions.Equals(other7.Deadline.Partitions), nil
}
func (d *deadline7) PartitionsPoSted() (bitfield.BitField, error) {
return d.Deadline.PartitionsPoSted, nil
}
func (d *deadline7) DisputableProofCount() (uint64, error) {
ops, err := d.OptimisticProofsSnapshotArray(d.store)
if err != nil {
return 0, err
}
return ops.Length(), nil
}
func (p *partition7) AllSectors() (bitfield.BitField, error) {
return p.Partition.Sectors, nil
}
func (p *partition7) FaultySectors() (bitfield.BitField, error) {
return p.Partition.Faults, nil
}
func (p *partition7) RecoveringSectors() (bitfield.BitField, error) {
return p.Partition.Recoveries, nil
}
func (p *partition7) UnprovenSectors() (bitfield.BitField, error) {
return p.Partition.Unproven, nil
}
func fromV7SectorOnChainInfo(v7 miner7.SectorOnChainInfo) SectorOnChainInfo {
info := SectorOnChainInfo{
SectorNumber: v7.SectorNumber,
SealProof: v7.SealProof,
SealedCID: v7.SealedCID,
DealIDs: v7.DealIDs,
Activation: v7.Activation,
Expiration: v7.Expiration,
DealWeight: v7.DealWeight,
VerifiedDealWeight: v7.VerifiedDealWeight,
InitialPledge: v7.InitialPledge,
ExpectedDayReward: v7.ExpectedDayReward,
ExpectedStoragePledge: v7.ExpectedStoragePledge,
SectorKeyCID: v7.SectorKeyCID,
}
return info
}
func fromV7SectorPreCommitOnChainInfo(v7 miner7.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
return SectorPreCommitOnChainInfo{
Info: (SectorPreCommitInfo)(v7.Info),
PreCommitDeposit: v7.PreCommitDeposit,
PreCommitEpoch: v7.PreCommitEpoch,
DealWeight: v7.DealWeight,
VerifiedDealWeight: v7.VerifiedDealWeight,
}
}
func (s *state7) GetState() interface{} {
return &s.State
}

View File

@ -0,0 +1,71 @@
package multisig
import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init"
multisig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/actors"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/types"
)
type message7 struct{ message0 }
func (m message7) Create(
signers []address.Address, threshold uint64,
unlockStart, unlockDuration abi.ChainEpoch,
initialAmount abi.TokenAmount,
) (*types.Message, error) {
lenAddrs := uint64(len(signers))
if lenAddrs < threshold {
return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
}
if threshold == 0 {
threshold = lenAddrs
}
if m.from == address.Undef {
return nil, xerrors.Errorf("must provide source address")
}
// Set up constructor parameters for multisig
msigParams := &multisig7.ConstructorParams{
Signers: signers,
NumApprovalsThreshold: threshold,
UnlockDuration: unlockDuration,
StartEpoch: unlockStart,
}
enc, actErr := actors.SerializeParams(msigParams)
if actErr != nil {
return nil, actErr
}
// new actors are created by invoking 'exec' on the init actor with the constructor params
execParams := &init7.ExecParams{
CodeCID: builtin7.MultisigActorCodeID,
ConstructorParams: enc,
}
enc, actErr = actors.SerializeParams(execParams)
if actErr != nil {
return nil, actErr
}
return &types.Message{
To: init_.Address,
From: m.from,
Method: builtin7.MethodsInit.Exec,
Params: enc,
Value: initialAmount,
}, nil
}
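// Illustrative only (not part of this change): building the create message for
// a hypothetical 2-of-3 wallet with no vesting; Message comes from multisig.go
// in this package and the addresses are placeholders.
func exampleCreateMsig(creator, a1, a2, a3 address.Address) (*types.Message, error) {
return Message(actors.Version7, creator).Create(
[]address.Address{a1, a2, a3}, // signers
2, // any 2 of the 3 must approve
0, 0, // unlock start / duration: no vesting
abi.NewTokenAmount(0), // no initial funds
)
}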

View File

@ -13,7 +13,7 @@ import (
"github.com/ipfs/go-cid"
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
msig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig"
msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -27,6 +27,8 @@ import (
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
@ -58,6 +60,10 @@ func init() {
builtin.RegisterActorState(builtin6.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load6(store, root)
})
builtin.RegisterActorState(builtin7.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load7(store, root)
})
}
func Load(store adt.Store, act *types.Actor) (State, error) {
@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin6.MultisigActorCodeID:
return load6(store, act.Head)
case builtin7.MultisigActorCodeID:
return load7(store, act.Head)
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, signers []address.Address, th
case actors.Version6:
return make6(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
case actors.Version7:
return make7(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.MultisigActorCodeID, nil
case actors.Version7:
return builtin7.MultisigActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -156,7 +171,7 @@ type State interface {
type Transaction = msig0.Transaction
var Methods = builtin6.MethodsMultisig
var Methods = builtin7.MethodsMultisig
func Message(version actors.Version, from address.Address) MessageBuilder {
switch version {
@ -178,6 +193,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
case actors.Version6:
return message6{message0{from}}
case actors.Version7:
return message7{message0{from}}
default:
panic(fmt.Sprintf("unsupported actors version: %d", version))
}
@ -201,13 +219,13 @@ type MessageBuilder interface {
}
// these types are the same across actor versions
type ProposalHashData = msig6.ProposalHashData
type ProposeReturn = msig6.ProposeReturn
type ProposeParams = msig6.ProposeParams
type ApproveReturn = msig6.ApproveReturn
type ProposalHashData = msig7.ProposalHashData
type ProposeReturn = msig7.ProposeReturn
type ProposeParams = msig7.ProposeParams
type ApproveReturn = msig7.ApproveReturn
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
params := msig6.TxnIDParams{ID: msig6.TxnID(id)}
params := msig7.TxnIDParams{ID: msig7.TxnID(id)}
if data != nil {
if data.Requester.Protocol() != address.ID {
return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)

View File

@ -0,0 +1,119 @@
package multisig
import (
"bytes"
"encoding/binary"
adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors/adt"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
out := state7{store: store}
out.State = msig7.State{}
out.State.Signers = signers
out.State.NumApprovalsThreshold = threshold
out.State.StartEpoch = startEpoch
out.State.UnlockDuration = unlockDuration
out.State.InitialBalance = initialBalance
em, err := adt7.StoreEmptyMap(store, builtin7.DefaultHamtBitwidth)
if err != nil {
return nil, err
}
out.State.PendingTxns = em
return &out, nil
}
type state7 struct {
msig7.State
store adt.Store
}
func (s *state7) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
}
func (s *state7) StartEpoch() (abi.ChainEpoch, error) {
return s.State.StartEpoch, nil
}
func (s *state7) UnlockDuration() (abi.ChainEpoch, error) {
return s.State.UnlockDuration, nil
}
func (s *state7) InitialBalance() (abi.TokenAmount, error) {
return s.State.InitialBalance, nil
}
func (s *state7) Threshold() (uint64, error) {
return s.State.NumApprovalsThreshold, nil
}
func (s *state7) Signers() ([]address.Address, error) {
return s.State.Signers, nil
}
func (s *state7) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
arr, err := adt7.AsMap(s.store, s.State.PendingTxns, builtin7.DefaultHamtBitwidth)
if err != nil {
return err
}
var out msig7.Transaction
return arr.ForEach(&out, func(key string) error {
txid, n := binary.Varint([]byte(key))
if n <= 0 {
return xerrors.Errorf("invalid pending transaction key: %v", key)
}
return cb(txid, (Transaction)(out)) //nolint:unconvert
})
}
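// Illustrative only (not part of this change): pending transaction IDs are
// stored as signed varints in the HAMT key, which is what the Varint call
// above decodes. A toy round trip using the standard library:
func exampleTxnKey() (int64, error) {
buf := make([]byte, binary.MaxVarintLen64)
n := binary.PutVarint(buf, 7) // encode TxnID 7 as it would appear in a key
id, read := binary.Varint(buf[:n])
if read <= 0 {
return 0, xerrors.Errorf("invalid key encoding")
}
return id, nil // id == 7
}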
func (s *state7) PendingTxnChanged(other State) (bool, error) {
other7, ok := other.(*state7)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.PendingTxns.Equals(other7.PendingTxns), nil
}
func (s *state7) transactions() (adt.Map, error) {
return adt7.AsMap(s.store, s.PendingTxns, builtin7.DefaultHamtBitwidth)
}
func (s *state7) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
var tx msig7.Transaction
if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return Transaction{}, err
}
return tx, nil
}
func (s *state7) GetState() interface{} {
return &s.State
}

View File

@ -0,0 +1,74 @@
package paych
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init"
paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/actors"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/types"
)
type message7 struct{ from address.Address }
func (m message7) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych7.ConstructorParams{From: m.from, To: to})
if aerr != nil {
return nil, aerr
}
enc, aerr := actors.SerializeParams(&init7.ExecParams{
CodeCID: builtin7.PaymentChannelActorCodeID,
ConstructorParams: params,
})
if aerr != nil {
return nil, aerr
}
return &types.Message{
To: init_.Address,
From: m.from,
Value: initialAmount,
Method: builtin7.MethodsInit.Exec,
Params: enc,
}, nil
}
func (m message7) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych7.UpdateChannelStateParams{
Sv: *sv,
Secret: secret,
})
if aerr != nil {
return nil, aerr
}
return &types.Message{
To: paych,
From: m.from,
Value: abi.NewTokenAmount(0),
Method: builtin7.MethodsPaych.UpdateChannelState,
Params: params,
}, nil
}
func (m message7) Settle(paych address.Address) (*types.Message, error) {
return &types.Message{
To: paych,
From: m.from,
Value: abi.NewTokenAmount(0),
Method: builtin7.MethodsPaych.Settle,
}, nil
}
func (m message7) Collect(paych address.Address) (*types.Message, error) {
return &types.Message{
To: paych,
From: m.from,
Value: abi.NewTokenAmount(0),
Method: builtin7.MethodsPaych.Collect,
}, nil
}
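// Illustrative only (not part of this change): settling and later collecting a
// channel from the owner's side; Message comes from paych.go in this package
// and the addresses are placeholders.
func exampleSettleThenCollect(owner, channel address.Address) (*types.Message, *types.Message, error) {
b := Message(actors.Version7, owner)
settle, err := b.Settle(channel)
if err != nil {
return nil, nil, err
}
collect, err := b.Collect(channel)
if err != nil {
return nil, nil, err
}
return settle, collect, nil
}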

View File

@ -27,6 +27,8 @@ import (
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
@ -58,6 +60,10 @@ func init() {
builtin.RegisterActorState(builtin6.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load6(store, root)
})
builtin.RegisterActorState(builtin7.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load7(store, root)
})
}
// Load returns an abstract copy of payment channel state, regardless of actor version
@ -82,6 +88,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin6.PaymentChannelActorCodeID:
return load6(store, act.Head)
case builtin7.PaymentChannelActorCodeID:
return load7(store, act.Head)
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -107,6 +116,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version6:
return make6(store)
case actors.Version7:
return make7(store)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -132,6 +144,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.PaymentChannelActorCodeID, nil
case actors.Version7:
return builtin7.PaymentChannelActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -185,7 +200,7 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) {
return &sv, nil
}
var Methods = builtin6.MethodsPaych
var Methods = builtin7.MethodsPaych
func Message(version actors.Version, from address.Address) MessageBuilder {
switch version {
@ -208,6 +223,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
case actors.Version6:
return message6{from}
case actors.Version7:
return message7{from}
default:
panic(fmt.Sprintf("unsupported actors version: %d", version))
}

View File

@ -0,0 +1,114 @@
package paych
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors/adt"
paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych"
adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store) (State, error) {
out := state7{store: store}
out.State = paych7.State{}
return &out, nil
}
type state7 struct {
paych7.State
store adt.Store
lsAmt *adt7.Array
}
// Channel owner, who has funded the actor
func (s *state7) From() (address.Address, error) {
return s.State.From, nil
}
// Recipient of payouts from channel
func (s *state7) To() (address.Address, error) {
return s.State.To, nil
}
// Height at which the channel can be `Collected`
func (s *state7) SettlingAt() (abi.ChainEpoch, error) {
return s.State.SettlingAt, nil
}
// Amount successfully redeemed through the payment channel, paid out on `Collect()`
func (s *state7) ToSend() (abi.TokenAmount, error) {
return s.State.ToSend, nil
}
func (s *state7) getOrLoadLsAmt() (*adt7.Array, error) {
if s.lsAmt != nil {
return s.lsAmt, nil
}
// Get the lane state from the chain
lsamt, err := adt7.AsArray(s.store, s.State.LaneStates, paych7.LaneStatesAmtBitwidth)
if err != nil {
return nil, err
}
s.lsAmt = lsamt
return lsamt, nil
}
// Get total number of lanes
func (s *state7) LaneCount() (uint64, error) {
lsamt, err := s.getOrLoadLsAmt()
if err != nil {
return 0, err
}
return lsamt.Length(), nil
}
func (s *state7) GetState() interface{} {
return &s.State
}
// Iterate lane states
func (s *state7) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
// Get the lane state from the chain
lsamt, err := s.getOrLoadLsAmt()
if err != nil {
return err
}
// Note: lane IDs are chosen by the client and may be sparse or very large,
// so iterate the lane-state AMT rather than assuming dense indices.
var ls paych7.LaneState
return lsamt.ForEach(&ls, func(i int64) error {
return cb(uint64(i), &laneState7{ls})
})
}
type laneState7 struct {
paych7.LaneState
}
func (ls *laneState7) Redeemed() (big.Int, error) {
return ls.LaneState.Redeemed, nil
}
func (ls *laneState7) Nonce() (uint64, error) {
return ls.LaneState.Nonce, nil
}

View File

@ -26,6 +26,8 @@ import (
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
)
func init() {
@ -53,11 +55,15 @@ func init() {
builtin.RegisterActorState(builtin6.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load6(store, root)
})
builtin.RegisterActorState(builtin7.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load7(store, root)
})
}
var (
Address = builtin6.StoragePowerActorAddr
Methods = builtin6.MethodsPower
Address = builtin7.StoragePowerActorAddr
Methods = builtin7.MethodsPower
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin6.StoragePowerActorCodeID:
return load6(store, act.Head)
case builtin7.StoragePowerActorCodeID:
return load7(store, act.Head)
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version6:
return make6(store)
case actors.Version7:
return make7(store)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.StoragePowerActorCodeID, nil
case actors.Version7:
return builtin7.StoragePowerActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)

View File

@ -0,0 +1,187 @@
package power
import (
"bytes"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
power7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/power"
adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store) (State, error) {
out := state7{store: store}
s, err := power7.ConstructState(store)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state7 struct {
power7.State
store adt.Store
}
func (s *state7) TotalLocked() (abi.TokenAmount, error) {
return s.TotalPledgeCollateral, nil
}
func (s *state7) TotalPower() (Claim, error) {
return Claim{
RawBytePower: s.TotalRawBytePower,
QualityAdjPower: s.TotalQualityAdjPower,
}, nil
}
// Committed power to the network. Includes miners below the minimum threshold.
func (s *state7) TotalCommitted() (Claim, error) {
return Claim{
RawBytePower: s.TotalBytesCommitted,
QualityAdjPower: s.TotalQABytesCommitted,
}, nil
}
func (s *state7) MinerPower(addr address.Address) (Claim, bool, error) {
claims, err := s.claims()
if err != nil {
return Claim{}, false, err
}
var claim power7.Claim
ok, err := claims.Get(abi.AddrKey(addr), &claim)
if err != nil {
return Claim{}, false, err
}
return Claim{
RawBytePower: claim.RawBytePower,
QualityAdjPower: claim.QualityAdjPower,
}, ok, nil
}
func (s *state7) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
}
func (s *state7) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
return builtin.FromV7FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
}
func (s *state7) MinerCounts() (uint64, uint64, error) {
return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
}
func (s *state7) ListAllMiners() ([]address.Address, error) {
claims, err := s.claims()
if err != nil {
return nil, err
}
var miners []address.Address
err = claims.ForEach(nil, func(k string) error {
a, err := address.NewFromBytes([]byte(k))
if err != nil {
return err
}
miners = append(miners, a)
return nil
})
if err != nil {
return nil, err
}
return miners, nil
}
func (s *state7) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
claims, err := s.claims()
if err != nil {
return err
}
var claim power7.Claim
return claims.ForEach(&claim, func(k string) error {
a, err := address.NewFromBytes([]byte(k))
if err != nil {
return err
}
return cb(a, Claim{
RawBytePower: claim.RawBytePower,
QualityAdjPower: claim.QualityAdjPower,
})
})
}
func (s *state7) ClaimsChanged(other State) (bool, error) {
other7, ok := other.(*state7)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Claims.Equals(other7.State.Claims), nil
}
func (s *state7) SetTotalQualityAdjPower(p abi.StoragePower) error {
s.State.TotalQualityAdjPower = p
return nil
}
func (s *state7) SetTotalRawBytePower(p abi.StoragePower) error {
s.State.TotalRawBytePower = p
return nil
}
func (s *state7) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
s.State.ThisEpochQualityAdjPower = p
return nil
}
func (s *state7) SetThisEpochRawBytePower(p abi.StoragePower) error {
s.State.ThisEpochRawBytePower = p
return nil
}
func (s *state7) GetState() interface{} {
return &s.State
}
func (s *state7) claims() (adt.Map, error) {
return adt7.AsMap(s.store, s.Claims, builtin7.DefaultHamtBitwidth)
}
func (s *state7) decodeClaim(val *cbg.Deferred) (Claim, error) {
var ci power7.Claim
if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return Claim{}, err
}
return fromV7Claim(ci), nil
}
func fromV7Claim(v7 power7.Claim) Claim {
return Claim{
RawBytePower: v7.RawBytePower,
QualityAdjPower: v7.QualityAdjPower,
}
}

View File

@ -21,6 +21,8 @@ import (
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
@ -51,11 +53,15 @@ func init() {
builtin.RegisterActorState(builtin6.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load6(store, root)
})
builtin.RegisterActorState(builtin7.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load7(store, root)
})
}
var (
Address = builtin6.RewardActorAddr
Methods = builtin6.MethodsReward
Address = builtin7.RewardActorAddr
Methods = builtin7.MethodsReward
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@ -79,6 +85,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin6.RewardActorCodeID:
return load6(store, act.Head)
case builtin7.RewardActorCodeID:
return load7(store, act.Head)
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -104,6 +113,9 @@ func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.Storage
case actors.Version6:
return make6(store, currRealizedPower)
case actors.Version7:
return make7(store, currRealizedPower)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -129,6 +141,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.RewardActorCodeID, nil
case actors.Version7:
return builtin7.RewardActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)

View File

@ -0,0 +1,98 @@
package reward
import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
reward7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/reward"
smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
out := state7{store: store}
out.State = *reward7.ConstructState(currRealizedPower)
return &out, nil
}
type state7 struct {
reward7.State
store adt.Store
}
func (s *state7) ThisEpochReward() (abi.TokenAmount, error) {
return s.State.ThisEpochReward, nil
}
func (s *state7) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
return builtin.FilterEstimate{
PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
}, nil
}
func (s *state7) ThisEpochBaselinePower() (abi.StoragePower, error) {
return s.State.ThisEpochBaselinePower, nil
}
func (s *state7) TotalStoragePowerReward() (abi.TokenAmount, error) {
return s.State.TotalStoragePowerReward, nil
}
func (s *state7) EffectiveBaselinePower() (abi.StoragePower, error) {
return s.State.EffectiveBaselinePower, nil
}
func (s *state7) EffectiveNetworkTime() (abi.ChainEpoch, error) {
return s.State.EffectiveNetworkTime, nil
}
func (s *state7) CumsumBaseline() (reward7.Spacetime, error) {
return s.State.CumsumBaseline, nil
}
func (s *state7) CumsumRealized() (reward7.Spacetime, error) {
return s.State.CumsumRealized, nil
}
func (s *state7) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
return miner7.InitialPledgeForPower(
qaPower,
s.State.ThisEpochBaselinePower,
s.State.ThisEpochRewardSmoothed,
smoothing7.FilterEstimate{
PositionEstimate: networkQAPower.PositionEstimate,
VelocityEstimate: networkQAPower.VelocityEstimate,
},
circSupply,
), nil
}
func (s *state7) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
return miner7.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
smoothing7.FilterEstimate{
PositionEstimate: networkQAPower.PositionEstimate,
VelocityEstimate: networkQAPower.VelocityEstimate,
},
sectorWeight), nil
}
func (s *state7) GetState() interface{} {
return &s.State
}

View File

@ -17,10 +17,12 @@ import (
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
)
var (
Address = builtin6.SystemActorAddr
Address = builtin7.SystemActorAddr
)
func MakeState(store adt.Store, av actors.Version) (State, error) {
@ -44,6 +46,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version6:
return make6(store)
case actors.Version7:
return make7(store)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -69,6 +74,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.SystemActorCodeID, nil
case actors.Version7:
return builtin7.SystemActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)

View File

@ -0,0 +1,35 @@
package system
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
system7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/system"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store) (State, error) {
out := state7{store: store}
out.State = system7.State{}
return &out, nil
}
type state7 struct {
system7.State
store adt.Store
}
func (s *state7) GetState() interface{} {
return &s.State
}

View File

@ -0,0 +1,75 @@
package verifreg
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
)
var _ State = (*state7)(nil)
func load7(store adt.Store, root cid.Cid) (State, error) {
out := state7{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make7(store adt.Store, rootKeyAddress address.Address) (State, error) {
out := state7{store: store}
s, err := verifreg7.ConstructState(store, rootKeyAddress)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state7 struct {
verifreg7.State
store adt.Store
}
func (s *state7) RootKey() (address.Address, error) {
return s.State.RootKey, nil
}
func (s *state7) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return getDataCap(s.store, actors.Version7, s.verifiedClients, addr)
}
func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return getDataCap(s.store, actors.Version7, s.verifiers, addr)
}
func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version7, s.verifiers, cb)
}
func (s *state7) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version7, s.verifiedClients, cb)
}
func (s *state7) verifiedClients() (adt.Map, error) {
return adt7.AsMap(s.store, s.VerifiedClients, builtin7.DefaultHamtBitwidth)
}
func (s *state7) verifiers() (adt.Map, error) {
return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth)
}
func (s *state7) GetState() interface{} {
return &s.State
}

View File

@ -21,6 +21,8 @@ import (
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
@ -53,11 +55,15 @@ func init() {
return load6(store, root)
})
builtin.RegisterActorState(builtin7.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load7(store, root)
})
}
var (
Address = builtin6.VerifiedRegistryActorAddr
Methods = builtin6.MethodsVerifiedRegistry
Address = builtin7.VerifiedRegistryActorAddr
Methods = builtin7.MethodsVerifiedRegistry
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin6.VerifiedRegistryActorCodeID:
return load6(store, act.Head)
case builtin7.VerifiedRegistryActorCodeID:
return load7(store, act.Head)
}
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}
@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Addres
case actors.Version6:
return make6(store, rootKeyAddress)
case actors.Version7:
return make7(store, rootKeyAddress)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version6:
return builtin6.VerifiedRegistryActorCodeID, nil
case actors.Version7:
return builtin7.VerifiedRegistryActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)

View File

@ -40,14 +40,19 @@ import (
miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner"
verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg"
paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych"
)
const (
ChainFinality = miner6.ChainFinality
ChainFinality = miner7.ChainFinality
SealRandomnessLookback = ChainFinality
PaychSettleDelay = paych6.SettleDelay
MaxPreCommitRandomnessLookback = builtin6.EpochsInDay + SealRandomnessLookback
PaychSettleDelay = paych7.SettleDelay
MaxPreCommitRandomnessLookback = builtin7.EpochsInDay + SealRandomnessLookback
)
// SetSupportedProofTypes sets supported proof types, across all actor versions.
@ -72,6 +77,8 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
miner6.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner7.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
AddSupportedProofTypes(types...)
}
@ -119,6 +126,15 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
miner6.WindowPoStProofTypes[wpp] = struct{}{}
miner7.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
wpp, err = t.RegisteredWindowPoStProof()
if err != nil {
// Fine to panic, this is a test-only method
panic(err)
}
miner7.WindowPoStProofTypes[wpp] = struct{}{}
}
}
@ -139,11 +155,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
miner6.PreCommitChallengeDelay = delay
miner7.PreCommitChallengeDelay = delay
}
// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
func GetPreCommitChallengeDelay() abi.ChainEpoch {
return miner6.PreCommitChallengeDelay
return miner7.PreCommitChallengeDelay
}
// SetConsensusMinerMinPower sets the minimum power of an individual miner must
@ -173,6 +191,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin7.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
}
// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
@ -191,6 +213,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) {
verifreg6.MinVerifiedDealSize = size
verifreg7.MinVerifiedDealSize = size
}
func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) {
@ -220,6 +244,10 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (a
return miner6.MaxProveCommitDuration[t], nil
case actors.Version7:
return miner7.MaxProveCommitDuration[t], nil
default:
return 0, xerrors.Errorf("unsupported actors version")
}
@ -255,6 +283,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) {
Denominator: denom,
}
market7.ProviderCollateralSupplyTarget = builtin7.BigFrac{
Numerator: num,
Denominator: denom,
}
}
func DealProviderCollateralBounds(
@ -298,13 +331,18 @@ func DealProviderCollateralBounds(
min, max := market6.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actors.Version7:
min, max := market7.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
default:
return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version")
}
}
func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
return market6.DealDurationBounds(pieceSize)
return market7.DealDurationBounds(pieceSize)
}
// Sets the challenge window and scales the proving period to match (such that
@ -345,6 +383,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) {
// scale it if we're scaling the challenge period.
miner6.WPoStDisputeWindow = period * 30
miner7.WPoStChallengeWindow = period
miner7.WPoStProvingPeriod = period * abi.ChainEpoch(miner7.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner7.WPoStDisputeWindow = period * 30
}
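// Illustrative only (not part of this change): with a hypothetical 60-epoch
// challenge window, the scaling above gives a proving period of
// 60 * WPoStPeriodDeadlines (48) = 2880 epochs and a dispute window of
// 60 * 30 = 1800 epochs.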
func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
@ -357,15 +402,15 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
}
func GetMaxSectorExpirationExtension() abi.ChainEpoch {
return miner6.MaxSectorExpirationExtension
return miner7.MaxSectorExpirationExtension
}
func GetMinSectorExpiration() abi.ChainEpoch {
return miner6.MinSectorExpiration
return miner7.MinSectorExpiration
}
func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) {
sectorsPerPart, err := builtin6.PoStProofWindowPoStPartitionSectors(p)
sectorsPerPart, err := builtin7.PoStProofWindowPoStPartitionSectors(p)
if err != nil {
return 0, err
}
@ -378,8 +423,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e
func GetDefaultSectorSize() abi.SectorSize {
// supported sector sizes are the same across versions.
szs := make([]abi.SectorSize, 0, len(miner6.PreCommitSealProofTypesV8))
for spt := range miner6.PreCommitSealProofTypesV8 {
szs := make([]abi.SectorSize, 0, len(miner7.PreCommitSealProofTypesV8))
for spt := range miner7.PreCommitSealProofTypesV8 {
ss, err := spt.SectorSize()
if err != nil {
panic(err)
@ -404,7 +449,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version)
return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
}
return builtin6.SealProofPoliciesV11[proof].SectorMaxLifetime
return builtin7.SealProofPoliciesV11[proof].SectorMaxLifetime
}
func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
@ -432,6 +477,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
case actors.Version6:
return miner6.AddressedSectorsMax, nil
case actors.Version7:
return miner7.AddressedSectorsMax, nil
default:
return 0, xerrors.Errorf("unsupported network version")
}
@ -469,6 +517,10 @@ func GetDeclarationsMax(nwVer network.Version) (int, error) {
return miner6.DeclarationsMax, nil
case actors.Version7:
return miner7.DeclarationsMax, nil
default:
return 0, xerrors.Errorf("unsupported network version")
}
@ -505,6 +557,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba
return miner6.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
case actors.Version7:
return miner7.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
default:
return big.Zero(), xerrors.Errorf("unsupported network version")
}
@ -541,6 +597,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base
return miner6.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actors.Version7:
return miner7.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
default:
return big.Zero(), xerrors.Errorf("unsupported network version")
}

View File

@ -20,9 +20,9 @@ const ({{range .actorVersions}}
/* inline-gen start */
var LatestVersion = 6
var LatestVersion = 7
var Versions = []int{0, 2, 3, 4, 5, 6}
var Versions = []int{0, 2, 3, 4, 5, 6, 7}
const (
Version0 Version = 0
@ -31,6 +31,7 @@ const (
Version4 Version = 4
Version5 Version = 5
Version6 Version = 6
Version7 Version = 7
)
/* inline-gen end */
@ -50,6 +51,8 @@ func VersionForNetwork(version network.Version) (Version, error) {
return Version5, nil
case network.Version14:
return Version6, nil
case network.Version15:
return Version7, nil
default:
return -1, fmt.Errorf("unsupported network version %d", version)
}

View File

@ -28,6 +28,7 @@ import (
exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported"
exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
exported6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/exported"
exported7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/exported"
/* inline-gen end */
@ -59,6 +60,7 @@ func NewActorRegistry() *vm.ActorRegistry {
inv.Register(vm.ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...)
inv.Register(vm.ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...)
inv.Register(vm.ActorsVersionPredicate(actors.Version6), exported6.BuiltinActors()...)
inv.Register(vm.ActorsVersionPredicate(actors.Version7), exported7.BuiltinActors()...)
/* inline-gen end */

View File

@ -65,7 +65,7 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.
}
blsMsgCids = append(blsMsgCids, c)
} else {
} else if msg.Signature.Type == crypto.SigTypeSecp256k1 {
c, err := filec.sm.ChainStore().PutMessage(ctx, msg)
if err != nil {
return nil, err
@ -74,6 +74,8 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.
secpkMsgCids = append(secpkMsgCids, c)
secpkMessages = append(secpkMessages, msg)
} else {
return nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type)
}
}

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/filecoin-project/specs-actors/v6/actors/migration/nv14"
"github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
@ -156,6 +157,22 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
StopWithin: 5,
}},
Expensive: true,
}, {
Height: build.UpgradeSnapDealsHeight,
Network: network.Version15,
Migration: UpgradeActorsV7,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV7,
StartWithin: 120,
DontStartWithin: 60,
StopWithin: 35,
}, {
PreMigration: PreUpgradeActorsV7,
StartWithin: 30,
DontStartWithin: 15,
StopWithin: 5,
}},
Expensive: true,
},
}
@ -1170,7 +1187,94 @@ func upgradeActorsV6Common(
// Perform the migration
newHamtRoot, err := nv14.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err)
return cid.Undef, xerrors.Errorf("upgrading to actors v6: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion4,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree.
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
func UpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := runtime.NumCPU() - 3
if workerCount <= 0 {
workerCount = 1
}
config := nv15.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v6 state: %w", err)
}
return newRoot, nil
}
func PreUpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := runtime.NumCPU()
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
config := nv15.Config{MaxWorkers: uint(workerCount)}
_, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config)
return err
}
func upgradeActorsV7Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv15.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion4 {
return cid.Undef, xerrors.Errorf(
"expected state root version 4 for actors v7 upgrade, got %d",
stateRoot.Version,
)
}
// Perform the migration
newHamtRoot, err := nv15.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v7: %w", err)
}
// Persist the result.

View File

@ -9,6 +9,8 @@ import (
"sync/atomic"
"time"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/go-state-types/network"
@ -686,6 +688,10 @@ func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri
panic("not supported")
}
func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
panic("not supported")
}
func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
panic("not supported")
}

View File

@ -479,6 +479,10 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca
verifNeeds := make(map[address.Address]abi.PaddedPieceSize)
var sum abi.PaddedPieceSize
csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) {
return big.Zero(), nil
}
vmopt := vm.VMOpts{
StateBase: stateroot,
Epoch: 0,
@ -486,7 +490,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca
Bstore: cs.StateBlockstore(),
Actors: filcns.NewActorRegistry(),
Syscalls: mkFakedSigSyscalls(sys),
CircSupplyCalc: nil,
CircSupplyCalc: csc,
NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version {
return nv
},

View File

@ -6,6 +6,8 @@ import (
"fmt"
"math/rand"
runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
"github.com/ipfs/go-cid"
@ -29,7 +31,6 @@ import (
market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market"
power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power"
reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward"
runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
@ -57,7 +58,7 @@ func MinerAddress(genesisIndex uint64) address.Address {
}
type fakedSigSyscalls struct {
runtime5.Syscalls
runtime7.Syscalls
}
func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error {
@ -65,7 +66,7 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer
}
func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls {
return func(ctx context.Context, rt *vm.Runtime) runtime7.Syscalls {
return &fakedSigSyscalls{
base(ctx, rt),
}

View File

@ -121,7 +121,7 @@ loop:
// we can't fit the current chain but there is gas to spare
// trim it and push it down
chain.Trim(gasLimit, mp, baseFee)
chain.Trim(gasLimit, repubMsgLimit, mp, baseFee)
for j := i; j < len(chains)-1; j++ {
if chains[j].Before(chains[j+1]) {
break
@ -131,6 +131,10 @@ loop:
}
count := 0
if len(msgs) > repubMsgLimit {
msgs = msgs[:repubMsgLimit]
}
log.Infof("republishing %d messages", len(msgs))
for _, m := range msgs {
mb, err := m.Serialize()

View File

@ -7,6 +7,10 @@ import (
"sort"
"time"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-state-types/crypto"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@ -34,9 +38,10 @@ type msgChain struct {
merged bool
next *msgChain
prev *msgChain
sigType crypto.SigType
}
func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) {
func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
mp.curTsLk.Lock()
defer mp.curTsLk.Unlock()
@ -46,24 +51,156 @@ func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq
// if the ticket quality is high enough that the first block has higher probability
// than any other block, then we don't bother with optimal selection because the
// first block will always have higher effective performance
var sm *selectedMessages
var err error
if tq > 0.84 {
msgs, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts)
sm, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts)
} else {
msgs, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq)
sm, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq)
}
if err != nil {
return nil, err
}
if len(msgs) > build.BlockMessageLimit {
msgs = msgs[:build.BlockMessageLimit]
if sm == nil {
return nil, nil
}
return msgs, nil
// one last sanity check
if len(sm.msgs) > build.BlockMessageLimit {
log.Errorf("message selection chose too many messages %d > %d", len(sm.msgs), build.BlockMessageLimit)
sm.msgs = sm.msgs[:build.BlockMessageLimit]
}
return sm.msgs, nil
}
func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
type selectedMessages struct {
msgs []*types.SignedMessage
gasLimit int64
secpLimit int
blsLimit int
}
// returns false if chain can't be added due to block constraints
func (sm *selectedMessages) tryToAdd(mc *msgChain) bool {
l := len(mc.msgs)
if build.BlockMessageLimit < l+len(sm.msgs) || sm.gasLimit < mc.gasLimit {
return false
}
if mc.sigType == crypto.SigTypeBLS {
if sm.blsLimit < l {
return false
}
sm.msgs = append(sm.msgs, mc.msgs...)
sm.blsLimit -= l
sm.gasLimit -= mc.gasLimit
} else if mc.sigType == crypto.SigTypeSecp256k1 {
if sm.secpLimit < l {
return false
}
sm.msgs = append(sm.msgs, mc.msgs...)
sm.secpLimit -= l
sm.gasLimit -= mc.gasLimit
}
// don't add messages with an unrecognized sigType, but otherwise proceed
return true
}
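// Illustrative only (not part of this change), with hypothetical numbers: if
// blsLimit is 2, a BLS chain carrying 3 messages is refused even when gas is
// available; a secp chain of 3 messages with secpLimit 5 is added and leaves
// secpLimit at 2. Chains with any other signature type add no messages and
// consume no budget, but do not abort selection.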
// returns false if messages can't be added due to block constraints
// will trim / invalidate chain as appropriate
func (sm *selectedMessages) tryToAddWithDeps(mc *msgChain, mp *MessagePool, baseFee types.BigInt) bool {
// compute the dependencies that must be merged and the gas limit including deps
chainGasLimit := mc.gasLimit
chainMsgLimit := len(mc.msgs)
depGasLimit := int64(0)
depMsgLimit := 0
smMsgLimit := 0
if mc.sigType == crypto.SigTypeBLS {
smMsgLimit = sm.blsLimit
} else if mc.sigType == crypto.SigTypeSecp256k1 {
smMsgLimit = sm.secpLimit
} else {
return false
}
if smMsgLimit > build.BlockMessageLimit-len(sm.msgs) {
smMsgLimit = build.BlockMessageLimit - len(sm.msgs)
}
var chainDeps []*msgChain
for curChain := mc.prev; curChain != nil && !curChain.merged; curChain = curChain.prev {
chainDeps = append(chainDeps, curChain)
chainGasLimit += curChain.gasLimit
chainMsgLimit += len(curChain.msgs)
depGasLimit += curChain.gasLimit
depMsgLimit += len(curChain.msgs)
}
// the chain doesn't fit as-is, so trim / invalidate it and return false
if chainGasLimit > sm.gasLimit || chainMsgLimit > smMsgLimit {
// it doesn't all fit; now we have to take into account the dependent chains before
// making a decision about trimming or invalidating.
// if the dependencies exceed the block limits, then we must invalidate the chain
// as it can never be included.
// Otherwise we can just trim and continue
if depGasLimit > sm.gasLimit || depMsgLimit >= smMsgLimit {
mc.Invalidate()
} else {
// dependencies fit, just trim it
mc.Trim(sm.gasLimit-depGasLimit, smMsgLimit-depMsgLimit, mp, baseFee)
}
return false
}
// the chain fits! include it together with all dependencies
for i := len(chainDeps) - 1; i >= 0; i-- {
curChain := chainDeps[i]
curChain.merged = true
sm.msgs = append(sm.msgs, curChain.msgs...)
}
mc.merged = true
sm.msgs = append(sm.msgs, mc.msgs...)
sm.gasLimit -= chainGasLimit
if mc.sigType == crypto.SigTypeBLS {
sm.blsLimit -= chainMsgLimit
} else if mc.sigType == crypto.SigTypeSecp256k1 {
sm.secpLimit -= chainMsgLimit
}
return true
}
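// Illustrative only (not part of this change), with hypothetical numbers and
// gas assumed non-binding: a chain of 4 messages with two unmerged ancestor
// chains of 3 messages each needs 10 message slots, 6 of them for the
// dependencies. With 8 slots remaining the chain is trimmed to 8-6 = 2 of its
// own messages and false is returned; with only 5 slots remaining the
// dependencies alone already exceed the budget, so the chain is invalidated
// outright.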
func (sm *selectedMessages) trimChain(mc *msgChain, mp *MessagePool, baseFee types.BigInt) {
msgLimit := build.BlockMessageLimit - len(sm.msgs)
if mc.sigType == crypto.SigTypeBLS {
if msgLimit > sm.blsLimit {
msgLimit = sm.blsLimit
}
} else if mc.sigType == crypto.SigTypeSecp256k1 {
if msgLimit > sm.secpLimit {
msgLimit = sm.secpLimit
}
}
if mc.gasLimit > sm.gasLimit || len(mc.msgs) > msgLimit {
mc.Trim(sm.gasLimit, msgLimit, mp, baseFee)
}
}
func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) (*selectedMessages, error) {
start := time.Now()
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
@ -89,10 +226,10 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ
// 0b. Select all priority messages that fit in the block
minGas := int64(gasguess.MinGas)
result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts)
result := mp.selectPriorityMessages(ctx, pending, baseFee, ts)
// have we filled the block?
if gasLimit < minGas {
if result.gasLimit < minGas || len(result.msgs) >= build.BlockMessageLimit {
return result, nil
}
@ -117,19 +254,21 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ
return result, nil
}
// 3. Parition chains into blocks (without trimming)
// 3. Partition chains into blocks (without trimming)
// we use the full blockGasLimit (as opposed to the residual gas limit from the
// priority message selection) as we have to account for what other miners are doing
// priority message selection) as we have to account for what other block providers are doing
nextChain := 0
partitions := make([][]*msgChain, MaxBlocks)
for i := 0; i < MaxBlocks && nextChain < len(chains); i++ {
gasLimit := int64(build.BlockGasLimit)
msgLimit := build.BlockMessageLimit
for nextChain < len(chains) {
chain := chains[nextChain]
nextChain++
partitions[i] = append(partitions[i], chain)
gasLimit -= chain.gasLimit
if gasLimit < minGas {
msgLimit -= len(chain.msgs)
if gasLimit < minGas || msgLimit <= 0 {
break
}
}
@ -158,7 +297,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ
})
// 6. Merge the head chains to produce the list of messages selected for inclusion
// subject to the residual gas limit
// subject to the residual block limits
// When a chain is merged in, all its previous dependent chains *must* also be
// merged in or we'll have a broken block
startMerge := time.Now()
@ -174,35 +313,16 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ
continue
}
// compute the dependencies that must be merged and the gas limit including deps
chainGasLimit := chain.gasLimit
var chainDeps []*msgChain
for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev {
chainDeps = append(chainDeps, curChain)
chainGasLimit += curChain.gasLimit
}
// does it all fit in the block?
if chainGasLimit <= gasLimit {
// include it together with all dependencies
for i := len(chainDeps) - 1; i >= 0; i-- {
curChain := chainDeps[i]
curChain.merged = true
result = append(result, curChain.msgs...)
}
chain.merged = true
// adjust the effective pefromance for all subsequent chains
if result.tryToAddWithDeps(chain, mp, baseFee) {
// adjust the effective performance for all subsequent chains
if next := chain.next; next != nil && next.effPerf > 0 {
next.effPerf += next.parentOffset
for next = next.next; next != nil && next.effPerf > 0; next = next.next {
next.setEffPerf()
}
}
result = append(result, chain.msgs...)
gasLimit -= chainGasLimit
// resort to account for already merged chains and effective performance adjustments
// re-sort to account for already merged chains and effective performance adjustments
// the sort *must* be stable or we end up getting negative gasPerfs pushed up.
sort.SliceStable(chains[i+1:], func(i, j int) bool {
return chains[i].BeforeEffective(chains[j])
@ -211,7 +331,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ
continue
}
// we can't fit this chain and its dependencies because of block gasLimit -- we are
// we can't fit this chain and its dependencies because of block limits -- we are
// at the edge
last = i
break
@ -222,18 +342,22 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ
// 7. We have reached the edge of what can fit wholesale; if we still have available
// gasLimit to pack some more chains, then trim the last chain and push it down.
// Trimming invalidaates subsequent dependent chains so that they can't be selected
// Trimming invalidates subsequent dependent chains so that they can't be selected
// as their dependency cannot be (fully) included.
// We do this in a loop because the blocker might have been inordinately large and
// we might have to do it multiple times to satisfy tail packing
startTail := time.Now()
tailLoop:
for gasLimit >= minGas && last < len(chains) {
// trim if necessary
if chains[last].gasLimit > gasLimit {
chains[last].Trim(gasLimit, mp, baseFee)
for result.gasLimit >= minGas && last < len(chains) {
if !chains[last].valid {
last++
continue tailLoop
}
// trim if necessary
result.trimChain(chains[last], mp, baseFee)
// push down if it hasn't been invalidated
if chains[last].valid {
for i := last; i < len(chains)-1; i++ {
@ -245,7 +369,7 @@ tailLoop:
}
// select the next (valid and fitting) chain and its dependencies for inclusion
for i, chain := range chains[last:] {
for _, chain := range chains[last:] {
// has the chain been invalidated?
if !chain.valid {
continue
@ -261,45 +385,10 @@ tailLoop:
break tailLoop
}
// compute the dependencies that must be merged and the gas limit including deps
chainGasLimit := chain.gasLimit
depGasLimit := int64(0)
var chainDeps []*msgChain
for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev {
chainDeps = append(chainDeps, curChain)
chainGasLimit += curChain.gasLimit
depGasLimit += curChain.gasLimit
}
// does it all fit in the bock
if chainGasLimit <= gasLimit {
// include it together with all dependencies
for i := len(chainDeps) - 1; i >= 0; i-- {
curChain := chainDeps[i]
curChain.merged = true
result = append(result, curChain.msgs...)
}
chain.merged = true
result = append(result, chain.msgs...)
gasLimit -= chainGasLimit
if result.tryToAddWithDeps(chain, mp, baseFee) {
continue
}
// it doesn't all fit; now we have to take into account the dependent chains before
// making a decision about trimming or invalidating.
// if the dependencies exceed the gas limit, then we must invalidate the chain
// as it can never be included.
// Otherwise we can just trim and continue
if depGasLimit > gasLimit {
chain.Invalidate()
last += i + 1
continue tailLoop
}
// dependencies fit, just trim it
chain.Trim(gasLimit-depGasLimit, mp, baseFee)
last += i
continue tailLoop
}
@ -311,17 +400,17 @@ tailLoop:
log.Infow("pack tail chains done", "took", dt)
}
// if we have gasLimit to spare, pick some random (non-negative) chains to fill the block
// we pick randomly so that we minimize the probability of duplication among all miners
if gasLimit >= minGas {
randomCount := 0
// if we have room to spare, pick some random (non-negative) chains to fill the block
// we pick randomly so that we minimize the probability of duplication among all block producers
if result.gasLimit >= minGas && len(result.msgs) <= build.BlockMessageLimit {
preRandomLength := len(result.msgs)
startRandom := time.Now()
shuffleChains(chains)
for _, chain := range chains {
// have we filled the block?
if gasLimit < minGas {
if result.gasLimit < minGas || len(result.msgs) >= build.BlockMessageLimit {
break
}
@ -335,59 +424,31 @@ tailLoop:
continue
}
// compute the dependencies that must be merged and the gas limit including deps
chainGasLimit := chain.gasLimit
depGasLimit := int64(0)
var chainDeps []*msgChain
for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev {
chainDeps = append(chainDeps, curChain)
chainGasLimit += curChain.gasLimit
depGasLimit += curChain.gasLimit
}
// do the deps fit? if the deps won't fit, invalidate the chain
if depGasLimit > gasLimit {
chain.Invalidate()
if result.tryToAddWithDeps(chain, mp, baseFee) {
continue
}
// do they fit as is? if it doesn't, trim to make it fit if possible
if chainGasLimit > gasLimit {
chain.Trim(gasLimit-depGasLimit, mp, baseFee)
if !chain.valid {
continue
}
if chain.valid {
// chain got trimmed on the previous call to tryToAddWithDeps, can now be included
result.tryToAddWithDeps(chain, mp, baseFee)
continue
}
// include it together with all dependencies
for i := len(chainDeps) - 1; i >= 0; i-- {
curChain := chainDeps[i]
curChain.merged = true
result = append(result, curChain.msgs...)
randomCount += len(curChain.msgs)
}
chain.merged = true
result = append(result, chain.msgs...)
randomCount += len(chain.msgs)
gasLimit -= chainGasLimit
}
if dt := time.Since(startRandom); dt > time.Millisecond {
log.Infow("pack random tail chains done", "took", dt)
}
if randomCount > 0 {
if len(result.msgs) != preRandomLength {
log.Warnf("optimal selection failed to pack a block; picked %d messages with random selection",
randomCount)
len(result.msgs)-preRandomLength)
}
}
return result, nil
}
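
Illustrative sketch (not from the lotus codebase): the final random-fill stage above in isolation — once deterministic packing hits its edge, any leftover gas and message room is filled from a shuffled list of remaining chains, so different block producers are unlikely to pick exactly the same tail. The real code operates on `msgChain`s and still routes through `tryToAddWithDeps`; this is just the shape of the loop.

```go
package main

import (
	"fmt"
	"math/rand"
)

type tailChain struct {
	gasLimit int64
	msgs     int
}

// fillRandom greedily packs shuffled chains into the leftover gas and message room.
func fillRandom(chains []tailChain, gasLeft int64, msgsLeft int) (picked int) {
	rand.Shuffle(len(chains), func(i, j int) { chains[i], chains[j] = chains[j], chains[i] })
	for _, c := range chains {
		if gasLeft <= 0 || msgsLeft <= 0 {
			break
		}
		if c.gasLimit > gasLeft || c.msgs > msgsLeft {
			continue // this chain doesn't fit; try the next one
		}
		gasLeft -= c.gasLimit
		msgsLeft -= c.msgs
		picked += c.msgs
	}
	return picked
}

func main() {
	chains := []tailChain{{1000, 1}, {2500, 2}, {4000, 3}, {500, 1}}
	fmt.Println("picked", fillRandom(chains, 5000, 4), "messages")
}
```
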
func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) ([]*types.SignedMessage, error) {
func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) (*selectedMessages, error) {
start := time.Now()
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
@ -413,10 +474,10 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type
// 0b. Select all priority messages that fit in the block
minGas := int64(gasguess.MinGas)
result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts)
result := mp.selectPriorityMessages(ctx, pending, baseFee, ts)
// have we filled the block?
if gasLimit < minGas {
if result.gasLimit < minGas || len(result.msgs) > build.BlockMessageLimit {
return result, nil
}
@ -442,7 +503,7 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type
}
// 3. Merge the head chains to produce the list of messages selected for inclusion, subject to
// the block gas limit.
// the block gas and message limits.
startMerge := time.Now()
last := len(chains)
for i, chain := range chains {
@ -452,13 +513,12 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type
}
// does it fit in the block?
if chain.gasLimit <= gasLimit {
gasLimit -= chain.gasLimit
result = append(result, chain.msgs...)
if result.tryToAdd(chain) {
// there was room, we added the chain, keep going
continue
}
// we can't fit this chain because of block gasLimit -- we are at the edge
// we can't fit this chain because of block limits -- we are at the edge
last = i
break
}
@ -474,9 +534,9 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type
// have to do it multiple times to satisfy tail packing.
startTail := time.Now()
tailLoop:
for gasLimit >= minGas && last < len(chains) {
for result.gasLimit >= minGas && last < len(chains) {
// trim
chains[last].Trim(gasLimit, mp, baseFee)
result.trimChain(chains[last], mp, baseFee)
// push down if it hasn't been invalidated
if chains[last].valid {
@ -501,9 +561,8 @@ tailLoop:
}
// does it fit in the bock?
if chain.gasLimit <= gasLimit {
gasLimit -= chain.gasLimit
result = append(result, chain.msgs...)
if result.tryToAdd(chain) {
// there was room, we added the chain, keep going
continue
}
@ -523,7 +582,7 @@ tailLoop:
return result, nil
}
func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) {
func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *selectedMessages {
start := time.Now()
defer func() {
if dt := time.Since(start); dt > time.Millisecond {
@ -531,8 +590,12 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a
}
}()
mpCfg := mp.getConfig()
result := make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow)
gasLimit := int64(build.BlockGasLimit)
result := &selectedMessages{
msgs: make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow),
gasLimit: int64(build.BlockGasLimit),
blsLimit: cbg.MaxLength,
secpLimit: cbg.MaxLength,
}
minGas := int64(gasguess.MinGas)
// 1. Get priority actor chains
@ -542,7 +605,7 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a
pk, err := mp.resolveToKey(ctx, actor)
if err != nil {
log.Debugf("mpooladdlocal failed to resolve sender: %s", err)
return nil, gasLimit
return result
}
mset, ok := pending[pk]
@ -554,9 +617,8 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a
chains = append(chains, next...)
}
}
if len(chains) == 0 {
return nil, gasLimit
return result
}
// 2. Sort the chains
@ -566,7 +628,7 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a
if len(chains) != 0 && chains[0].gasPerf < 0 {
log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf)
return nil, gasLimit
return result
}
// 3. Merge chains until the block limit, as long as they have non-negative gas performance
@ -576,9 +638,8 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a
break
}
if chain.gasLimit <= gasLimit {
gasLimit -= chain.gasLimit
result = append(result, chain.msgs...)
if result.tryToAdd(chain) {
// there was room, we added the chain, keep going
continue
}
@ -588,9 +649,10 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a
}
tailLoop:
for gasLimit >= minGas && last < len(chains) {
for result.gasLimit >= minGas && last < len(chains) {
// trim, discarding negative performing messages
chains[last].Trim(gasLimit, mp, baseFee)
result.trimChain(chains[last], mp, baseFee)
// push down if it hasn't been invalidated
if chains[last].valid {
@ -615,9 +677,8 @@ tailLoop:
}
// does it fit in the block?
if chain.gasLimit <= gasLimit {
gasLimit -= chain.gasLimit
result = append(result, chain.msgs...)
if result.tryToAdd(chain) {
// there was room, we added the chain, keep going
continue
}
@ -631,7 +692,7 @@ tailLoop:
break
}
return result, gasLimit
return result
}
func (mp *MessagePool) getPendingMessages(ctx context.Context, curTs, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) {
@ -778,11 +839,16 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
return nil
}
// if we have more messages from this sender than can fit in a block, drop the extra ones
if len(msgs) > build.BlockMessageLimit {
msgs = msgs[:build.BlockMessageLimit]
}
// ok, now we can construct the chains using the messages we have
// invariant: each chain has a bigger gasPerf than the next -- otherwise they can be merged
// and increase the gasPerf of the first chain
// We do this in two passes:
// - in the first pass we create chains that aggreagate messages with non-decreasing gasPerf
// - in the first pass we create chains that aggregate messages with non-decreasing gasPerf
// - in the second pass we merge chains to maintain the invariant.
var chains []*msgChain
var curChain *msgChain
@ -794,6 +860,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
chain.gasLimit = m.Message.GasLimit
chain.gasPerf = mp.getGasPerf(chain.gasReward, chain.gasLimit)
chain.valid = true
chain.sigType = m.Signature.Type
return chain
}
@ -808,7 +875,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
gasLimit := curChain.gasLimit + m.Message.GasLimit
gasPerf := mp.getGasPerf(gasReward, gasLimit)
// try to add the message to the current chain -- if it decreases the gasPerf, then make a
// try to add the message to the current chain -- if doing so decreases the gasPerf, then make a
// new chain
if gasPerf < curChain.gasPerf {
chains = append(chains, curChain)
@ -868,9 +935,9 @@ func (mc *msgChain) Before(other *msgChain) bool {
(mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
}
func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt) {
func (mc *msgChain) Trim(gasLimit int64, msgLimit int, mp *MessagePool, baseFee types.BigInt) {
i := len(mc.msgs) - 1
for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) {
for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0 || i >= msgLimit) {
gasReward := mp.getGasReward(mc.msgs[i], baseFee)
mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward)
mc.gasLimit -= mc.msgs[i].Message.GasLimit
@ -893,6 +960,7 @@ func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt)
mc.msgs = mc.msgs[:i+1]
}
// TODO: if the trim above is a no-op, this (may) needlessly invalidates the next chain
if mc.next != nil {
mc.next.Invalidate()
mc.next = nil

View File

@ -13,6 +13,10 @@ import (
"sort"
"testing"
"github.com/filecoin-project/go-state-types/crypto"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
@ -527,7 +531,7 @@ func TestBasicMessageSelection(t *testing.T) {
}
}
func TestMessageSelectionTrimming(t *testing.T) {
func TestMessageSelectionTrimmingGas(t *testing.T) {
mp, tma := makeTestMpool()
// the actors
@ -577,7 +581,7 @@ func TestMessageSelectionTrimming(t *testing.T) {
expected := int(build.BlockGasLimit / gasLimit)
if len(msgs) != expected {
t.Fatalf("expected %d messages, bug got %d", expected, len(msgs))
t.Fatalf("expected %d messages, but got %d", expected, len(msgs))
}
mGasLimit := int64(0)
@ -590,6 +594,199 @@ func TestMessageSelectionTrimming(t *testing.T) {
}
func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) {
mp, tma := makeTestMpool()
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
block := tma.nextBlock()
ts := mock.TipSet(block)
tma.applyBlock(t, block)
tma.setBalance(a1, 1) // in FIL
// create a larger than selectable chain
for i := 0; i < build.BlockMessageLimit; i++ {
m := makeTestMessage(w1, a1, a1, uint64(i), 300000, 100)
mustAdd(t, mp, m)
}
msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil {
t.Fatal(err)
}
expected := cbg.MaxLength
if len(msgs) != expected {
t.Fatalf("expected %d messages, but got %d", expected, len(msgs))
}
mGasLimit := int64(0)
for _, m := range msgs {
mGasLimit += m.Message.GasLimit
}
if mGasLimit > build.BlockGasLimit {
t.Fatal("selected messages gas limit exceeds block gas limit!")
}
}
func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) {
mp, tma := makeTestMpool()
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.WalletNew(context.Background(), types.KTBLS)
if err != nil {
t.Fatal(err)
}
block := tma.nextBlock()
ts := mock.TipSet(block)
tma.applyBlock(t, block)
tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL
// create 2 larger than selectable chains
for i := 0; i < build.BlockMessageLimit; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100)
mustAdd(t, mp, m)
// a2's messages are preferred
m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000)
mustAdd(t, mp, m)
}
msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil {
t.Fatal(err)
}
mGasLimit := int64(0)
counts := make(map[crypto.SigType]uint)
for _, m := range msgs {
mGasLimit += m.Message.GasLimit
counts[m.Signature.Type]++
}
if mGasLimit > build.BlockGasLimit {
t.Fatal("selected messages gas limit exceeds block gas limit!")
}
expected := build.BlockMessageLimit
if len(msgs) != expected {
t.Fatalf("expected %d messages, but got %d", expected, len(msgs))
}
if counts[crypto.SigTypeBLS] != cbg.MaxLength {
t.Fatalf("expected %d bls messages, but got %d", cbg.MaxLength, len(msgs))
}
}
func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) {
mp, tma := makeTestMpool()
// the actors
w1, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1)
if err != nil {
t.Fatal(err)
}
w2, err := wallet.NewWallet(wallet.NewMemKeyStore())
if err != nil {
t.Fatal(err)
}
a2, err := w2.WalletNew(context.Background(), types.KTBLS)
if err != nil {
t.Fatal(err)
}
block := tma.nextBlock()
ts := mock.TipSet(block)
tma.applyBlock(t, block)
tma.setBalance(a1, 1) // in FIL
tma.setBalance(a2, 1) // in FIL
// create 2 almost max-length chains of equal value
i := 0
for i = 0; i < cbg.MaxLength-1; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100)
mustAdd(t, mp, m)
// a2's messages are preferred
m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100)
mustAdd(t, mp, m)
}
// a1's 8192nd message is worth more than a2's
m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 1000)
mustAdd(t, mp, m)
m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100)
mustAdd(t, mp, m)
i++
// a2's (unselectable) 8193rd message is worth SO MUCH
m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000000)
mustAdd(t, mp, m)
msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil {
t.Fatal(err)
}
mGasLimit := int64(0)
counts := make(map[crypto.SigType]uint)
for _, m := range msgs {
mGasLimit += m.Message.GasLimit
counts[m.Signature.Type]++
}
if mGasLimit > build.BlockGasLimit {
t.Fatal("selected messages gas limit exceeds block gas limit!")
}
expected := build.BlockMessageLimit
if len(msgs) != expected {
t.Fatalf("expected %d messages, but got %d", expected, len(msgs))
}
// we should have taken the secp chain
if counts[crypto.SigTypeSecp256k1] != cbg.MaxLength {
t.Fatalf("expected %d bls messages, but got %d", cbg.MaxLength, len(msgs))
}
}
func TestPriorityMessageSelection(t *testing.T) {
mp, tma := makeTestMpool()
@ -978,7 +1175,7 @@ func TestOptimalMessageSelection2(t *testing.T) {
func TestOptimalMessageSelection3(t *testing.T) {
// this test uses 10 actors sending a block of messages to each other, with the first
// actors paying higher gas premium than the subsequent actors.
// We select with a low ticket quality; the chain depenent merging algorithm should pick
// We select with a low ticket quality; the chain dependent merging algorithm should pick
// messages from the median actor from the start
mp, tma := makeTestMpool()
@ -1109,11 +1306,13 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
logging.SetLogLevel("messagepool", "error")
// 1. greedy selection
greedyMsgs, err := mp.selectMessagesGreedy(context.Background(), ts, ts)
gm, err := mp.selectMessagesGreedy(context.Background(), ts, ts)
if err != nil {
t.Fatal(err)
}
greedyMsgs := gm.msgs
totalGreedyCapacity := 0.0
totalGreedyReward := 0.0
totalOptimalCapacity := 0.0

View File

@ -159,7 +159,7 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) {
/* inline-gen start */
case network.Version13, network.Version14:
case network.Version13, network.Version14, network.Version15:
/* inline-gen end */
return types.StateTreeVersion4, nil

View File

@ -3,13 +3,14 @@ package vm
import (
"fmt"
vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/go-address"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/build"
vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-cid"
)
@ -73,9 +74,10 @@ type Pricelist interface {
OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error)
OnHashing(dataSize int) GasCharge
OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge
OnVerifySeal(info proof5.SealVerifyInfo) GasCharge
OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge
OnVerifyPost(info proof5.WindowPoStVerifyInfo) GasCharge
OnVerifySeal(info proof7.SealVerifyInfo) GasCharge
OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge
OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge
OnVerifyPost(info proof7.WindowPoStVerifyInfo) GasCharge
OnVerifyConsensusFault() GasCharge
}
@ -227,7 +229,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
}
type pricedSyscalls struct {
under vmr5.Syscalls
under vmr.Syscalls
pl Pricelist
chargeGas func(GasCharge)
}
@ -261,7 +263,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p
}
// Verifies a sector seal proof.
func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error {
func (ps pricedSyscalls) VerifySeal(vi proof7.SealVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifySeal(vi))
defer ps.chargeGas(gasOnActorExec)
@ -269,7 +271,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error {
}
// Verifies a proof of spacetime.
func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error {
func (ps pricedSyscalls) VerifyPoSt(vi proof7.WindowPoStVerifyInfo) error {
ps.chargeGas(ps.pl.OnVerifyPost(vi))
defer ps.chargeGas(gasOnActorExec)
@ -286,14 +288,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error {
// the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the
// blocks in the parent of h2 (i.e. h2's grandparent).
// Returns nil and an error if the headers don't prove a fault.
func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr5.ConsensusFault, error) {
func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr.ConsensusFault, error) {
ps.chargeGas(ps.pl.OnVerifyConsensusFault())
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifyConsensusFault(h1, h2, extra)
}
func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) {
func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof7.SealVerifyInfo) (map[address.Address][]bool, error) {
count := int64(0)
for _, svis := range inp {
count += int64(len(svis))
@ -307,9 +309,16 @@ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealV
return ps.under.BatchVerifySeals(inp)
}
func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error {
func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) error {
ps.chargeGas(ps.pl.OnVerifyAggregateSeals(aggregate))
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifyAggregateSeals(aggregate)
}
func (ps pricedSyscalls) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error {
ps.chargeGas(ps.pl.OnVerifyReplicaUpdate(update))
defer ps.chargeGas(gasOnActorExec)
return ps.under.VerifyReplicaUpdate(update)
}

View File

@ -3,8 +3,7 @@ package vm
import (
"fmt"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
@ -206,14 +205,14 @@ func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealPr
}
// OnVerifySeal
func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge {
func (pl *pricelistV0) OnVerifySeal(info proof7.SealVerifyInfo) GasCharge {
// TODO: this needs more cost tuning, check with @lotus
// this is not used
return newGasCharge("OnVerifySeal", pl.verifySealBase, 0)
}
// OnVerifyAggregateSeals
func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge {
func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge {
proofType := aggregate.SealProof
perProof, ok := pl.verifyAggregateSealPer[proofType]
if !ok {
@ -228,8 +227,14 @@ func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVeri
return newGasCharge("OnVerifyAggregateSeals", perProof*num+step.Lookup(num), 0)
}
// OnVerifyReplicaUpdate
func (pl *pricelistV0) OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge {
// TODO: do the thing
return GasCharge{}
}
// OnVerifyPost
func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge {
func (pl *pricelistV0) OnVerifyPost(info proof7.WindowPoStVerifyInfo) GasCharge {
sectorSize := "unknown"
var proofType abi.RegisteredPoStProof

View File

@ -16,7 +16,7 @@ import (
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime"
vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"

View File

@ -26,6 +26,7 @@ import (
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
/* inline-gen end */
@ -130,6 +131,8 @@ func newAccountActor(ver actors.Version) *types.Actor {
code = builtin5.AccountActorCodeID
case actors.Version6:
code = builtin6.AccountActorCodeID
case actors.Version7:
code = builtin7.AccountActorCodeID
/* inline-gen end */
default:
panic("unsupported actors version")

View File

@ -17,8 +17,12 @@ import (
"github.com/filecoin-project/go-state-types/network"
rtt "github.com/filecoin-project/go-state-types/rt"
rt0 "github.com/filecoin-project/specs-actors/actors/runtime"
rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime"
rt3 "github.com/filecoin-project/specs-actors/v3/actors/runtime"
rt4 "github.com/filecoin-project/specs-actors/v4/actors/runtime"
rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
rt6 "github.com/filecoin-project/specs-actors/v6/actors/runtime"
rt7 "github.com/filecoin-project/specs-actors/v7/actors/runtime"
"github.com/ipfs/go-cid"
ipldcbor "github.com/ipfs/go-ipld-cbor"
"go.opencensus.io/trace"
@ -56,8 +60,8 @@ func (m *Message) ValueReceived() abi.TokenAmount {
var EnableGasTracing = os.Getenv("LOTUS_VM_ENABLE_GAS_TRACING_VERY_SLOW") == "1"
type Runtime struct {
rt5.Message
rt5.Syscalls
rt7.Message
rt7.Syscalls
ctx context.Context
@ -143,7 +147,12 @@ func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid {
var _ rt0.Runtime = (*Runtime)(nil)
var _ rt5.Runtime = (*Runtime)(nil)
var _ rt2.Runtime = (*Runtime)(nil)
var _ rt3.Runtime = (*Runtime)(nil)
var _ rt4.Runtime = (*Runtime)(nil)
var _ rt5.Runtime = (*Runtime)(nil)
var _ rt6.Runtime = (*Runtime)(nil)
var _ rt7.Runtime = (*Runtime)(nil)
func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) {
defer func() {
@ -324,7 +333,7 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) {
}
// Transfer the executing actor's balance to the beneficiary
if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil {
if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance, rt.NetworkVersion()); err != nil {
panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err))
}
}

View File

@ -7,6 +7,8 @@ import (
goruntime "runtime"
"sync"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/minio/blake2b-simd"
@ -26,8 +28,8 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
"github.com/filecoin-project/lotus/lib/sigs"
runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
)
func init() {
@ -36,10 +38,10 @@ func init() {
// Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there
type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime5.Syscalls
type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime7.Syscalls
func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
return func(ctx context.Context, rt *Runtime) runtime5.Syscalls {
return func(ctx context.Context, rt *Runtime) runtime7.Syscalls {
return &syscallShim{
ctx: ctx,
@ -90,7 +92,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte {
// Checks validity of the submitted consensus fault with the two block headers needed to prove the fault
// and an optional extra one to check common ancestry (as needed).
// Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch().
func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.ConsensusFault, error) {
func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime7.ConsensusFault, error) {
// Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions.
// Whether or not it could ever have been accepted in a chain is not checked/does not matter here.
// for that reason when checking block parent relationships, rather than instantiating a Tipset to do so
@ -133,14 +135,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse
}
// (2) check for the consensus faults themselves
var consensusFault *runtime5.ConsensusFault
var consensusFault *runtime7.ConsensusFault
// (a) double-fork mining fault
if blockA.Height == blockB.Height {
consensusFault = &runtime5.ConsensusFault{
consensusFault = &runtime7.ConsensusFault{
Target: blockA.Miner,
Epoch: blockB.Height,
Type: runtime5.ConsensusFaultDoubleForkMining,
Type: runtime7.ConsensusFaultDoubleForkMining,
}
}
@ -148,10 +150,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse
// strictly speaking no need to compare heights based on double fork mining check above,
// but at same height this would be a different fault.
if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height {
consensusFault = &runtime5.ConsensusFault{
consensusFault = &runtime7.ConsensusFault{
Target: blockA.Miner,
Epoch: blockB.Height,
Type: runtime5.ConsensusFaultTimeOffsetMining,
Type: runtime7.ConsensusFaultTimeOffsetMining,
}
}
@ -171,10 +173,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse
if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
consensusFault = &runtime5.ConsensusFault{
consensusFault = &runtime7.ConsensusFault{
Target: blockA.Miner,
Epoch: blockB.Height,
Type: runtime5.ConsensusFaultParentGrinding,
Type: runtime7.ConsensusFaultParentGrinding,
}
}
}
@ -286,6 +288,7 @@ func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerify
if err != nil {
return xerrors.Errorf("failed to verify aggregated PoRep: %w", err)
}
if !ok {
return fmt.Errorf("invalid aggregate proof")
}
@ -293,6 +296,19 @@ func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerify
return nil
}
func (ss *syscallShim) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error {
ok, err := ss.verifier.VerifyReplicaUpdate(update)
if err != nil {
return xerrors.Errorf("failed to verify replica update: %w", err)
}
if !ok {
return fmt.Errorf("invalid replica update")
}
return nil
}
func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error {
// TODO: in genesis setup, we are currently faking signatures

View File

@ -202,9 +202,7 @@ type (
)
type VM struct {
cstate *state.StateTree
// TODO: Is base actually used? Can we delete it?
base cid.Cid
cstate *state.StateTree
cst *cbor.BasicIpldStore
buf *blockstore.BufferedBlockstore
blockHeight abi.ChainEpoch
@ -214,6 +212,7 @@ type VM struct {
ntwkVersion NtwkVersionGetter
baseFee abi.TokenAmount
lbStateGet LookbackStateGetter
baseCircSupply abi.TokenAmount
Syscalls SyscallBuilder
}
@ -239,9 +238,13 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
return nil, err
}
baseCirc, err := opts.CircSupplyCalc(ctx, opts.Epoch, state)
if err != nil {
return nil, err
}
return &VM{
cstate: state,
base: opts.StateBase,
cst: cst,
buf: buf,
blockHeight: opts.Epoch,
@ -251,6 +254,7 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
ntwkVersion: opts.NtwkVersion,
Syscalls: opts.Syscalls,
baseFee: opts.BaseFee,
baseCircSupply: baseCirc,
lbStateGet: opts.LookbackState,
}, nil
}
@ -339,7 +343,7 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0))
if types.BigCmp(msg.Value, types.NewInt(0)) != 0 {
if err := vm.transfer(msg.From, msg.To, msg.Value); err != nil {
if err := vm.transfer(msg.From, msg.To, msg.Value, vm.ntwkVersion(ctx, vm.blockHeight)); err != nil {
return nil, aerrors.Wrap(err, "failed to transfer funds")
}
}
@ -859,7 +863,12 @@ func (vm *VM) GetNtwkVersion(ctx context.Context, ce abi.ChainEpoch) network.Ver
}
func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) {
return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate)
// Before v15, this was recalculated on each invocation as the state tree was mutated
if vm.GetNtwkVersion(ctx, vm.blockHeight) <= network.Version14 {
return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate)
}
return vm.baseCircSupply, nil
}
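
Illustrative sketch (not from the lotus codebase): the pattern the hunk above adopts — snapshot the circulating supply once when the VM is constructed, against the unmodified base state, and only fall back to per-call recomputation for network versions before 15. Names like `miniVM` are stand-ins.

```go
package main

import "fmt"

// supplyCalc stands in for the CircSupplyCalc callback: potentially expensive,
// and previously re-run against the mutating state tree on every call.
type supplyCalc func(epoch int64) int64

type miniVM struct {
	networkVersion int
	epoch          int64
	calc           supplyCalc
	baseCircSupply int64 // snapshotted once, against the unmodified base state
}

func newMiniVM(nv int, epoch int64, calc supplyCalc) *miniVM {
	return &miniVM{
		networkVersion: nv,
		epoch:          epoch,
		calc:           calc,
		baseCircSupply: calc(epoch),
	}
}

// circSupply mirrors the branch above: before nv15 the value tracks intra-tipset
// state changes; from nv15 on it is fixed at the snapshot taken at construction.
func (vm *miniVM) circSupply() int64 {
	if vm.networkVersion <= 14 {
		return vm.calc(vm.epoch)
	}
	return vm.baseCircSupply
}

func main() {
	calls := 0
	calc := func(epoch int64) int64 { calls++; return 1_000_000 + epoch }
	vm := newMiniVM(15, 100, calc)
	_ = vm.circSupply()
	_ = vm.circSupply()
	fmt.Println("calc invoked", calls, "time(s)") // 1: only the constructor snapshot
}
```
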
func (vm *VM) incrementNonce(addr address.Address) error {
@ -869,32 +878,71 @@ func (vm *VM) incrementNonce(addr address.Address) error {
})
}
func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.ActorError {
if from == to {
return nil
}
func (vm *VM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError {
var f *types.Actor
var fromID, toID address.Address
var err error
// from nv15 the checks are reordered so that self-sends of more than the sender's balance fail
if networkVersion >= network.Version15 {
if amt.LessThan(types.NewInt(0)) {
return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt)
}
fromID, err := vm.cstate.LookupID(from)
if err != nil {
return aerrors.Fatalf("transfer failed when resolving sender address: %s", err)
}
fromID, err = vm.cstate.LookupID(from)
if err != nil {
return aerrors.Fatalf("transfer failed when resolving sender address: %s", err)
}
toID, err := vm.cstate.LookupID(to)
if err != nil {
return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err)
}
f, err = vm.cstate.GetActor(fromID)
if err != nil {
return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err)
}
if fromID == toID {
return nil
}
if f.Balance.LessThan(amt) {
return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed, insufficient balance in sender actor: %v", f.Balance)
}
if amt.LessThan(types.NewInt(0)) {
return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt)
}
if from == to {
log.Infow("sending to same address: noop", "from/to addr", from)
return nil
}
f, err := vm.cstate.GetActor(fromID)
if err != nil {
return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err)
toID, err = vm.cstate.LookupID(to)
if err != nil {
return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err)
}
if fromID == toID {
log.Infow("sending to same actor ID: noop", "from/to actor", fromID)
return nil
}
} else {
if from == to {
return nil
}
fromID, err = vm.cstate.LookupID(from)
if err != nil {
return aerrors.Fatalf("transfer failed when resolving sender address: %s", err)
}
toID, err = vm.cstate.LookupID(to)
if err != nil {
return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err)
}
if fromID == toID {
return nil
}
if amt.LessThan(types.NewInt(0)) {
return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt)
}
f, err = vm.cstate.GetActor(fromID)
if err != nil {
return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err)
}
}
t, err := vm.cstate.GetActor(toID)
@ -902,17 +950,17 @@ func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.Actor
return aerrors.Fatalf("transfer failed when retrieving receiver actor: %s", err)
}
if err := deductFunds(f, amt); err != nil {
if err = deductFunds(f, amt); err != nil {
return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed when deducting funds (%s): %s", types.FIL(amt), err)
}
depositFunds(t, amt)
if err := vm.cstate.SetActor(fromID, f); err != nil {
return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err)
if err = vm.cstate.SetActor(fromID, f); err != nil {
return aerrors.Fatalf("transfer failed when setting sender actor: %s", err)
}
if err := vm.cstate.SetActor(toID, t); err != nil {
return aerrors.Fatalf("transfer failed when setting sender actor: %s", err)
if err = vm.cstate.SetActor(toID, t); err != nil {
return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err)
}
return nil
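
Illustrative sketch (not the actual actor/state-tree plumbing): the behavioural difference the reordering above introduces — before nv15 a self-send short-circuits before any balance check, so sending more than you have to yourself silently succeeds; from nv15 the sender balance is checked first and such a send fails. The `transfer` below is a simplified stand-in over a plain balance map.

```go
package main

import (
	"errors"
	"fmt"
)

var errInsufficientFunds = errors.New("insufficient funds")

// transfer applies the nv15 ordering change: check the sender balance before
// the self-send short-circuit, instead of after it.
func transfer(balances map[string]int64, from, to string, amt int64, nv int) error {
	if amt < 0 {
		return errors.New("negative transfer")
	}
	if nv >= 15 {
		if balances[from] < amt {
			return errInsufficientFunds
		}
		if from == to {
			return nil // no-op, but only after the balance check
		}
	} else {
		if from == to {
			return nil // pre-nv15: short-circuit before any balance check
		}
		if balances[from] < amt {
			return errInsufficientFunds
		}
	}
	balances[from] -= amt
	balances[to] += amt
	return nil
}

func main() {
	bal := map[string]int64{"f1alice": 10}
	fmt.Println(transfer(bal, "f1alice", "f1alice", 100, 14)) // <nil>: old behaviour, silent no-op
	fmt.Println(transfer(bal, "f1alice", "f1alice", 100, 15)) // insufficient funds
}
```
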

View File

@ -5,10 +5,11 @@ import (
"context"
"errors"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-datastore"
"github.com/minio/blake2b-simd"
cbg "github.com/whyrusleeping/cbor-gen"
@ -97,8 +98,12 @@ func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Contex
return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u)
}
func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) {
return cv.backend.VerifyAggregateSeals(aggregate)
}
func (cv cachingVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
return cv.backend.VerifyReplicaUpdate(update)
}
var _ ffiwrapper.Verifier = (*cachingVerifier)(nil)

View File

@ -65,6 +65,7 @@ func main() {
fr32Cmd,
chainCmd,
balancerCmd,
terminationsCmd,
}
app := &cli.App{

View File

@ -0,0 +1,181 @@
package main
import (
"bytes"
"context"
"fmt"
"io"
"strconv"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/node/repo"
miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/urfave/cli/v2"
)
var terminationsCmd = &cli.Command{
Name: "terminations",
Description: "Lists terminated deals from the past 2 days",
ArgsUsage: "[block to look back from] [lookback period (epochs)]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
Value: "~/.lotus",
},
},
Action: func(cctx *cli.Context) error {
ctx := context.TODO()
if cctx.NArg() != 2 {
return fmt.Errorf("must pass block cid && lookback period")
}
blkCid, err := cid.Decode(cctx.Args().First())
if err != nil {
return fmt.Errorf("failed to parse input: %w", err)
}
fsrepo, err := repo.NewFS(cctx.String("repo"))
if err != nil {
return err
}
lkrepo, err := fsrepo.Lock(repo.FullNode)
if err != nil {
return err
}
defer lkrepo.Close() //nolint:errcheck
bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return fmt.Errorf("failed to open blockstore: %w", err)
}
defer func() {
if c, ok := bs.(io.Closer); ok {
if err := c.Close(); err != nil {
log.Warnf("failed to close blockstore: %s", err)
}
}
}()
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
if err != nil {
return err
}
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)
blk, err := cs.GetBlock(blkCid)
if err != nil {
return err
}
lbp, err := strconv.Atoi(cctx.Args().Get(1))
if err != nil {
return fmt.Errorf("failed to parse input: %w", err)
}
cutoff := blk.Height - abi.ChainEpoch(lbp)
for blk.Height > cutoff {
pts, err := cs.LoadTipSet(types.NewTipSetKey(blk.Parents...))
if err != nil {
return err
}
blk = pts.Blocks()[0]
msgs, err := cs.MessagesForTipset(pts)
if err != nil {
return err
}
for _, v := range msgs {
msg := v.VMMessage()
if msg.Method != miner.Methods.TerminateSectors {
continue
}
tree, err := state.LoadStateTree(cst, blk.ParentStateRoot)
if err != nil {
return err
}
minerAct, err := tree.GetActor(msg.To)
if err != nil {
return err
}
if !builtin.IsStorageMinerActor(minerAct.Code) {
continue
}
minerSt, err := miner.Load(store, minerAct)
if err != nil {
return err
}
marketAct, err := tree.GetActor(market.Address)
if err != nil {
return err
}
marketSt, err := market.Load(store, marketAct)
if err != nil {
return err
}
proposals, err := marketSt.Proposals()
if err != nil {
return err
}
var termParams miner2.TerminateSectorsParams
err = termParams.UnmarshalCBOR(bytes.NewBuffer(msg.Params))
if err != nil {
return err
}
for _, t := range termParams.Terminations {
sectors, err := minerSt.LoadSectors(&t.Sectors)
if err != nil {
return err
}
for _, sector := range sectors {
for _, deal := range sector.DealIDs {
prop, find, err := proposals.Get(deal)
if err != nil {
return err
}
if find {
fmt.Printf("%s, %d, %d, %s, %s, %s\n", msg.To, sector.SectorNumber, deal, prop.Client, prop.PieceCID, prop.Label)
}
}
}
}
}
}
return nil
},
}
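
Usage note (not in the diff): assuming this command is wired into the lotus-shed binary (the main.go hunk above registers terminationsCmd alongside fr32Cmd and balancerCmd), an invocation would look roughly like `lotus-shed terminations <block-cid> 5760`, where 5760 epochs is about two days at 30-second epochs. Each matching deal prints a CSV-style line of miner address, sector number, deal ID, client, piece CID, and deal label, per the Printf above.
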

View File

@ -6,6 +6,8 @@ import (
"encoding/binary"
"fmt"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
@ -70,6 +72,12 @@ func (mockVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyPro
)
return false, nil
}
// TODO: do the thing
func (mockVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
return false, nil
}
func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
panic("should not be called")
}

View File

@ -153,6 +153,14 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
results: []*vm.ApplyRet{},
}
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
vmopt.CircSupplyCalc = func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) {
return big.Zero(), nil
}
return vm.NewVM(ctx, vmopt)
})
postcid, receiptsroot, err := tse.ApplyBlocks(context.Background(),
sm,
params.ParentEpoch,

View File

@ -94,9 +94,13 @@
* [ReturnAddPiece](#ReturnAddPiece)
* [ReturnFetch](#ReturnFetch)
* [ReturnFinalizeSector](#ReturnFinalizeSector)
* [ReturnGenerateSectorKeyFromData](#ReturnGenerateSectorKeyFromData)
* [ReturnMoveStorage](#ReturnMoveStorage)
* [ReturnProveReplicaUpdate1](#ReturnProveReplicaUpdate1)
* [ReturnProveReplicaUpdate2](#ReturnProveReplicaUpdate2)
* [ReturnReadPiece](#ReturnReadPiece)
* [ReturnReleaseUnsealed](#ReturnReleaseUnsealed)
* [ReturnReplicaUpdate](#ReturnReplicaUpdate)
* [ReturnSealCommit1](#ReturnSealCommit1)
* [ReturnSealCommit2](#ReturnSealCommit2)
* [ReturnSealPreCommit1](#ReturnSealPreCommit1)
@ -1441,6 +1445,30 @@ Response: `{}`
### ReturnFinalizeSector
Perms: admin
Inputs:
```json
[
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
},
{
"Code": 0,
"Message": "string value"
}
]
```
Response: `{}`
### ReturnGenerateSectorKeyFromData
Perms: admin
Inputs:
@ -1486,6 +1514,56 @@ Inputs:
Response: `{}`
### ReturnProveReplicaUpdate1
Perms: admin
Inputs:
```json
[
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
},
null,
{
"Code": 0,
"Message": "string value"
}
]
```
Response: `{}`
### ReturnProveReplicaUpdate2
Perms: admin
Inputs:
```json
[
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
},
null,
{
"Code": 0,
"Message": "string value"
}
]
```
Response: `{}`
### ReturnReadPiece
@ -1535,6 +1613,38 @@ Inputs:
Response: `{}`
### ReturnReplicaUpdate
Perms: admin
Inputs:
```json
[
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
},
{
"NewSealed": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"NewUnsealed": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
},
{
"Code": 0,
"Message": "string value"
}
]
```
Response: `{}`
### ReturnSealCommit1
@ -2260,11 +2370,15 @@ Response:
"Write": [
0,
0,
1
1,
0,
0
],
"Read": [
2,
3,
0,
0,
0
]
}

View File

@ -11,12 +11,19 @@
* [AddPiece](#AddPiece)
* [Finalize](#Finalize)
* [FinalizeSector](#FinalizeSector)
* [Generate](#Generate)
* [GenerateSectorKeyFromData](#GenerateSectorKeyFromData)
* [Move](#Move)
* [MoveStorage](#MoveStorage)
* [Process](#Process)
* [ProcessSession](#ProcessSession)
* [Prove](#Prove)
* [ProveReplicaUpdate1](#ProveReplicaUpdate1)
* [ProveReplicaUpdate2](#ProveReplicaUpdate2)
* [Release](#Release)
* [ReleaseUnsealed](#ReleaseUnsealed)
* [Replica](#Replica)
* [ReplicaUpdate](#ReplicaUpdate)
* [Seal](#Seal)
* [SealCommit1](#SealCommit1)
* [SealCommit2](#SealCommit2)
@ -792,6 +799,41 @@ Response:
}
```
## Generate
### GenerateSectorKeyFromData
Perms: admin
Inputs:
```json
[
{
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Move
@ -839,12 +881,125 @@ Inputs: `null`
Response: `"07070707-0707-0707-0707-070707070707"`
## Prove
### ProveReplicaUpdate1
Perms: admin
Inputs:
```json
[
{
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
}
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
### ProveReplicaUpdate2
Perms: admin
Inputs:
```json
[
{
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
{
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
null
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Release
### ReleaseUnsealed
Perms: admin
Inputs:
```json
[
{
"ID": {
"Miner": 1000,
"Number": 9
},
"ProofType": 8
},
null
]
```
Response:
```json
{
"Sector": {
"Miner": 1000,
"Number": 9
},
"ID": "07070707-0707-0707-0707-070707070707"
}
```
## Replica
### ReplicaUpdate
Perms: admin
Inputs:

View File

@ -4699,7 +4699,7 @@ Inputs:
]
```
Response: `14`
Response: `15`
### StateReadState
StateReadState returns the indicated actor's state.
@ -5017,7 +5017,8 @@ Response:
"VerifiedDealWeight": "0",
"InitialPledge": "0",
"ExpectedDayReward": "0",
"ExpectedStoragePledge": "0"
"ExpectedStoragePledge": "0",
"SectorKeyCID": null
}
```

View File

@ -4982,7 +4982,7 @@ Inputs:
]
```
Response: `14`
Response: `15`
### StateReadState
StateReadState returns the indicated actor's state.
@ -5257,7 +5257,8 @@ Response:
"VerifiedDealWeight": "0",
"InitialPledge": "0",
"ExpectedDayReward": "0",
"ExpectedStoragePledge": "0"
"ExpectedStoragePledge": "0",
"SectorKeyCID": null
}
```

extern/filecoin-ffi vendored

@ -1 +1 @@
Subproject commit 7912389334e347bbb2eac0520c836830875c39de
Subproject commit 52d80081bfdd8a30bc44bcfe44cb0f299615b9f3

View File

@ -24,6 +24,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/detailyang/go-fallocate"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
"github.com/filecoin-project/go-commp-utils/zerocomm"
"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
@ -253,6 +254,23 @@ func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, err
return pieceCID, werr()
}
func (sb *Sealer) tryDecodeUpdatedReplica(ctx context.Context, sector storage.SectorRef, commD cid.Cid, unsealedPath string) (bool, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage)
if xerrors.Is(err, storiface.ErrSectorNotFound) {
return false, nil
} else if err != nil {
return false, xerrors.Errorf("reading updated replica: %w", err)
}
defer done()
// Sector data stored in replica update
updateProof, err := sector.ProofType.RegisteredUpdateProof()
if err != nil {
return false, err
}
return true, ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, paths.Update, paths.Sealed, paths.Cache, commD)
}
func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
ssize, err := sector.ProofType.SectorSize()
if err != nil {
@ -303,6 +321,16 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off
return nil
}
// If the piece data is stored in an updated replica, decode the whole sector
decoded, err := sb.tryDecodeUpdatedReplica(ctx, sector, commd, unsealedPath.Unsealed)
if err != nil {
return xerrors.Errorf("decoding sector from replica: %w", err)
}
if decoded {
return pf.MarkAllocated(0, maxPieceSize)
}
// Piece data sealed in sector
srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathStorage)
if err != nil {
return xerrors.Errorf("acquire sealed sector paths: %w", err)
@ -639,6 +667,109 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, pha
return ffi.SealCommitPhase2(phase1Out, sector.ID.Number, sector.ID.Miner)
}
func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) {
empty := storage.ReplicaUpdateOut{}
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing)
if err != nil {
return empty, xerrors.Errorf("failed to acquire sector paths: %w", err)
}
defer done()
updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof
s, err := os.Stat(paths.Sealed)
if err != nil {
return empty, err
}
sealedSize := s.Size()
u, err := os.OpenFile(paths.Update, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec
if err != nil {
return empty, xerrors.Errorf("ensuring updated replica file exists: %w", err)
}
if err := fallocate.Fallocate(u, 0, sealedSize); err != nil {
return empty, xerrors.Errorf("allocating space for replica update file: %w", err)
}
if err := u.Close(); err != nil {
return empty, err
}
if err := os.Mkdir(paths.UpdateCache, 0755); err != nil { // nolint
if os.IsExist(err) {
log.Warnf("existing cache in %s; removing", paths.Cache)
if err := os.RemoveAll(paths.UpdateCache); err != nil {
return empty, xerrors.Errorf("remove existing sector cache from %s (sector %d): %w", paths.Cache, sector, err)
}
if err := os.Mkdir(paths.UpdateCache, 0755); err != nil { // nolint:gosec
return empty, xerrors.Errorf("mkdir cache path after cleanup: %w", err)
}
} else {
return empty, err
}
}
sealed, unsealed, err := ffi.SectorUpdate.EncodeInto(updateProofType, paths.Update, paths.UpdateCache, paths.Sealed, paths.Cache, paths.Unsealed, pieces)
if err != nil {
return empty, xerrors.Errorf("failed to update replica %d with new deal data: %w", sector.ID.Number, err)
}
return storage.ReplicaUpdateOut{NewSealed: sealed, NewUnsealed: unsealed}, nil
}
func (sb *Sealer) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdateCache|storiface.FTUpdate, storiface.FTNone, storiface.PathSealing)
if err != nil {
return nil, xerrors.Errorf("failed to acquire sector paths: %w", err)
}
defer done()
updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof
vanillaProofs, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, sectorKey, newSealed, newUnsealed, paths.Update, paths.UpdateCache, paths.Sealed, paths.Cache)
if err != nil {
return nil, xerrors.Errorf("failed to generate proof of replica update for sector %d: %w", sector.ID.Number, err)
}
return vanillaProofs, nil
}
func (sb *Sealer) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) {
updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof
return ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, sectorKey, newSealed, newUnsealed, vanillaProofs)
}
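The proving step is split in two on purpose: ProveReplicaUpdate1 needs the sector files on disk to produce vanilla proofs, while ProveReplicaUpdate2 only needs those proofs plus the CIDs and can run anywhere (the Manager below schedules it with a plain task selector). A sketch of chaining the two phases behind a hypothetical interface:

package example

import "github.com/ipfs/go-cid"

// replicaProver captures the two-phase shape of the Sealer methods above;
// the proof types are simplified to byte slices for the sketch.
type replicaProver interface {
	ProveReplicaUpdate1(sectorKey, newSealed, newUnsealed cid.Cid) ([][]byte, error)
	ProveReplicaUpdate2(sectorKey, newSealed, newUnsealed cid.Cid, vanilla [][]byte) ([]byte, error)
}

// proveUpdate runs the data-local phase 1, then the location-independent phase 2.
func proveUpdate(p replicaProver, sectorKey, newSealed, newUnsealed cid.Cid) ([]byte, error) {
	vanilla, err := p.ProveReplicaUpdate1(sectorKey, newSealed, newUnsealed)
	if err != nil {
		return nil, err
	}
	return p.ProveReplicaUpdate2(sectorKey, newSealed, newUnsealed, vanilla)
}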
func (sb *Sealer) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTSealed, storiface.PathSealing)
defer done()
if err != nil {
return xerrors.Errorf("failed to acquire sector paths: %w", err)
}
s, err := os.Stat(paths.Update)
if err != nil {
return xerrors.Errorf("measuring update file size: %w", err)
}
sealedSize := s.Size()
e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec
if err != nil {
return xerrors.Errorf("ensuring sector key file exists: %w", err)
}
if err := fallocate.Fallocate(e, 0, sealedSize); err != nil {
return xerrors.Errorf("allocating space for sector key file: %w", err)
}
if err := e.Close(); err != nil {
return err
}
updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof
return ffi.SectorUpdate.RemoveData(updateProofType, paths.Sealed, paths.Cache, paths.Update, paths.UpdateCache, paths.Unsealed, commD)
}
func (sb *Sealer) ReleaseSealed(ctx context.Context, sector storage.SectorRef) error {
return xerrors.Errorf("not supported at this layer")
}
func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error {
ssize, err := sector.ProofType.SectorSize()
if err != nil {

View File

@ -4,6 +4,8 @@ import (
"context"
"io"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-cid"
@ -36,6 +38,7 @@ type Storage interface {
type Verifier interface {
VerifySeal(proof5.SealVerifyInfo) (bool, error)
VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error)
VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error)
VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error)
VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error)

View File

@ -12,6 +12,7 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-state-types/abi"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
@ -120,6 +121,10 @@ func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyPr
return ffi.VerifyAggregateSeals(aggregate)
}
func (proofVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
return ffi.SectorUpdate.VerifyUpdateProof(update)
}
func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
info.Randomness[31] &= 0x3f
_, span := trace.StartSpan(ctx, "VerifyWinningPoSt")

View File

@ -573,11 +573,72 @@ func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef,
return nil
}
func (m *Manager) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
return m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true, nil)
}
func (m *Manager) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTRegenSectorKey, sector, commD)
if err != nil {
return xerrors.Errorf("getWork: %w", err)
}
defer cancel()
var waitErr error
waitRes := func() {
_, werr := m.waitWork(ctx, wk)
if werr != nil {
waitErr = werr
return
}
}
if wait { // already in progress
waitRes()
return waitErr
}
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTSealed|storiface.FTCache); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
// NOTE: We set allowFetch to false so that we always execute on a worker
// with direct access to the data. We want to do that because this step is
// generally very cheap / fast, and transferring data is not worth the effort
selector := newExistingSelector(m.index, sector.ID, storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTCache, true)
err = m.sched.Schedule(ctx, sector, sealtasks.TTRegenSectorKey, selector, m.schedFetch(sector, storiface.FTUpdate|storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error {
err := m.startWork(ctx, w, wk)(w.GenerateSectorKeyFromData(ctx, sector, commD))
if err != nil {
return err
}
waitRes()
return nil
})
if err != nil {
return err
}
return waitErr
}
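GenerateSectorKeyFromData above and the ReplicaUpdate/ProveReplicaUpdate methods below all follow the same getWork/waitWork idiom: if an identical call is already tracked, the caller just waits for its result instead of scheduling the work twice. A simplified, in-memory sketch of that deduplication (the real implementation persists work state in a datastore and survives restarts):

package example

import "sync"

// workTracker deduplicates identical in-flight calls by key.
type workTracker struct {
	mu      sync.Mutex
	pending map[string]chan struct{} // closed when the keyed work finishes
	results map[string]error
}

func newWorkTracker() *workTracker {
	return &workTracker{
		pending: map[string]chan struct{}{},
		results: map[string]error{},
	}
}

// do runs fn at most once per key; concurrent callers with the same key
// wait for the first invocation's result, like getWork/waitWork above.
func (w *workTracker) do(key string, fn func() error) error {
	w.mu.Lock()
	if done, ok := w.pending[key]; ok { // already in progress: just wait
		w.mu.Unlock()
		<-done
		w.mu.Lock()
		defer w.mu.Unlock()
		return w.results[key]
	}
	done := make(chan struct{})
	w.pending[key] = done
	w.mu.Unlock()

	err := fn()

	w.mu.Lock()
	w.results[key] = err
	delete(w.pending, key)
	w.mu.Unlock()
	close(done)
	return err
}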
func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil {
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache); err != nil {
return xerrors.Errorf("acquiring sector lock: %w", err)
}
@ -592,10 +653,161 @@ func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error {
if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true, nil); rerr != nil {
err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr))
}
if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUpdate, true, nil); rerr != nil {
err = multierror.Append(err, xerrors.Errorf("removing sector (update): %w", rerr))
}
if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUpdateCache, true, nil); rerr != nil {
err = multierror.Append(err, xerrors.Errorf("removing sector (update-cache): %w", rerr))
}
return err
}
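Remove deliberately keeps going after individual failures and reports them all at once, so a missing update file does not stop the sealed, cache, and unsealed copies from being deleted. A minimal sketch of that accumulate-and-continue pattern with the same multierror package (import path assumed to be the usual hashicorp/go-multierror; the map of removals is illustrative):

package example

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// removeAll attempts every removal and reports all failures together,
// mirroring how Manager.Remove handles the update and update-cache files.
func removeAll(removals map[string]func() error) error {
	var err error
	for name, rm := range removals {
		if rerr := rm(); rerr != nil {
			err = multierror.Append(err, fmt.Errorf("removing %s: %w", name, rerr))
		}
	}
	return err
}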
func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.ReplicaUpdateOut, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTReplicaUpdate, sector, pieces)
if err != nil {
return storage.ReplicaUpdateOut{}, xerrors.Errorf("getWork: %w", err)
}
defer cancel()
var waitErr error
waitRes := func() {
p, werr := m.waitWork(ctx, wk)
if werr != nil {
waitErr = werr
return
}
if p != nil {
out = p.(storage.ReplicaUpdateOut)
}
}
if wait { // already in progress
waitRes()
return out, waitErr
}
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil {
return storage.ReplicaUpdateOut{}, xerrors.Errorf("acquiring sector lock: %w", err)
}
selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing)
err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {
err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces))
if err != nil {
return err
}
waitRes()
return nil
})
if err != nil {
return storage.ReplicaUpdateOut{}, err
}
return out, waitErr
}
func (m *Manager) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (out storage.ReplicaVanillaProofs, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTProveReplicaUpdate1, sector, sectorKey, newSealed, newUnsealed)
if err != nil {
return nil, xerrors.Errorf("getWork: %w", err)
}
defer cancel()
var waitErr error
waitRes := func() {
p, werr := m.waitWork(ctx, wk)
if werr != nil {
waitErr = werr
return
}
if p != nil {
out = p.(storage.ReplicaVanillaProofs)
}
}
if wait { // already in progress
waitRes()
return out, waitErr
}
if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTUpdate|storiface.FTCache|storiface.FTUpdateCache, storiface.FTNone); err != nil {
return nil, xerrors.Errorf("acquiring sector lock: %w", err)
}
selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, true)
err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error {
err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed))
if err != nil {
return err
}
waitRes()
return nil
})
if err != nil {
return nil, err
}
return out, waitErr
}
func (m *Manager) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (out storage.ReplicaUpdateProof, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTProveReplicaUpdate2, sector, sectorKey, newSealed, newUnsealed, vanillaProofs)
if err != nil {
return nil, xerrors.Errorf("getWork: %w", err)
}
defer cancel()
var waitErr error
waitRes := func() {
p, werr := m.waitWork(ctx, wk)
if werr != nil {
waitErr = werr
return
}
if p != nil {
out = p.(storage.ReplicaUpdateProof)
}
}
if wait { // already in progress
waitRes()
return out, waitErr
}
selector := newTaskSelector()
err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate2, selector, schedNop, func(ctx context.Context, w Worker) error {
err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs))
if err != nil {
return err
}
waitRes()
return nil
})
if err != nil {
return nil, err
}
return out, waitErr
}
func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
return m.returnResult(ctx, callID, pi, err)
}
@ -624,6 +836,22 @@ func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.Ca
return m.returnResult(ctx, callID, nil, err)
}
func (m *Manager) ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error {
return m.returnResult(ctx, callID, out, err)
}
func (m *Manager) ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, out storage.ReplicaVanillaProofs, err *storiface.CallError) error {
return m.returnResult(ctx, callID, out, err)
}
func (m *Manager) ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error {
return m.returnResult(ctx, callID, proof, err)
}
func (m *Manager) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
return m.returnResult(ctx, callID, nil, err)
}
func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
return m.returnResult(ctx, callID, nil, err)
}

View File

@ -385,7 +385,6 @@ func (m *Manager) returnResult(ctx context.Context, callID storiface.CallID, r i
if ok {
return xerrors.Errorf("result for call %v already reported", wid)
}
m.results[wid] = res
err := m.work.Get(wid).Mutate(func(ws *WorkState) error {

View File

@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
@ -17,10 +18,12 @@ import (
"github.com/google/uuid"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-statestore"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
@ -65,7 +68,12 @@ func newTestStorage(t *testing.T) *testStorage {
}
func (t testStorage) cleanup() {
noCleanup := os.Getenv("LOTUS_TEST_NO_CLEANUP") != ""
for _, path := range t.StoragePaths {
if noCleanup {
fmt.Printf("Not cleaning up test storage at %s\n", path)
continue
}
if err := os.RemoveAll(path.Path); err != nil {
fmt.Println("Cleanup error:", err)
}
@ -162,6 +170,150 @@ func TestSimple(t *testing.T) {
require.NoError(t, err)
}
type Reader struct{}
func (Reader) Read(out []byte) (int, error) {
for i := range out {
out[i] = 0
}
return len(out), nil
}
type NullReader struct {
*io.LimitedReader
}
func NewNullReader(size abi.UnpaddedPieceSize) io.Reader {
return &NullReader{(io.LimitReader(&Reader{}, int64(size))).(*io.LimitedReader)}
}
func (m NullReader) NullBytes() int64 {
return m.N
}
func TestSnapDeals(t *testing.T) {
logging.SetAllLoggers(logging.LevelWarn)
ctx := context.Background()
m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, datastore.NewMapDatastore())
defer cleanup()
localTasks := []sealtasks.TaskType{
sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit1, sealtasks.TTCommit2, sealtasks.TTFinalize,
sealtasks.TTFetch, sealtasks.TTReplicaUpdate, sealtasks.TTProveReplicaUpdate1, sealtasks.TTProveReplicaUpdate2, sealtasks.TTUnseal,
sealtasks.TTRegenSectorKey,
}
wds := datastore.NewMapDatastore()
w := NewLocalWorker(WorkerConfig{TaskTypes: localTasks}, stor, lstor, idx, m, statestore.New(wds))
err := m.AddWorker(ctx, w)
require.NoError(t, err)
proofType := abi.RegisteredSealProof_StackedDrg2KiBV1
ptStr := os.Getenv("LOTUS_TEST_SNAP_DEALS_PROOF_TYPE")
switch ptStr {
case "2k":
case "8M":
proofType = abi.RegisteredSealProof_StackedDrg8MiBV1
case "512M":
proofType = abi.RegisteredSealProof_StackedDrg512MiBV1
case "32G":
proofType = abi.RegisteredSealProof_StackedDrg32GiBV1
case "64G":
proofType = abi.RegisteredSealProof_StackedDrg64GiBV1
default:
log.Warn("Unspecified proof type, make sure to set LOTUS_TEST_SNAP_DEALS_PROOF_TYPE to '2k', '8M', '512M', '32G' or '64G'")
log.Warn("Continuing test with 2k sectors")
}
sid := storage.SectorRef{
ID: abi.SectorID{Miner: 1000, Number: 1},
ProofType: proofType,
}
ss, err := proofType.SectorSize()
require.NoError(t, err)
unpaddedSectorSize := abi.PaddedPieceSize(ss).Unpadded()
// Pack the sector with a single zero-filled piece (no deal data)
p0, err := m.AddPiece(ctx, sid, nil, unpaddedSectorSize, NewNullReader(unpaddedSectorSize))
require.NoError(t, err)
ccPieces := []abi.PieceInfo{p0}
// Precommit and Seal a CC sector
fmt.Printf("PC1\n")
ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9}
pc1Out, err := m.SealPreCommit1(ctx, sid, ticket, ccPieces)
require.NoError(t, err)
fmt.Printf("PC2\n")
pc2Out, err := m.SealPreCommit2(ctx, sid, pc1Out)
require.NoError(t, err)
// Now do a snap deals replica update
sectorKey := pc2Out.Sealed
// Two pieces each half the size of the sector
unpaddedPieceSize := unpaddedSectorSize / 2
p1, err := m.AddPiece(ctx, sid, nil, unpaddedPieceSize, strings.NewReader(strings.Repeat("k", int(unpaddedPieceSize))))
require.NoError(t, err)
require.Equal(t, unpaddedPieceSize.Padded(), p1.Size)
p2, err := m.AddPiece(ctx, sid, []abi.UnpaddedPieceSize{p1.Size.Unpadded()}, unpaddedPieceSize, strings.NewReader(strings.Repeat("j", int(unpaddedPieceSize))))
require.NoError(t, err)
require.Equal(t, unpaddedPieceSize.Padded(), p2.Size)
pieces := []abi.PieceInfo{p1, p2}
fmt.Printf("RU\n")
startRU := time.Now()
out, err := m.ReplicaUpdate(ctx, sid, pieces)
require.NoError(t, err)
fmt.Printf("RU duration (%s): %s\n", ss.ShortString(), time.Since(startRU))
updateProofType, err := sid.ProofType.RegisteredUpdateProof()
require.NoError(t, err)
require.NotNil(t, out)
fmt.Printf("PR1\n")
startPR1 := time.Now()
vanillaProofs, err := m.ProveReplicaUpdate1(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed)
require.NoError(t, err)
require.NotNil(t, vanillaProofs)
fmt.Printf("PR1 duration (%s): %s\n", ss.ShortString(), time.Since(startPR1))
fmt.Printf("PR2\n")
startPR2 := time.Now()
proof, err := m.ProveReplicaUpdate2(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed, vanillaProofs)
require.NoError(t, err)
require.NotNil(t, proof)
fmt.Printf("PR2 duration (%s): %s\n", ss.ShortString(), time.Since(startPR2))
vInfo := proof7.ReplicaUpdateInfo{
Proof: proof,
UpdateProofType: updateProofType,
OldSealedSectorCID: sectorKey,
NewSealedSectorCID: out.NewSealed,
NewUnsealedSectorCID: out.NewUnsealed,
}
pass, err := ffiwrapper.ProofVerifier.VerifyReplicaUpdate(vInfo)
require.NoError(t, err)
assert.True(t, pass)
fmt.Printf("Decode\n")
// Remove unsealed data and decode for retrieval
require.NoError(t, m.FinalizeSector(ctx, sid, nil))
startDecode := time.Now()
require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed))
fmt.Printf("Decode duration (%s): %s\n", ss.ShortString(), time.Since(startDecode))
// Remove just the first piece and decode for retrieval
require.NoError(t, m.FinalizeSector(ctx, sid, []storage.Range{{Offset: p1.Size.Unpadded(), Size: p2.Size.Unpadded()}}))
require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed))
fmt.Printf("GSK\n")
require.NoError(t, m.ReleaseSectorKey(ctx, sid))
startGSK := time.Now()
require.NoError(t, m.GenerateSectorKeyFromData(ctx, sid, out.NewUnsealed))
fmt.Printf("GSK duration (%s): %s\n", ss.ShortString(), time.Since(startGSK))
}
func TestRedoPC1(t *testing.T) {
logging.SetAllLoggers(logging.LevelDebug)

View File

@ -10,6 +10,8 @@ import (
"math/rand"
"sync"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/dagstore/mount"
@ -262,6 +264,28 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid storage.SectorRef, ph
return out[:], nil
}
func (mgr *SectorMgr) ReplicaUpdate(ctx context.Context, sid storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) {
out := storage.ReplicaUpdateOut{}
return out, nil
}
func (mgr *SectorMgr) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) {
out := make([][]byte, 0)
return out, nil
}
func (mgr *SectorMgr) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) {
return make([]byte, 0), nil
}
func (mgr *SectorMgr) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error {
return nil
}
func (mgr *SectorMgr) ReleaseSealed(ctx context.Context, sid storage.SectorRef) error {
return nil
}
// Test Instrumentation Methods
func (mgr *SectorMgr) MarkFailed(sid storage.SectorRef, failed bool) error {
@ -467,6 +491,8 @@ func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStPr
return bad, nil
}
var _ storiface.WorkerReturn = &SectorMgr{}
func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error {
panic("not supported")
}
@ -511,6 +537,22 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID,
panic("not supported")
}
func (mgr *SectorMgr) ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error {
panic("not supported")
}
func (mgr *SectorMgr) ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, out storage.ReplicaVanillaProofs, err *storiface.CallError) error {
panic("not supported")
}
func (mgr *SectorMgr) ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateProof, err *storiface.CallError) error {
panic("not supported")
}
func (mgr *SectorMgr) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error {
panic("not supported")
}
func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
plen, err := svi.SealProof.ProofSize()
if err != nil {
@ -558,6 +600,10 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri
return ok, nil
}
func (m mockVerifProver) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
return true, nil
}
func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) {
out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // todo: figure out more real length
for pi, proof := range proofs {

View File

@ -102,6 +102,22 @@ func (s *schedTestWorker) AddPiece(ctx context.Context, sector storage.SectorRef
panic("implement me")
}
func (s *schedTestWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) {
panic("implement me")
}
func (s *schedTestWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) {
panic("implement me")
}
func (s *schedTestWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) {
panic("implement me")
}
func (s *schedTestWorker) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) {
panic("implement me")
}
func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) {
panic("implement me")
}

View File

@ -13,17 +13,26 @@ const (
TTFetch TaskType = "seal/v0/fetch"
TTUnseal TaskType = "seal/v0/unseal"
TTReplicaUpdate TaskType = "seal/v0/replicaupdate"
TTProveReplicaUpdate1 TaskType = "seal/v0/provereplicaupdate/1"
TTProveReplicaUpdate2 TaskType = "seal/v0/provereplicaupdate/2"
TTRegenSectorKey TaskType = "seal/v0/regensectorkey"
)
var order = map[TaskType]int{
TTAddPiece: 6, // least priority
TTPreCommit1: 5,
TTPreCommit2: 4,
TTCommit2: 3,
TTCommit1: 2,
TTUnseal: 1,
TTFetch: -1,
TTFinalize: -2, // most priority
TTRegenSectorKey: 10, // least priority
TTAddPiece: 9,
TTReplicaUpdate: 8,
TTProveReplicaUpdate2: 7,
TTProveReplicaUpdate1: 6,
TTPreCommit1: 5,
TTPreCommit2: 4,
TTCommit2: 3,
TTCommit1: 2,
TTUnseal: 1,
TTFetch: -1,
TTFinalize: -2, // most priority
}
var shortNames = map[TaskType]string{
@ -38,6 +47,11 @@ var shortNames = map[TaskType]string{
TTFetch: "GET",
TTUnseal: "UNS",
TTReplicaUpdate: "RU",
TTProveReplicaUpdate1: "PR1",
TTProveReplicaUpdate2: "PR2",
TTRegenSectorKey: "GSK",
}
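The priority table above is what the scheduler consults when two pending tasks compete for a worker: per the comments in the table, a smaller order value means higher priority (TTFinalize at -2 is "most priority", TTRegenSectorKey at 10 the least). A small sketch of sorting a queue by that table; the helper is a simplified stand-in, not the real scheduler comparison:

package example

import "sort"

type TaskType string

// order mirrors the shape of the table above for the snap-deals tasks:
// lower values are scheduled first.
var order = map[TaskType]int{
	"seal/v0/regensectorkey":       10,
	"seal/v0/replicaupdate":        8,
	"seal/v0/provereplicaupdate/2": 7,
	"seal/v0/provereplicaupdate/1": 6,
	"seal/v0/unseal":               1,
	"seal/v0/fetch":                -1,
}

// byPriority sorts a pending queue so the most urgent task comes first.
func byPriority(tasks []TaskType) {
	sort.SliceStable(tasks, func(i, j int) bool {
		return order[tasks[i]] < order[tasks[j]]
	})
}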
func (a TaskType) MuchLess(b TaskType) (bool, bool) {

View File

@ -12,11 +12,13 @@ const (
FTUnsealed SectorFileType = 1 << iota
FTSealed
FTCache
FTUpdate
FTUpdateCache
FileTypes = iota
)
var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache}
var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTUpdate, FTUpdateCache}
const (
FTNone SectorFileType = 0
@ -25,15 +27,21 @@ const (
const FSOverheadDen = 10
var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads
FTUnsealed: FSOverheadDen,
FTSealed: FSOverheadDen,
FTCache: 141, // 11 layers + D(2x ssize) + C + R
FTUnsealed: FSOverheadDen,
FTSealed: FSOverheadDen,
FTUpdate: FSOverheadDen,
FTUpdateCache: FSOverheadDen * 2,
FTCache: 141, // 11 layers + D(2x ssize) + C + R'
}
// sector size * disk / fs overhead. FSOverheadDen is like the unit of sector size
var FsOverheadFinalized = map[SectorFileType]int{
FTUnsealed: FSOverheadDen,
FTSealed: FSOverheadDen,
FTCache: 2,
FTUnsealed: FSOverheadDen,
FTSealed: FSOverheadDen,
FTUpdate: FSOverheadDen * 2, // XXX: we should clear the update cache on Finalize???
FTUpdateCache: FSOverheadDen,
FTCache: 2,
}
type SectorFileType int
@ -46,6 +54,10 @@ func (t SectorFileType) String() string {
return "sealed"
case FTCache:
return "cache"
case FTUpdate:
return "update"
case FTUpdateCache:
return "update-cache"
default:
return fmt.Sprintf("<unknown %d>", t)
}
@ -104,9 +116,11 @@ func (t SectorFileType) All() [FileTypes]bool {
type SectorPaths struct {
ID abi.SectorID
Unsealed string
Sealed string
Cache string
Unsealed string
Sealed string
Cache string
Update string
UpdateCache string
}
func ParseSectorID(baseName string) (abi.SectorID, error) {
@ -139,6 +153,10 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string {
return sps.Sealed
case FTCache:
return sps.Cache
case FTUpdate:
return sps.Update
case FTUpdateCache:
return sps.UpdateCache
}
panic("requested unknown path type")
@ -152,5 +170,9 @@ func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) {
sps.Sealed = p
case FTCache:
sps.Cache = p
case FTUpdate:
sps.Update = p
case FTUpdateCache:
sps.UpdateCache = p
}
}
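With the two new file types, SectorPaths grows Update and UpdateCache fields and the accessors above route them. A small usage sketch of the new cases (the paths are made up for illustration):

package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)

// exampleUpdatePaths shows the new update/update-cache file types flowing
// through the generic path accessors defined above.
func exampleUpdatePaths() {
	var sps storiface.SectorPaths
	storiface.SetPathByType(&sps, storiface.FTUpdate, "/storage/update/s-t01000-1")
	storiface.SetPathByType(&sps, storiface.FTUpdateCache, "/storage/update-cache/s-t01000-1")
	fmt.Println(storiface.PathByType(sps, storiface.FTUpdate))
	fmt.Println(storiface.PathByType(sps, storiface.FTUpdateCache))
}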

View File

@ -121,6 +121,10 @@ type WorkerCalls interface {
SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error)
FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error)
ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error)
ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (CallID, error)
ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (CallID, error)
ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (CallID, error)
GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (CallID, error)
MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error)
UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error)
Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error)
@ -174,6 +178,10 @@ type WorkerReturn interface {
ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err *CallError) error
ReturnFinalizeSector(ctx context.Context, callID CallID, err *CallError) error
ReturnReleaseUnsealed(ctx context.Context, callID CallID, err *CallError) error
ReturnReplicaUpdate(ctx context.Context, callID CallID, out storage.ReplicaUpdateOut, err *CallError) error
ReturnProveReplicaUpdate1(ctx context.Context, callID CallID, proofs storage.ReplicaVanillaProofs, err *CallError) error
ReturnProveReplicaUpdate2(ctx context.Context, callID CallID, proof storage.ReplicaUpdateProof, err *CallError) error
ReturnGenerateSectorKeyFromData(ctx context.Context, callID CallID, err *CallError) error
ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error
ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error
ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error

View File

@ -55,10 +55,30 @@ func (t *testExec) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef
panic("implement me")
}
func (t *testExec) ReleaseSealed(ctx context.Context, sector storage.SectorRef) error {
panic("implement me")
}
func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error {
panic("implement me")
}
func (t *testExec) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) {
panic("implement me")
}
func (t *testExec) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) {
panic("implement me")
}
func (t *testExec) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) {
panic("implement me")
}
func (t *testExec) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error {
panic("implement me")
}
func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error {
panic("implement me")
}

View File

@ -7,6 +7,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/google/uuid"
cid "github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
@ -67,6 +68,33 @@ func (t *testWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pie
})
}
func (t *testWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) {
return t.asyncCall(sector, func(ci storiface.CallID) {
out, err := t.mockSeal.ReplicaUpdate(ctx, sector, pieces)
if err := t.ret.ReturnReplicaUpdate(ctx, ci, out, toCallError(err)); err != nil {
log.Error(err)
}
})
}
func (t *testWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) {
return t.asyncCall(sector, func(ci storiface.CallID) {
vanillaProofs, err := t.mockSeal.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)
if err := t.ret.ReturnProveReplicaUpdate1(ctx, ci, vanillaProofs, toCallError(err)); err != nil {
log.Error(err)
}
})
}
func (t *testWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) {
return t.asyncCall(sector, func(ci storiface.CallID) {
proof, err := t.mockSeal.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs)
if err := t.ret.ReturnProveReplicaUpdate2(ctx, ci, proof, toCallError(err)); err != nil {
log.Error(err)
}
})
}
func (t *testWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) {
return t.asyncCall(sector, func(ci storiface.CallID) {
t.pc1s++

View File

@ -28,7 +28,7 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
)
var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache}
var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache}
type WorkerConfig struct {
TaskTypes []sealtasks.TaskType
@ -148,7 +148,6 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor
}
sid := storiface.PathByType(storageIDs, fileType)
if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil {
log.Errorf("declare sector error: %+v", err)
}
@ -163,16 +162,20 @@ func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) {
type ReturnType string
const (
AddPiece ReturnType = "AddPiece"
SealPreCommit1 ReturnType = "SealPreCommit1"
SealPreCommit2 ReturnType = "SealPreCommit2"
SealCommit1 ReturnType = "SealCommit1"
SealCommit2 ReturnType = "SealCommit2"
FinalizeSector ReturnType = "FinalizeSector"
ReleaseUnsealed ReturnType = "ReleaseUnsealed"
MoveStorage ReturnType = "MoveStorage"
UnsealPiece ReturnType = "UnsealPiece"
Fetch ReturnType = "Fetch"
AddPiece ReturnType = "AddPiece"
SealPreCommit1 ReturnType = "SealPreCommit1"
SealPreCommit2 ReturnType = "SealPreCommit2"
SealCommit1 ReturnType = "SealCommit1"
SealCommit2 ReturnType = "SealCommit2"
FinalizeSector ReturnType = "FinalizeSector"
ReplicaUpdate ReturnType = "ReplicaUpdate"
ProveReplicaUpdate1 ReturnType = "ProveReplicaUpdate1"
ProveReplicaUpdate2 ReturnType = "ProveReplicaUpdate2"
GenerateSectorKey ReturnType = "GenerateSectorKey"
ReleaseUnsealed ReturnType = "ReleaseUnsealed"
MoveStorage ReturnType = "MoveStorage"
UnsealPiece ReturnType = "UnsealPiece"
Fetch ReturnType = "Fetch"
)
// in: func(WorkerReturn, context.Context, CallID, err string)
@ -210,16 +213,20 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor
}
var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{
AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece),
SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1),
SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2),
SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1),
SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2),
FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector),
ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed),
MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage),
UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece),
Fetch: rfunc(storiface.WorkerReturn.ReturnFetch),
AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece),
SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1),
SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2),
SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1),
SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2),
FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector),
ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed),
ReplicaUpdate: rfunc(storiface.WorkerReturn.ReturnReplicaUpdate),
ProveReplicaUpdate1: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate1),
ProveReplicaUpdate2: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate2),
GenerateSectorKey: rfunc(storiface.WorkerReturn.ReturnGenerateSectorKeyFromData),
MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage),
UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece),
Fetch: rfunc(storiface.WorkerReturn.ReturnFetch),
}
func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) {
@ -243,7 +250,6 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, r
}
res, err := work(ctx, ci)
if err != nil {
rb, err := json.Marshal(res)
if err != nil {
@ -261,7 +267,6 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, r
}
}
}()
return ci, nil
}
@ -385,6 +390,51 @@ func (l *LocalWorker) SealCommit2(ctx context.Context, sector storage.SectorRef,
})
}
func (l *LocalWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) {
sb, err := l.executor()
if err != nil {
return storiface.UndefCall, err
}
return l.asyncCall(ctx, sector, ReplicaUpdate, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
sealerOut, err := sb.ReplicaUpdate(ctx, sector, pieces)
return sealerOut, err
})
}
func (l *LocalWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) {
sb, err := l.executor()
if err != nil {
return storiface.UndefCall, err
}
return l.asyncCall(ctx, sector, ProveReplicaUpdate1, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
return sb.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)
})
}
func (l *LocalWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) {
sb, err := l.executor()
if err != nil {
return storiface.UndefCall, err
}
return l.asyncCall(ctx, sector, ProveReplicaUpdate2, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
return sb.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs)
})
}
func (l *LocalWorker) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) {
sb, err := l.executor()
if err != nil {
return storiface.UndefCall, err
}
return l.asyncCall(ctx, sector, GenerateSectorKey, func(ctx context.Context, ci storiface.CallID) (interface{}, error) {
return nil, sb.GenerateSectorKeyFromData(ctx, sector, commD)
})
}
func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) {
sb, err := l.executor()
if err != nil {

View File

@ -98,7 +98,6 @@ func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid stori
wt.lk.Lock()
delete(wt.prepared, prepID)
}
callID, err := cb()
if err != nil {
return callID, err
@ -198,4 +197,22 @@ func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, i
return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, id, sealtasks.TTUnseal, func() (storiface.CallID, error) { return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid) })
}
func (t *trackedWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) {
return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTReplicaUpdate, func() (storiface.CallID, error) {
return t.Worker.ReplicaUpdate(ctx, sector, pieces)
})
}
func (t *trackedWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) {
return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTProveReplicaUpdate1, func() (storiface.CallID, error) {
return t.Worker.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)
})
}
func (t *trackedWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) {
return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTProveReplicaUpdate2, func() (storiface.CallID, error) {
return t.Worker.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs)
})
}
var _ Worker = &trackedWorker{}

View File

@ -1,7 +1,7 @@
{
"actorVersions": [0, 2, 3, 4, 5, 6],
"latestActorsVersion": 6,
"actorVersions": [0, 2, 3, 4, 5, 6, 7],
"latestActorsVersion": 7,
"networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
"latestNetworkVersion": 14
"networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
"latestNetworkVersion": 15
}

5
go.mod
View File

@ -50,7 +50,8 @@ require (
github.com/filecoin-project/specs-actors/v4 v4.0.1
github.com/filecoin-project/specs-actors/v5 v5.0.4
github.com/filecoin-project/specs-actors/v6 v6.0.1
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec
github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gdamore/tcell/v2 v2.2.0
@ -162,7 +163,7 @@ require (
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20211209171907-798191bca915
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
golang.org/x/tools v0.1.5
golang.org/x/tools v0.1.7
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
gopkg.in/cheggaaa/pb.v1 v1.0.28
gotest.tools v2.2.0+incompatible

15
go.sum
View File

@ -369,13 +369,16 @@ github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008
github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg=
github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI=
github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s=
github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4=
github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk=
github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew=
github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec h1:KV9vE+Sl2Y3qKsrpba4HcE7wHwK7v6O5U/S0xHbje6A=
github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE=
github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff h1:JO62nquOGhjoDf9+JkAcV+wsD5yhoyIKOMj70ZNdD3Q=
github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
@ -1829,6 +1832,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=
github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU=
@ -2073,6 +2077,7 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
@ -2196,6 +2201,7 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211209171907-798191bca915 h1:P+8mCzuEpyszAT6T42q0sxU+eveBAF/cJ2Kp0x6/8+0=
@ -2286,8 +2292,9 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/require"
)
// TODO: This needs to be repurposed into a SnapDeals test suite
func TestCCUpgrade(t *testing.T) {
kit.QuietMiningLogs()
@ -32,7 +33,7 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) {
ctx := context.Background()
blockTime := 5 * time.Millisecond
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.LatestActorsAt(upgradeHeight))
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.TurboUpgradeAt(upgradeHeight))
ens.InterconnectAll().BeginMining(blockTime)
maddr, err := miner.ActorAddress(ctx)

View File

@ -49,12 +49,12 @@ func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt {
})
/* inline-gen start */
return UpgradeSchedule(stmgr.Upgrade{
Network: network.Version13,
Network: network.Version14,
Height: -1,
}, stmgr.Upgrade{
Network: network.Version14,
Network: network.Version15,
Height: upgradeHeight,
Migration: filcns.UpgradeActorsV6,
Migration: filcns.UpgradeActorsV7,
})
/* inline-gen end */
}

View File

@ -0,0 +1,101 @@
package itests
import (
"context"
"testing"
"time"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/itests/kit"
"github.com/stretchr/testify/require"
)
// These tests check that the versioned code in vm.transfer behaves correctly across network versions:
// the checks were reordered so that a self-send carrying more value than the account balance fails
// instead of succeeding as a no-op. More info in https://github.com/filecoin-project/lotus/pull/7637
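A self-contained sketch of the reordering described above, not the actual vm.transfer code: before nv15 a self-send returned early before any balance check, so an over-balance self-send was a successful no-op; from nv15 the balance is validated first and such a message fails:

package example

import (
	"errors"
	"math/big"
)

var errInsufficientFunds = errors.New("insufficient funds")

// transfer sketches the check ordering the two tests below exercise.
func transfer(from, to string, value, balance *big.Int, nv uint) error {
	if nv < 15 {
		// old ordering: self-sends short-circuit before the balance check
		if from == to {
			return nil
		}
		if value.Cmp(balance) > 0 {
			return errInsufficientFunds
		}
		return nil // actual debit/credit elided
	}
	// nv15+ ordering: balance is validated before the self-send shortcut,
	// so an over-balance self-send now fails with insufficient funds.
	if value.Cmp(balance) > 0 {
		return errInsufficientFunds
	}
	return nil // actual debit/credit elided
}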
func TestSelfSentTxnV15(t *testing.T) {
ctx := context.Background()
kit.QuietMiningLogs()
client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version15))
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address)
require.NoError(t, err)
// send self half of account balance
msgHalfBal := &types.Message{
From: client15.DefaultKey.Address,
To: client15.DefaultKey.Address,
Value: big.Div(bal, big.NewInt(2)),
}
smHalfBal, err := client15.MpoolPushMessage(ctx, msgHalfBal, nil)
require.NoError(t, err)
mLookup, err := client15.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
msgOverBal := &types.Message{
From: client15.DefaultKey.Address,
To: client15.DefaultKey.Address,
Value: big.Mul(big.NewInt(2), bal),
GasLimit: 10000000000,
GasPremium: big.NewInt(10000000000),
GasFeeCap: big.NewInt(100000000000),
Nonce: 1,
}
smOverBal, err := client15.WalletSignMessage(ctx, client15.DefaultKey.Address, msgOverBal)
require.NoError(t, err)
smcid, err := client15.MpoolPush(ctx, smOverBal)
require.NoError(t, err)
mLookup, err = client15.StateWaitMsg(ctx, smcid, 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.SysErrInsufficientFunds, mLookup.Receipt.ExitCode)
}
func TestSelfSentTxnV14(t *testing.T) {
ctx := context.Background()
kit.QuietMiningLogs()
client14, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version14))
ens.InterconnectAll().BeginMining(10 * time.Millisecond)
bal, err := client14.WalletBalance(ctx, client14.DefaultKey.Address)
require.NoError(t, err)
// send self half of account balance
msgHalfBal := &types.Message{
From: client14.DefaultKey.Address,
To: client14.DefaultKey.Address,
Value: big.Div(bal, big.NewInt(2)),
}
smHalfBal, err := client14.MpoolPushMessage(ctx, msgHalfBal, nil)
require.NoError(t, err)
mLookup, err := client14.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
msgOverBal := &types.Message{
From: client14.DefaultKey.Address,
To: client14.DefaultKey.Address,
Value: big.Mul(big.NewInt(2), bal),
GasLimit: 10000000000,
GasPremium: big.NewInt(10000000000),
GasFeeCap: big.NewInt(100000000000),
Nonce: 1,
}
smOverBal, err := client14.WalletSignMessage(ctx, client14.DefaultKey.Address, msgOverBal)
require.NoError(t, err)
smcid, err := client14.MpoolPush(ctx, smOverBal)
require.NoError(t, err)
mLookup, err = client14.StateWaitMsg(ctx, smcid, 3, api.LookbackNoLimit, true)
require.NoError(t, err)
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode)
}

View File

@ -622,5 +622,112 @@
"AddVerifiedClient",
"UseBytes",
"RestoreBytes"
],
"fil/7/account": [
"Send",
"Constructor",
"PubkeyAddress"
],
"fil/7/cron": [
"Send",
"Constructor",
"EpochTick"
],
"fil/7/init": [
"Send",
"Constructor",
"Exec"
],
"fil/7/multisig": [
"Send",
"Constructor",
"Propose",
"Approve",
"Cancel",
"AddSigner",
"RemoveSigner",
"SwapSigner",
"ChangeNumApprovalsThreshold",
"LockBalance"
],
"fil/7/paymentchannel": [
"Send",
"Constructor",
"UpdateChannelState",
"Settle",
"Collect"
],
"fil/7/reward": [
"Send",
"Constructor",
"AwardBlockReward",
"ThisEpochReward",
"UpdateNetworkKPI"
],
"fil/7/storagemarket": [
"Send",
"Constructor",
"AddBalance",
"WithdrawBalance",
"PublishStorageDeals",
"VerifyDealsForActivation",
"ActivateDeals",
"OnMinerSectorsTerminate",
"ComputeDataCommitment",
"CronTick"
],
"fil/7/storageminer": [
"Send",
"Constructor",
"ControlAddresses",
"ChangeWorkerAddress",
"ChangePeerID",
"SubmitWindowedPoSt",
"PreCommitSector",
"ProveCommitSector",
"ExtendSectorExpiration",
"TerminateSectors",
"DeclareFaults",
"DeclareFaultsRecovered",
"OnDeferredCronEvent",
"CheckSectorProven",
"ApplyRewards",
"ReportConsensusFault",
"WithdrawBalance",
"ConfirmSectorProofsValid",
"ChangeMultiaddrs",
"CompactPartitions",
"CompactSectorNumbers",
"ConfirmUpdateWorkerKey",
"RepayDebt",
"ChangeOwnerAddress",
"DisputeWindowedPoSt",
"PreCommitSectorBatch",
"ProveCommitAggregate",
"ProveReplicaUpdates"
],
"fil/7/storagepower": [
"Send",
"Constructor",
"CreateMiner",
"UpdateClaimedPower",
"EnrollCronEvent",
"OnEpochTickEnd",
"UpdatePledgeTotal",
"SubmitPoRepForBulkVerify",
"CurrentTotalPower"
],
"fil/7/system": [
"Send",
"Constructor"
],
"fil/7/verifiedregistry": [
"Send",
"Constructor",
"AddVerifier",
"RemoveVerifier",
"AddVerifiedClient",
"UseBytes",
"RestoreBytes"
]
}

View File

@ -5,6 +5,8 @@ import (
"context"
"testing"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
@ -22,12 +24,6 @@ import (
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@ -35,6 +31,10 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/journal"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof"
tutils "github.com/filecoin-project/specs-actors/v2/support/testing"
)
type mockStorageMinerAPI struct {
@ -149,7 +149,11 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV
return true, nil
}
func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
func (m mockVerif) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) {
panic("implement me")
}
func (m mockVerif) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) {
panic("implement me")
}

Some files were not shown because too many files have changed in this diff.