Merge pull request #6342 from filecoin-project/feat/nv13-1.11

Network version 13 (v1.11)
Łukasz Magiera 2021-06-03 09:58:43 +02:00 committed by GitHub
commit bf468762b6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
173 changed files with 6278 additions and 1087 deletions

View File

@ -24,6 +24,7 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
"github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
) )
// MODIFYING THE API INTERFACE // MODIFYING THE API INTERFACE
@ -91,6 +92,16 @@ type StorageMiner interface {
// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message // SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin
// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
// Returns null if message wasn't sent
SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin
// SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message
SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
// SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
// Returns null if message wasn't sent
SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin
// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
// WorkerConnect tells the node to connect to workers RPC // WorkerConnect tells the node to connect to workers RPC
WorkerConnect(context.Context, string) error //perm:admin retry:true WorkerConnect(context.Context, string) error //perm:admin retry:true
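For orientation, here is a hedged sketch of how a caller might drive the new batching endpoints of the StorageMiner API above. Only the method names and signatures come from this change; the helper itself, its name, and the log output are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// flushSectorBatches is illustrative only (not part of this PR): it inspects the
// pending PreCommit/Commit queues and forces both batches out immediately.
func flushSectorBatches(ctx context.Context, miner api.StorageMiner) error {
	if pending, err := miner.SectorPreCommitPending(ctx); err != nil {
		return err
	} else if len(pending) > 0 {
		res, err := miner.SectorPreCommitFlush(ctx)
		if err != nil {
			return err
		}
		fmt.Printf("precommit batch: %+v\n", res) // nil result means no message was sent
	}

	if pending, err := miner.SectorCommitPending(ctx); err != nil {
		return err
	} else if len(pending) > 0 {
		res, err := miner.SectorCommitFlush(ctx)
		if err != nil {
			return err
		}
		fmt.Printf("commit batch: %+v\n", res) // nil result means no message was sent
	}
	return nil
}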

View File

@ -27,6 +27,7 @@ import (
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
"github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/stores"
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/sector-storage/storiface"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
marketevents "github.com/filecoin-project/lotus/markets/loggers" marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/specs-storage/storage"
@ -659,12 +660,20 @@ type StorageMinerStruct struct {
SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"` SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`
SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`
SectorCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
SectorGetExpectedSealDuration func(p0 context.Context) (time.Duration, error) `perm:"read"` SectorGetExpectedSealDuration func(p0 context.Context) (time.Duration, error) `perm:"read"`
SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"` SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"`
SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
SectorPreCommitFlush func(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) `perm:"admin"`
SectorPreCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
SectorRemove func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` SectorRemove func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
SectorSetExpectedSealDuration func(p0 context.Context, p1 time.Duration) error `perm:"write"` SectorSetExpectedSealDuration func(p0 context.Context, p1 time.Duration) error `perm:"write"`
@ -3117,6 +3126,22 @@ func (s *StorageMinerStub) SealingSchedDiag(p0 context.Context, p1 bool) (interf
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *StorageMinerStruct) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) {
return s.Internal.SectorCommitFlush(p0)
}
func (s *StorageMinerStub) SectorCommitFlush(p0 context.Context) ([]sealiface.CommitBatchRes, error) {
return *new([]sealiface.CommitBatchRes), xerrors.New("method not supported")
}
func (s *StorageMinerStruct) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) {
return s.Internal.SectorCommitPending(p0)
}
func (s *StorageMinerStub) SectorCommitPending(p0 context.Context) ([]abi.SectorID, error) {
return *new([]abi.SectorID), xerrors.New("method not supported")
}
func (s *StorageMinerStruct) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) { func (s *StorageMinerStruct) SectorGetExpectedSealDuration(p0 context.Context) (time.Duration, error) {
return s.Internal.SectorGetExpectedSealDuration(p0) return s.Internal.SectorGetExpectedSealDuration(p0)
} }
@ -3141,6 +3166,22 @@ func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.Secto
return xerrors.New("method not supported") return xerrors.New("method not supported")
} }
func (s *StorageMinerStruct) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) {
return s.Internal.SectorPreCommitFlush(p0)
}
func (s *StorageMinerStub) SectorPreCommitFlush(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) {
return *new([]sealiface.PreCommitBatchRes), xerrors.New("method not supported")
}
func (s *StorageMinerStruct) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) {
return s.Internal.SectorPreCommitPending(p0)
}
func (s *StorageMinerStub) SectorPreCommitPending(p0 context.Context) ([]abi.SectorID, error) {
return *new([]abi.SectorID), xerrors.New("method not supported")
}
func (s *StorageMinerStruct) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error { func (s *StorageMinerStruct) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error {
return s.Internal.SectorRemove(p0, p1) return s.Internal.SectorRemove(p0, p1)
} }

View File

@ -63,7 +63,7 @@ func TestDeadlineToggling(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(upgradeH)}, OneMiner) n, sn := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI) client := n[0].FullNode.(*impl.FullNodeAPI)
minerA := sn[0] minerA := sn[0]

View File

@ -500,6 +500,8 @@ func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNod
require.NoError(t, miner.SectorStartSealing(ctx, snum)) require.NoError(t, miner.SectorStartSealing(ctx, snum))
} }
} }
flushSealingBatches(t, ctx, miner)
} }
func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) { func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {

api/test/pledge.go (new file, 389 lines added)
View File

@ -0,0 +1,389 @@
package test
import (
"context"
"fmt"
"sort"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
)
func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
pledge := make(chan struct{})
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
// 3 sealing rounds: before, during, after.
if round >= 3 {
continue
}
head, err := client.ChainHead(ctx)
assert.NoError(t, err)
// rounds happen every 500 blocks, with a 50 block offset.
if head.Height() >= abi.ChainEpoch(round*500+50) {
round++
pledge <- struct{}{}
ver, err := client.StateNetworkVersion(ctx, head.Key())
assert.NoError(t, err)
switch round {
case 1:
assert.Equal(t, network.Version6, ver)
case 2:
assert.Equal(t, network.Version7, ver)
case 3:
assert.Equal(t, network.Version8, ver)
}
}
}
}()
// before.
pledgeSectors(t, ctx, miner, 9, 0, pledge)
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(s, func(i, j int) bool {
return s[i] < s[j]
})
for i, id := range s {
info, err := miner.SectorsStatus(ctx, id, true)
require.NoError(t, err)
expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
if i >= 3 {
// after
expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
}
assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeBatching(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors ||
(states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) {
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
}
if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors ||
(states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) {
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeBeforeNv13(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{
{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 1000000000,
Migration: stmgr.UpgradeActorsV5,
}})
},
},
}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, OneFull, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
atomic.StoreInt64(&mine, 0)
<-done
}
func flushSealingBatches(t *testing.T, ctx context.Context, miner TestStorageNode) {
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
func startPledge(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} {
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
log.Errorf("WAIT")
}
log.Errorf("PLEDGING %d", i)
_, err := miner.PledgeSector(ctx)
require.NoError(t, err)
}
for {
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
return toCheck
}
func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
toCheck := startPledge(t, ctx, miner, n, existing, blockNotif)
for len(toCheck) > 0 {
flushSealingBatches(t, ctx, miner)
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
}
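As a usage note, these helpers are meant to be wired into the node test harness; a hypothetical wrapper is sketched below. The builder variable, block time, and sector count are placeholders, and only test.TestPledgeBatching itself comes from this file (assuming the api/test import path this package lives under).

package node_test

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/api/test"
)

// someAPIBuilder must be supplied by the concrete harness (e.g. a mock-sealing
// node builder); it is declared here only so the sketch is self-contained.
var someAPIBuilder test.APIBuilder

// TestPledgeBatchingExample shows the shape of a wrapper; the values are illustrative.
func TestPledgeBatchingExample(t *testing.T) {
	test.TestPledgeBatching(t, someAPIBuilder, 50*time.Millisecond, 50)
}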

View File

@ -122,26 +122,46 @@ var OneFull = DefaultFullOpts(1)
var TwoFull = DefaultFullOpts(2) var TwoFull = DefaultFullOpts(2)
var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts { var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
if upgradeHeight == -1 { // Attention: Update this when introducing new actor versions or your tests will be sad
upgradeHeight = 3 return FullNodeWithNetworkUpgradeAt(network.Version13, upgradeHeight)
}
var FullNodeWithNetworkUpgradeAt = func(version network.Version, upgradeHeight abi.ChainEpoch) FullNodeOpts {
fullSchedule := stmgr.UpgradeSchedule{{
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 4,
Migration: stmgr.UpgradeActorsV5,
}}
schedule := stmgr.UpgradeSchedule{}
for _, upgrade := range fullSchedule {
if upgrade.Network > version {
break
}
schedule = append(schedule, upgrade)
}
if upgradeHeight > 0 {
schedule[len(schedule)-1].Height = upgradeHeight
} }
return FullNodeOpts{ return FullNodeOpts{
Opts: func(nodes []TestNode) node.Option { Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{ return node.Override(new(stmgr.UpgradeSchedule), schedule)
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: upgradeHeight,
Migration: stmgr.UpgradeActorsV4,
}})
}, },
} }
} }
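Taken together, FullNodeWithNetworkUpgradeAt trims the full test schedule down to the requested network version and, when a positive upgradeHeight is given, moves only the final upgrade to that height; passing -1 keeps the default test heights (1 through 4). Two call shapes from elsewhere in this PR:

// pin the last scheduled upgrade (nv12, actors v4) to upgradeH
n, sn := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(network.Version12, upgradeH)}, OneMiner)

// run every upgrade up to nv13 (actors v5) at the default test heights
nodes, miners := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(network.Version13, -1)}, OneMiner)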

View File

@ -4,6 +4,8 @@ import (
"context" "context"
"strings" "strings"
"github.com/filecoin-project/go-state-types/network"
lapi "github.com/filecoin-project/lotus/api" lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
@ -19,108 +21,120 @@ import (
) )
func AddVerifiedClient(t *testing.T, b APIBuilder) { func AddVerifiedClient(t *testing.T, b APIBuilder) {
test := func(nv network.Version, shouldWork bool) func(*testing.T) {
return func(t *testing.T) {
nodes, miners := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner) nodes, miners := b(t, []FullNodeOpts{FullNodeWithNetworkUpgradeAt(nv, -1)}, OneMiner)
api := nodes[0].FullNode.(*impl.FullNodeAPI) api := nodes[0].FullNode.(*impl.FullNodeAPI)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
//Get VRH //Get VRH
vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{}) vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
}
//Add verifier
verifier, err := api.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifier, Allowance: big.NewInt(100000000000)})
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
To: verifreg.Address,
From: vrh,
Method: verifreg.Methods.AddVerifier,
Params: params,
Value: big.Zero(),
}
bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond)
bm.MineBlocks()
defer bm.Stop()
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal("AddVerifier failed: ", err)
}
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
//Assign datacap to a client
datacap := big.NewInt(10000)
clientAddress, err := api.WalletNew(ctx, types.KTBLS)
if err != nil {
t.Fatal(err)
}
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
if err != nil {
t.Fatal(err)
}
msg = &types.Message{
To: verifreg.Address,
From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),
}
sm, err = api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal("AddVerifiedClient faield: ", err)
}
res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
//check datacap balance
dcap, err := api.StateVerifiedClientStatus(ctx, clientAddress, types.EmptyTSK)
if err != nil {
t.Fatal(err)
}
if !dcap.Equals(datacap) {
t.Fatal("")
}
//try to assign datacap to the same client should fail for actor v4 and below
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
if err != nil {
t.Fatal(err)
}
msg = &types.Message{
To: verifreg.Address,
From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),
}
_, err = api.MpoolPushMessage(ctx, msg, nil)
if shouldWork && err != nil {
t.Fatal("expected nil err", err)
}
if !shouldWork && (err == nil || !strings.Contains(err.Error(), "verified client already exists")) {
t.Fatal("Add datacap to an existing verified client should fail")
}
}
} }
//Add verifier t.Run("nv12", test(network.Version12, false))
verifier, err := api.WalletDefaultAddress(ctx) t.Run("nv13", test(network.Version13, true))
if err != nil {
t.Fatal(err)
}
params, err := actors.SerializeParams(&verifreg4.AddVerifierParams{Address: verifier, Allowance: big.NewInt(100000000000)})
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
To: verifreg.Address,
From: vrh,
Method: verifreg.Methods.AddVerifier,
Params: params,
Value: big.Zero(),
}
bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond)
bm.MineBlocks()
defer bm.Stop()
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal("AddVerifier failed: ", err)
}
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
//Assign datacap to a client
datacap := big.NewInt(10000)
clientAddress, err := api.WalletNew(ctx, types.KTBLS)
if err != nil {
t.Fatal(err)
}
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
if err != nil {
t.Fatal(err)
}
msg = &types.Message{
To: verifreg.Address,
From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),
}
sm, err = api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal("AddVerifiedClient faield: ", err)
}
res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
//check datacap balance
dcap, err := api.StateVerifiedClientStatus(ctx, clientAddress, types.EmptyTSK)
if err != nil {
t.Fatal(err)
}
if !dcap.Equals(datacap) {
t.Fatal("")
}
//try to assign datacap to the same client should fail for actor v4 and below
params, err = actors.SerializeParams(&verifreg4.AddVerifiedClientParams{Address: clientAddress, Allowance: datacap})
if err != nil {
t.Fatal(err)
}
msg = &types.Message{
To: verifreg.Address,
From: verifier,
Method: verifreg.Methods.AddVerifiedClient,
Params: params,
Value: big.Zero(),
}
if _, err = api.MpoolPushMessage(ctx, msg, nil); !strings.Contains(err.Error(), "verified client already exists") {
t.Fatal("Add datacap to an exist verified client should fail")
}
} }

View File

@ -3,14 +3,11 @@ package test
import ( import (
"context" "context"
"fmt" "fmt"
"sort"
"sync/atomic"
"strings"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert" "github.com/filecoin-project/go-state-types/big"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -18,7 +15,6 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/extern/sector-storage/mock" "github.com/filecoin-project/lotus/extern/sector-storage/mock"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing" sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof"
@ -29,181 +25,9 @@ import (
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner" minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl" "github.com/filecoin-project/lotus/node/impl"
) )
func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
pledge := make(chan struct{})
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
// 3 sealing rounds: before, during after.
if round >= 3 {
continue
}
head, err := client.ChainHead(ctx)
assert.NoError(t, err)
// rounds happen every 100 blocks, with a 50 block offset.
if head.Height() >= abi.ChainEpoch(round*500+50) {
round++
pledge <- struct{}{}
ver, err := client.StateNetworkVersion(ctx, head.Key())
assert.NoError(t, err)
switch round {
case 1:
assert.Equal(t, network.Version6, ver)
case 2:
assert.Equal(t, network.Version7, ver)
case 3:
assert.Equal(t, network.Version8, ver)
}
}
}
}()
// before.
pledgeSectors(t, ctx, miner, 9, 0, pledge)
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(s, func(i, j int) bool {
return s[i] < s[j]
})
for i, id := range s {
info, err := miner.SectorsStatus(ctx, id, true)
require.NoError(t, err)
expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
if i >= 3 {
// after
expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
}
assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, OneFull, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
atomic.StoreInt64(&mine, 0)
<-done
}
func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
log.Errorf("WAIT")
}
log.Errorf("PLEDGING %d", i)
_, err := miner.PledgeSector(ctx)
require.NoError(t, err)
}
for {
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
for len(toCheck) > 0 {
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d\n", len(s))
}
}
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
for _, height := range []abi.ChainEpoch{ for _, height := range []abi.ChainEpoch{
-1, // before -1, // before
@ -719,7 +543,7 @@ func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration)
for { for {
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err) require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline { if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 {
break break
} }
build.Clock.Sleep(blocktime) build.Clock.Sleep(blocktime)
@ -816,7 +640,7 @@ func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration)
for { for {
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
require.NoError(t, err) require.NoError(t, err)
if di.Index == evilSectorLoc.Deadline { if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 {
break break
} }
build.Clock.Sleep(blocktime) build.Clock.Sleep(blocktime)
@ -1024,3 +848,155 @@ waitForProof:
require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)") require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)")
} }
} }
func TestWindowPostBaseFeeNoBurn(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
och := build.UpgradeClausHeight
build.UpgradeClausHeight = 10
n, sn := b(t, DefaultFullOpts(1), OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
{
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := miner.MineOne(ctx, MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
pledgeSectors(t, ctx, miner, 10, 0, nil)
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
en := wact.Nonce
// wait for a new message to be sent from the worker address; it will be a PoSt
waitForProof:
for {
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
if wact.Nonce > en {
break waitForProof
}
build.Clock.Sleep(blocktime)
}
slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
require.NoError(t, err)
pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
require.NoError(t, err)
require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero())
build.UpgradeClausHeight = och
}
func TestWindowPostBaseFeeBurn(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
{
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := miner.MineOne(ctx, MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
pledgeSectors(t, ctx, miner, 10, 0, nil)
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
en := wact.Nonce
// wait for a new message to be sent from the worker address; it will be a PoSt
waitForProof:
for {
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
if wact.Nonce > en {
break waitForProof
}
build.Clock.Sleep(blocktime)
}
slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
require.NoError(t, err)
pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
require.NoError(t, err)
require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero())
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -7,6 +7,13 @@ import (
//go:embed proof-params/parameters.json //go:embed proof-params/parameters.json
var params []byte var params []byte
//go:embed proof-params/srs-inner-product.json
var srs []byte
func ParametersJSON() []byte { func ParametersJSON() []byte {
return params return params
} }
func SrsJSON() []byte {
return srs
}

View File

@ -24,7 +24,7 @@ var UpgradeIgnitionHeight = abi.ChainEpoch(-2)
var UpgradeRefuelHeight = abi.ChainEpoch(-3) var UpgradeRefuelHeight = abi.ChainEpoch(-3)
var UpgradeTapeHeight = abi.ChainEpoch(-4) var UpgradeTapeHeight = abi.ChainEpoch(-4)
var UpgradeActorsV2Height = abi.ChainEpoch(-5) var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
var UpgradeLiftoffHeight = abi.ChainEpoch(-6) var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
var UpgradeKumquatHeight = abi.ChainEpoch(-7) var UpgradeKumquatHeight = abi.ChainEpoch(-7)
@ -33,11 +33,13 @@ var UpgradePersianHeight = abi.ChainEpoch(-9)
var UpgradeOrangeHeight = abi.ChainEpoch(-10) var UpgradeOrangeHeight = abi.ChainEpoch(-10)
var UpgradeClausHeight = abi.ChainEpoch(-11) var UpgradeClausHeight = abi.ChainEpoch(-11)
var UpgradeActorsV3Height = abi.ChainEpoch(-12) var UpgradeTrustHeight = abi.ChainEpoch(-12)
var UpgradeNorwegianHeight = abi.ChainEpoch(-13) var UpgradeNorwegianHeight = abi.ChainEpoch(-13)
var UpgradeActorsV4Height = abi.ChainEpoch(-14) var UpgradeTurboHeight = abi.ChainEpoch(-14)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-15)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
@ -68,16 +70,17 @@ func init() {
UpgradeIgnitionHeight = getUpgradeHeight("LOTUS_IGNITION_HEIGHT", UpgradeIgnitionHeight) UpgradeIgnitionHeight = getUpgradeHeight("LOTUS_IGNITION_HEIGHT", UpgradeIgnitionHeight)
UpgradeRefuelHeight = getUpgradeHeight("LOTUS_REFUEL_HEIGHT", UpgradeRefuelHeight) UpgradeRefuelHeight = getUpgradeHeight("LOTUS_REFUEL_HEIGHT", UpgradeRefuelHeight)
UpgradeTapeHeight = getUpgradeHeight("LOTUS_TAPE_HEIGHT", UpgradeTapeHeight) UpgradeTapeHeight = getUpgradeHeight("LOTUS_TAPE_HEIGHT", UpgradeTapeHeight)
UpgradeActorsV2Height = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeActorsV2Height) UpgradeAssemblyHeight = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeAssemblyHeight)
UpgradeLiftoffHeight = getUpgradeHeight("LOTUS_LIFTOFF_HEIGHT", UpgradeLiftoffHeight) UpgradeLiftoffHeight = getUpgradeHeight("LOTUS_LIFTOFF_HEIGHT", UpgradeLiftoffHeight)
UpgradeKumquatHeight = getUpgradeHeight("LOTUS_KUMQUAT_HEIGHT", UpgradeKumquatHeight) UpgradeKumquatHeight = getUpgradeHeight("LOTUS_KUMQUAT_HEIGHT", UpgradeKumquatHeight)
UpgradeCalicoHeight = getUpgradeHeight("LOTUS_CALICO_HEIGHT", UpgradeCalicoHeight) UpgradeCalicoHeight = getUpgradeHeight("LOTUS_CALICO_HEIGHT", UpgradeCalicoHeight)
UpgradePersianHeight = getUpgradeHeight("LOTUS_PERSIAN_HEIGHT", UpgradePersianHeight) UpgradePersianHeight = getUpgradeHeight("LOTUS_PERSIAN_HEIGHT", UpgradePersianHeight)
UpgradeOrangeHeight = getUpgradeHeight("LOTUS_ORANGE_HEIGHT", UpgradeOrangeHeight) UpgradeOrangeHeight = getUpgradeHeight("LOTUS_ORANGE_HEIGHT", UpgradeOrangeHeight)
UpgradeClausHeight = getUpgradeHeight("LOTUS_CLAUS_HEIGHT", UpgradeClausHeight) UpgradeClausHeight = getUpgradeHeight("LOTUS_CLAUS_HEIGHT", UpgradeClausHeight)
UpgradeActorsV3Height = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeActorsV3Height) UpgradeTrustHeight = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeTrustHeight)
UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight) UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight)
UpgradeActorsV4Height = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeActorsV4Height) UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight)
UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight)
BuildType |= Build2k BuildType |= Build2k
} }
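getUpgradeHeight itself is not part of this hunk; for readers unfamiliar with it, a minimal sketch of the environment-override pattern it implies is below. The fallback behaviour on a malformed value is an assumption, not a claim about the real implementation.

package build

import (
	"os"
	"strconv"

	"github.com/filecoin-project/go-state-types/abi"
)

// getUpgradeHeightSketch is illustrative: if the named LOTUS_*_HEIGHT variable is
// set and parses as an integer epoch, it overrides the compiled-in default.
func getUpgradeHeightSketch(ev string, def abi.ChainEpoch) abi.ChainEpoch {
	v, ok := os.LookupEnv(ev)
	if !ok {
		return def
	}
	h, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return def // assumed fallback; the real code may treat this as fatal
	}
	return abi.ChainEpoch(h)
}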

View File

@ -23,7 +23,7 @@ const UpgradeSmokeHeight = -2
const UpgradeIgnitionHeight = -3 const UpgradeIgnitionHeight = -3
const UpgradeRefuelHeight = -4 const UpgradeRefuelHeight = -4
var UpgradeActorsV2Height = abi.ChainEpoch(30) var UpgradeAssemblyHeight = abi.ChainEpoch(30)
const UpgradeTapeHeight = 60 const UpgradeTapeHeight = 60
const UpgradeLiftoffHeight = -5 const UpgradeLiftoffHeight = -5
@ -32,9 +32,10 @@ const UpgradeCalicoHeight = 120
const UpgradePersianHeight = 150 const UpgradePersianHeight = 150
const UpgradeClausHeight = 180 const UpgradeClausHeight = 180
const UpgradeOrangeHeight = 210 const UpgradeOrangeHeight = 210
const UpgradeActorsV3Height = 240 const UpgradeTrustHeight = 240
const UpgradeNorwegianHeight = UpgradeActorsV3Height + (builtin2.EpochsInHour * 12) const UpgradeNorwegianHeight = UpgradeTrustHeight + (builtin2.EpochsInHour * 12)
const UpgradeActorsV4Height = 8922 const UpgradeTurboHeight = 8922
const UpgradeHyperdriveHeight = 9999999
func init() { func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30)) policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30))

View File

@ -25,7 +25,7 @@ const UpgradeSmokeHeight = -2
const UpgradeIgnitionHeight = -3 const UpgradeIgnitionHeight = -3
const UpgradeRefuelHeight = -4 const UpgradeRefuelHeight = -4
var UpgradeActorsV2Height = abi.ChainEpoch(30) var UpgradeAssemblyHeight = abi.ChainEpoch(30)
const UpgradeTapeHeight = 60 const UpgradeTapeHeight = 60
@ -40,10 +40,12 @@ const UpgradeClausHeight = 250
const UpgradeOrangeHeight = 300 const UpgradeOrangeHeight = 300
const UpgradeActorsV3Height = 600 const UpgradeTrustHeight = 600
const UpgradeNorwegianHeight = 114000 const UpgradeNorwegianHeight = 114000
const UpgradeActorsV4Height = 193789 const UpgradeTurboHeight = 193789
const UpgradeHyperdriveHeight = 9999999
func init() { func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30)) policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30))

View File

@ -8,6 +8,7 @@
package build package build
import ( import (
"math"
"os" "os"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -32,7 +33,7 @@ const UpgradeSmokeHeight = 51000
const UpgradeIgnitionHeight = 94000 const UpgradeIgnitionHeight = 94000
const UpgradeRefuelHeight = 130800 const UpgradeRefuelHeight = 130800
const UpgradeActorsV2Height = 138720 const UpgradeAssemblyHeight = 138720
const UpgradeTapeHeight = 140760 const UpgradeTapeHeight = 140760
@ -49,22 +50,29 @@ const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
const UpgradeOrangeHeight = 336458 const UpgradeOrangeHeight = 336458
// 2020-12-22T02:00:00Z // 2020-12-22T02:00:00Z
const UpgradeClausHeight = 343200 var UpgradeClausHeight = abi.ChainEpoch(343200)
// 2021-03-04T00:00:30Z // 2021-03-04T00:00:30Z
const UpgradeActorsV3Height = 550321 const UpgradeTrustHeight = 550321
// 2021-04-12T22:00:00Z // 2021-04-12T22:00:00Z
const UpgradeNorwegianHeight = 665280 const UpgradeNorwegianHeight = 665280
// 2021-04-29T06:00:00Z // 2021-04-29T06:00:00Z
const UpgradeActorsV4Height = 712320 const UpgradeTurboHeight = 712320
// ???
var UpgradeHyperdriveHeight = abi.ChainEpoch(9999999)
func init() { func init() {
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet) SetAddressNetwork(address.Mainnet)
} }
if os.Getenv("LOTUS_DISABLE_HYPERDRIVE") == "1" {
UpgradeHyperdriveHeight = math.MaxInt64
}
Devnet = false Devnet = false
BuildType = BuildMainnet BuildType = BuildMainnet

View File

@ -27,7 +27,7 @@ const UpgradeRefuelHeight = -3
const UpgradeLiftoffHeight = -5 const UpgradeLiftoffHeight = -5
const UpgradeActorsV2Height = 30 // critical: the network can bootstrap from v1 only const UpgradeAssemblyHeight = 30 // critical: the network can bootstrap from v1 only
const UpgradeTapeHeight = 60 const UpgradeTapeHeight = 60
const UpgradeKumquatHeight = 90 const UpgradeKumquatHeight = 90
@ -39,9 +39,10 @@ const UpgradeClausHeight = 250
const UpgradeOrangeHeight = 300 const UpgradeOrangeHeight = 300
const UpgradeActorsV3Height = 600 const UpgradeTrustHeight = 600
const UpgradeNorwegianHeight = 201000 const UpgradeNorwegianHeight = 201000
const UpgradeActorsV4Height = 203000 const UpgradeTurboHeight = 203000
const UpgradeHyperdriveHeight = 999999999
func init() { func init() {
// Minimum block production power is set to 4 TiB // Minimum block production power is set to 4 TiB

View File

@ -25,7 +25,7 @@ const UnixfsLinksPerLevel = 1024
// Consensus / Network // Consensus / Network
const AllowableClockDriftSecs = uint64(1) const AllowableClockDriftSecs = uint64(1)
const NewestNetworkVersion = network.Version12 const NewestNetworkVersion = network.Version13
const ActorUpgradeNetworkVersion = network.Version4 const ActorUpgradeNetworkVersion = network.Version4
// Epochs // Epochs

View File

@ -82,20 +82,21 @@ var (
UpgradeBreezeHeight abi.ChainEpoch = -1 UpgradeBreezeHeight abi.ChainEpoch = -1
BreezeGasTampingDuration abi.ChainEpoch = 0 BreezeGasTampingDuration abi.ChainEpoch = 0
UpgradeSmokeHeight abi.ChainEpoch = -1 UpgradeSmokeHeight abi.ChainEpoch = -1
UpgradeIgnitionHeight abi.ChainEpoch = -2 UpgradeIgnitionHeight abi.ChainEpoch = -2
UpgradeRefuelHeight abi.ChainEpoch = -3 UpgradeRefuelHeight abi.ChainEpoch = -3
UpgradeTapeHeight abi.ChainEpoch = -4 UpgradeTapeHeight abi.ChainEpoch = -4
UpgradeActorsV2Height abi.ChainEpoch = 10 UpgradeAssemblyHeight abi.ChainEpoch = 10
UpgradeLiftoffHeight abi.ChainEpoch = -5 UpgradeLiftoffHeight abi.ChainEpoch = -5
UpgradeKumquatHeight abi.ChainEpoch = -6 UpgradeKumquatHeight abi.ChainEpoch = -6
UpgradeCalicoHeight abi.ChainEpoch = -7 UpgradeCalicoHeight abi.ChainEpoch = -7
UpgradePersianHeight abi.ChainEpoch = -8 UpgradePersianHeight abi.ChainEpoch = -8
UpgradeOrangeHeight abi.ChainEpoch = -9 UpgradeOrangeHeight abi.ChainEpoch = -9
UpgradeClausHeight abi.ChainEpoch = -10 UpgradeClausHeight abi.ChainEpoch = -10
UpgradeActorsV3Height abi.ChainEpoch = -11 UpgradeTrustHeight abi.ChainEpoch = -11
UpgradeNorwegianHeight abi.ChainEpoch = -12 UpgradeNorwegianHeight abi.ChainEpoch = -12
UpgradeActorsV4Height abi.ChainEpoch = -13 UpgradeTurboHeight abi.ChainEpoch = -13
UpgradeHyperdriveHeight abi.ChainEpoch = -13
DrandSchedule = map[abi.ChainEpoch]DrandEnum{ DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,

View File

@ -0,0 +1,7 @@
{
"v28-fil-inner-product-v1.srs": {
"cid": "Qmdq44DjcQnFfU3PJcdX7J49GCqcUYszr1TxMbHtAkvQ3g",
"digest": "ae20310138f5ba81451d723f858e3797",
"sector_size": 0
}
}
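The new srs-inner-product.json mirrors the shape of proof-params/parameters.json, so it can be consumed the same way through the SrsJSON accessor added earlier in this PR. A hedged sketch follows; the paramMeta struct is an assumption based on the field names visible in the file, not a type from this change.

package example

import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/lotus/build"
)

// paramMeta matches the per-file metadata visible in the JSON above.
type paramMeta struct {
	Cid        string `json:"cid"`
	Digest     string `json:"digest"`
	SectorSize uint64 `json:"sector_size"`
}

// listSRSParams decodes the embedded SRS manifest: filename -> metadata.
func listSRSParams() error {
	var srs map[string]paramMeta
	if err := json.Unmarshal(build.SrsJSON(), &srs); err != nil {
		return err
	}
	for name, meta := range srs {
		fmt.Printf("%s: cid=%s digest=%s size=%d\n", name, meta.Cid, meta.Digest, meta.SectorSize)
	}
	return nil
}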

View File

View File

View File

@ -19,6 +19,8 @@ import (
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
) )
func init() { func init() {
@ -38,6 +40,10 @@ func init() {
builtin.RegisterActorState(builtin4.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { builtin.RegisterActorState(builtin4.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load4(store, root) return load4(store, root)
}) })
builtin.RegisterActorState(builtin5.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load5(store, root)
})
} }
var Methods = builtin4.MethodsAccount var Methods = builtin4.MethodsAccount
@ -57,6 +63,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin4.AccountActorCodeID: case builtin4.AccountActorCodeID:
return load4(store, act.Head) return load4(store, act.Head)
case builtin5.AccountActorCodeID:
return load5(store, act.Head)
} }
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
@ -76,6 +85,9 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State,
case actors.Version4: case actors.Version4:
return make4(store, addr) return make4(store, addr)
case actors.Version5:
return make5(store, addr)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -95,6 +107,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.AccountActorCodeID, nil return builtin4.AccountActorCodeID, nil
case actors.Version5:
return builtin5.AccountActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
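Every actor wrapper in chain/actors/builtin gains the same Version5 arm as the account actor above; a small illustrative call through the new case is sketched below (the import paths are assumed from the package layout, not shown in this hunk).

package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/account"
)

// printV5AccountCode resolves the actors-v5 account code CID via the new case.
func printV5AccountCode() error {
	code, err := account.GetActorCodeID(actors.Version5)
	if err != nil {
		return err
	}
	fmt.Println("account actor v5 code CID:", code)
	return nil
}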

View File

@ -0,0 +1,40 @@
package account
import (
"github.com/filecoin-project/go-address"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
account5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/account"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store, addr address.Address) (State, error) {
out := state5{store: store}
out.State = account5.State{Address: addr}
return &out, nil
}
type state5 struct {
account5.State
store adt.Store
}
func (s *state5) PubkeyAddress() (address.Address, error) {
return s.Address, nil
}
func (s *state5) GetState() interface{} {
return &s.State
}

View File

@ -17,46 +17,49 @@ import (
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
smoothing4 "github.com/filecoin-project/specs-actors/v4/actors/util/smoothing" smoothing4 "github.com/filecoin-project/specs-actors/v4/actors/util/smoothing"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
proof4 "github.com/filecoin-project/specs-actors/v4/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
) )
var SystemActorAddr = builtin4.SystemActorAddr var SystemActorAddr = builtin5.SystemActorAddr
var BurntFundsActorAddr = builtin4.BurntFundsActorAddr var BurntFundsActorAddr = builtin5.BurntFundsActorAddr
var CronActorAddr = builtin4.CronActorAddr var CronActorAddr = builtin5.CronActorAddr
var SaftAddress = makeAddress("t0122") var SaftAddress = makeAddress("t0122")
var ReserveAddress = makeAddress("t090") var ReserveAddress = makeAddress("t090")
var RootVerifierAddress = makeAddress("t080") var RootVerifierAddress = makeAddress("t080")
var ( var (
ExpectedLeadersPerEpoch = builtin4.ExpectedLeadersPerEpoch ExpectedLeadersPerEpoch = builtin5.ExpectedLeadersPerEpoch
) )
const ( const (
EpochDurationSeconds = builtin4.EpochDurationSeconds EpochDurationSeconds = builtin5.EpochDurationSeconds
EpochsInDay = builtin4.EpochsInDay EpochsInDay = builtin5.EpochsInDay
SecondsInDay = builtin4.SecondsInDay SecondsInDay = builtin5.SecondsInDay
) )
const ( const (
MethodSend = builtin4.MethodSend MethodSend = builtin5.MethodSend
MethodConstructor = builtin4.MethodConstructor MethodConstructor = builtin5.MethodConstructor
) )
// These are all just type aliases across actor versions. In the future, that might change // These are all just type aliases across actor versions. In the future, that might change
// and we might need to do something fancier. // and we might need to do something fancier.
type SectorInfo = proof4.SectorInfo type SectorInfo = proof5.SectorInfo
type PoStProof = proof4.PoStProof type PoStProof = proof5.PoStProof
type FilterEstimate = smoothing0.FilterEstimate type FilterEstimate = smoothing0.FilterEstimate
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
return miner4.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) return miner5.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
} }
func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate { func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
@ -83,6 +86,12 @@ func FromV4FilterEstimate(v4 smoothing4.FilterEstimate) FilterEstimate {
} }
func FromV5FilterEstimate(v5 smoothing5.FilterEstimate) FilterEstimate {
return (FilterEstimate)(v5)
}
type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)
var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader) var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader)
@ -114,6 +123,9 @@ func ActorNameByCode(c cid.Cid) string {
case builtin4.IsBuiltinActor(c): case builtin4.IsBuiltinActor(c):
return builtin4.ActorNameByCode(c) return builtin4.ActorNameByCode(c)
case builtin5.IsBuiltinActor(c):
return builtin5.ActorNameByCode(c)
default: default:
return "<unknown>" return "<unknown>"
} }
@ -137,6 +149,10 @@ func IsBuiltinActor(c cid.Cid) bool {
return true return true
} }
if builtin5.IsBuiltinActor(c) {
return true
}
return false return false
} }
@ -158,6 +174,10 @@ func IsAccountActor(c cid.Cid) bool {
return true return true
} }
if c == builtin5.AccountActorCodeID {
return true
}
return false return false
} }
@ -179,6 +199,10 @@ func IsStorageMinerActor(c cid.Cid) bool {
return true return true
} }
if c == builtin5.StorageMinerActorCodeID {
return true
}
return false return false
} }
@ -200,6 +224,10 @@ func IsMultisigActor(c cid.Cid) bool {
return true return true
} }
if c == builtin5.MultisigActorCodeID {
return true
}
return false return false
} }
@ -221,6 +249,10 @@ func IsPaymentChannelActor(c cid.Cid) bool {
return true return true
} }
if c == builtin5.PaymentChannelActorCodeID {
return true
}
return false return false
} }

View File

@ -17,7 +17,7 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
miner{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/miner" miner{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/miner"
proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof" proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof"
) )
var SystemActorAddr = builtin{{.latestVersion}}.SystemActorAddr var SystemActorAddr = builtin{{.latestVersion}}.SystemActorAddr
@ -33,12 +33,12 @@ var (
const ( const (
EpochDurationSeconds = builtin{{.latestVersion}}.EpochDurationSeconds EpochDurationSeconds = builtin{{.latestVersion}}.EpochDurationSeconds
EpochsInDay = builtin{{.latestVersion}}.EpochsInDay EpochsInDay = builtin{{.latestVersion}}.EpochsInDay
SecondsInDay = builtin{{.latestVersion}}.SecondsInDay SecondsInDay = builtin{{.latestVersion}}.SecondsInDay
) )
const ( const (
MethodSend = builtin{{.latestVersion}}.MethodSend MethodSend = builtin{{.latestVersion}}.MethodSend
MethodConstructor = builtin{{.latestVersion}}.MethodConstructor MethodConstructor = builtin{{.latestVersion}}.MethodConstructor
) )

View File

@ -13,6 +13,8 @@ import (
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
) )
func MakeState(store adt.Store, av actors.Version) (State, error) { func MakeState(store adt.Store, av actors.Version) (State, error) {
@ -30,6 +32,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version4: case actors.Version4:
return make4(store) return make4(store)
case actors.Version5:
return make5(store)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -49,14 +54,17 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.CronActorCodeID, nil return builtin4.CronActorCodeID, nil
case actors.Version5:
return builtin5.CronActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
} }
var ( var (
Address = builtin4.CronActorAddr Address = builtin5.CronActorAddr
Methods = builtin4.MethodsCron Methods = builtin5.MethodsCron
) )
type State interface { type State interface {

View File

@ -0,0 +1,35 @@
package cron
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
cron5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/cron"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store) (State, error) {
out := state5{store: store}
out.State = *cron5.ConstructState(cron5.BuiltInEntries())
return &out, nil
}
type state5 struct {
cron5.State
store adt.Store
}
func (s *state5) GetState() interface{} {
return &s.State
}
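As a quick illustration of how the version-dispatching helpers above are consumed, here is a hedged sketch (not part of this changeset; the wrapper name makeCronV5 and its variables are assumptions):

    package example

    import (
        "github.com/ipfs/go-cid"

        "github.com/filecoin-project/lotus/chain/actors"
        "github.com/filecoin-project/lotus/chain/actors/adt"
        "github.com/filecoin-project/lotus/chain/actors/builtin/cron"
    )

    // makeCronV5 is a hypothetical helper: it resolves the v5 cron actor code CID
    // and constructs matching empty state through the switches added above.
    func makeCronV5(store adt.Store) (cron.State, cid.Cid, error) {
        code, err := cron.GetActorCodeID(actors.Version5)
        if err != nil {
            return nil, cid.Undef, err
        }
        st, err := cron.MakeState(store, actors.Version5)
        if err != nil {
            return nil, cid.Undef, err
        }
        return st, code, nil
    }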

View File

@ -21,6 +21,8 @@ import (
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
) )
func init() { func init() {
@ -40,11 +42,15 @@ func init() {
builtin.RegisterActorState(builtin4.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { builtin.RegisterActorState(builtin4.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load4(store, root) return load4(store, root)
}) })
builtin.RegisterActorState(builtin5.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load5(store, root)
})
} }
var ( var (
Address = builtin4.InitActorAddr Address = builtin5.InitActorAddr
Methods = builtin4.MethodsInit Methods = builtin5.MethodsInit
) )
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
@ -62,6 +68,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin4.InitActorCodeID: case builtin4.InitActorCodeID:
return load4(store, act.Head) return load4(store, act.Head)
case builtin5.InitActorCodeID:
return load5(store, act.Head)
} }
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
@ -81,6 +90,9 @@ func MakeState(store adt.Store, av actors.Version, networkName string) (State, e
case actors.Version4: case actors.Version4:
return make4(store, networkName) return make4(store, networkName)
case actors.Version5:
return make5(store, networkName)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -100,6 +112,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.InitActorCodeID, nil return builtin4.InitActorCodeID, nil
case actors.Version5:
return builtin5.InitActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)

View File

@ -0,0 +1,114 @@
package init
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/node/modules/dtypes"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init"
adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store, networkName string) (State, error) {
out := state5{store: store}
s, err := init5.ConstructState(store, networkName)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state5 struct {
init5.State
store adt.Store
}
func (s *state5) ResolveAddress(address address.Address) (address.Address, bool, error) {
return s.State.ResolveAddress(s.store, address)
}
func (s *state5) MapAddressToNewID(address address.Address) (address.Address, error) {
return s.State.MapAddressToNewID(s.store, address)
}
func (s *state5) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
addrs, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth)
if err != nil {
return err
}
var actorID cbg.CborInt
return addrs.ForEach(&actorID, func(key string) error {
addr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
return cb(abi.ActorID(actorID), addr)
})
}
func (s *state5) NetworkName() (dtypes.NetworkName, error) {
return dtypes.NetworkName(s.State.NetworkName), nil
}
func (s *state5) SetNetworkName(name string) error {
s.State.NetworkName = name
return nil
}
func (s *state5) SetNextID(id abi.ActorID) error {
s.State.NextID = id
return nil
}
func (s *state5) Remove(addrs ...address.Address) (err error) {
m, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth)
if err != nil {
return err
}
for _, addr := range addrs {
if err = m.Delete(abi.AddrKey(addr)); err != nil {
return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
}
}
amr, err := m.Root()
if err != nil {
return xerrors.Errorf("failed to get address map root: %w", err)
}
s.State.AddressMap = amr
return nil
}
func (s *state5) SetAddressMap(mcid cid.Cid) error {
s.State.AddressMap = mcid
return nil
}
func (s *state5) AddressMap() (adt.Map, error) {
return adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth)
}
func (s *state5) GetState() interface{} {
return &s.State
}
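The abstract State returned by load5/make5 is normally consumed through the interface methods shown above; a hedged usage sketch (the helper name dumpInitState and its arguments are illustrative, not part of the commit):

    package example

    import (
        "fmt"

        "github.com/filecoin-project/go-address"
        "github.com/filecoin-project/go-state-types/abi"

        init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
    )

    // dumpInitState resolves one address and then walks the full ID-address map.
    func dumpInitState(st init_.State, target address.Address) error {
        if id, found, err := st.ResolveAddress(target); err != nil {
            return err
        } else if found {
            fmt.Println("resolved to ID address:", id)
        }
        return st.ForEachActor(func(id abi.ActorID, addr address.Address) error {
            fmt.Printf("actor %d -> %s\n", id, addr)
            return nil
        })
    }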

View File

@ -104,6 +104,7 @@ type DealProposals interface {
type PublishStorageDealsParams = market0.PublishStorageDealsParams type PublishStorageDealsParams = market0.PublishStorageDealsParams
type PublishStorageDealsReturn = market0.PublishStorageDealsReturn type PublishStorageDealsReturn = market0.PublishStorageDealsReturn
type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams
type WithdrawBalanceParams = market0.WithdrawBalanceParams type WithdrawBalanceParams = market0.WithdrawBalanceParams
type ClientDealProposal = market0.ClientDealProposal type ClientDealProposal = market0.ClientDealProposal
@ -111,7 +112,7 @@ type ClientDealProposal = market0.ClientDealProposal
type DealState struct { type DealState struct {
SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector
LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated
SlashEpoch abi.ChainEpoch // -1 if deal never slashed SlashEpoch abi.ChainEpoch // -1 if deal never slashed
} }
type DealProposal struct { type DealProposal struct {

View File

@ -20,6 +20,8 @@ import (
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
@ -43,11 +45,15 @@ func init() {
builtin.RegisterActorState(builtin4.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { builtin.RegisterActorState(builtin4.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load4(store, root) return load4(store, root)
}) })
builtin.RegisterActorState(builtin5.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load5(store, root)
})
} }
var ( var (
Address = builtin4.StorageMarketActorAddr Address = builtin5.StorageMarketActorAddr
Methods = builtin4.MethodsMarket Methods = builtin5.MethodsMarket
) )
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
@ -65,6 +71,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin4.StorageMarketActorCodeID: case builtin4.StorageMarketActorCodeID:
return load4(store, act.Head) return load4(store, act.Head)
case builtin5.StorageMarketActorCodeID:
return load5(store, act.Head)
} }
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
@ -84,6 +93,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version4: case actors.Version4:
return make4(store) return make4(store)
case actors.Version5:
return make5(store)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -103,6 +115,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.StorageMarketActorCodeID, nil return builtin4.StorageMarketActorCodeID, nil
case actors.Version5:
return builtin5.StorageMarketActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -148,6 +163,7 @@ type DealProposals interface {
type PublishStorageDealsParams = market0.PublishStorageDealsParams type PublishStorageDealsParams = market0.PublishStorageDealsParams
type PublishStorageDealsReturn = market0.PublishStorageDealsReturn type PublishStorageDealsReturn = market0.PublishStorageDealsReturn
type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams
type WithdrawBalanceParams = market0.WithdrawBalanceParams type WithdrawBalanceParams = market0.WithdrawBalanceParams
type ClientDealProposal = market0.ClientDealProposal type ClientDealProposal = market0.ClientDealProposal

View File

@ -235,4 +235,4 @@ func fromV{{.v}}DealProposal(v{{.v}} market{{.v}}.DealProposal) DealProposal {
func (s *state{{.v}}) GetState() interface{} { func (s *state{{.v}}) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -0,0 +1,226 @@
package market
import (
"bytes"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types"
market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market"
adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store) (State, error) {
out := state5{store: store}
s, err := market5.ConstructState(store)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state5 struct {
market5.State
store adt.Store
}
func (s *state5) TotalLocked() (abi.TokenAmount, error) {
fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
fml = types.BigAdd(fml, s.TotalClientStorageFee)
return fml, nil
}
func (s *state5) BalancesChanged(otherState State) (bool, error) {
otherState5, ok := otherState.(*state5)
if !ok {
// there's no way to compare different versions of the state, so let's
// just say that means the state of balances has changed
return true, nil
}
return !s.State.EscrowTable.Equals(otherState5.State.EscrowTable) || !s.State.LockedTable.Equals(otherState5.State.LockedTable), nil
}
func (s *state5) StatesChanged(otherState State) (bool, error) {
otherState5, ok := otherState.(*state5)
if !ok {
// there's no way to compare different versions of the state, so let's
// just say that means the deal states have changed
return true, nil
}
return !s.State.States.Equals(otherState5.State.States), nil
}
func (s *state5) States() (DealStates, error) {
stateArray, err := adt5.AsArray(s.store, s.State.States, market5.StatesAmtBitwidth)
if err != nil {
return nil, err
}
return &dealStates5{stateArray}, nil
}
func (s *state5) ProposalsChanged(otherState State) (bool, error) {
otherState5, ok := otherState.(*state5)
if !ok {
// there's no way to compare different versions of the state, so let's
// just say that means the proposals have changed
return true, nil
}
return !s.State.Proposals.Equals(otherState5.State.Proposals), nil
}
func (s *state5) Proposals() (DealProposals, error) {
proposalArray, err := adt5.AsArray(s.store, s.State.Proposals, market5.ProposalsAmtBitwidth)
if err != nil {
return nil, err
}
return &dealProposals5{proposalArray}, nil
}
func (s *state5) EscrowTable() (BalanceTable, error) {
bt, err := adt5.AsBalanceTable(s.store, s.State.EscrowTable)
if err != nil {
return nil, err
}
return &balanceTable5{bt}, nil
}
func (s *state5) LockedTable() (BalanceTable, error) {
bt, err := adt5.AsBalanceTable(s.store, s.State.LockedTable)
if err != nil {
return nil, err
}
return &balanceTable5{bt}, nil
}
func (s *state5) VerifyDealsForActivation(
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error) {
w, vw, _, err := market5.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
return w, vw, err
}
func (s *state5) NextID() (abi.DealID, error) {
return s.State.NextID, nil
}
type balanceTable5 struct {
*adt5.BalanceTable
}
func (bt *balanceTable5) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
asMap := (*adt5.Map)(bt.BalanceTable)
var ta abi.TokenAmount
return asMap.ForEach(&ta, func(key string) error {
a, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
return cb(a, ta)
})
}
type dealStates5 struct {
adt.Array
}
func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal5 market5.DealState
found, err := s.Array.Get(uint64(dealID), &deal5)
if err != nil {
return nil, false, err
}
if !found {
return nil, false, nil
}
deal := fromV5DealState(deal5)
return &deal, true, nil
}
func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
var ds5 market5.DealState
return s.Array.ForEach(&ds5, func(idx int64) error {
return cb(abi.DealID(idx), fromV5DealState(ds5))
})
}
func (s *dealStates5) decode(val *cbg.Deferred) (*DealState, error) {
var ds5 market5.DealState
if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err
}
ds := fromV5DealState(ds5)
return &ds, nil
}
func (s *dealStates5) array() adt.Array {
return s.Array
}
func fromV5DealState(v5 market5.DealState) DealState {
return (DealState)(v5)
}
type dealProposals5 struct {
adt.Array
}
func (s *dealProposals5) Get(dealID abi.DealID) (*DealProposal, bool, error) {
var proposal5 market5.DealProposal
found, err := s.Array.Get(uint64(dealID), &proposal5)
if err != nil {
return nil, false, err
}
if !found {
return nil, false, nil
}
proposal := fromV5DealProposal(proposal5)
return &proposal, true, nil
}
func (s *dealProposals5) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
var dp5 market5.DealProposal
return s.Array.ForEach(&dp5, func(idx int64) error {
return cb(abi.DealID(idx), fromV5DealProposal(dp5))
})
}
func (s *dealProposals5) decode(val *cbg.Deferred) (*DealProposal, error) {
var dp5 market5.DealProposal
if err := dp5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err
}
dp := fromV5DealProposal(dp5)
return &dp, nil
}
func (s *dealProposals5) array() adt.Array {
return s.Array
}
func fromV5DealProposal(v5 market5.DealProposal) DealProposal {
return (DealProposal)(v5)
}
func (s *state5) GetState() interface{} {
return &s.State
}
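For orientation, a hedged sketch of how the v5-backed market State is typically read through the version-agnostic accessors above (the helper name summarizeMarket is an assumption):

    package example

    import (
        "fmt"

        "github.com/filecoin-project/go-state-types/abi"

        "github.com/filecoin-project/lotus/chain/actors/builtin/market"
    )

    // summarizeMarket prints total locked collateral, then walks on-chain deal states.
    func summarizeMarket(st market.State) error {
        locked, err := st.TotalLocked()
        if err != nil {
            return err
        }
        fmt.Println("total locked:", locked)

        states, err := st.States()
        if err != nil {
            return err
        }
        return states.ForEach(func(id abi.DealID, ds market.DealState) error {
            fmt.Printf("deal %d: sector start epoch %d\n", id, ds.SectorStartEpoch)
            return nil
        })
    }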

View File

@ -143,26 +143,26 @@ type Partition interface {
} }
type SectorOnChainInfo struct { type SectorOnChainInfo struct {
SectorNumber abi.SectorNumber SectorNumber abi.SectorNumber
SealProof abi.RegisteredSealProof SealProof abi.RegisteredSealProof
SealedCID cid.Cid SealedCID cid.Cid
DealIDs []abi.DealID DealIDs []abi.DealID
Activation abi.ChainEpoch Activation abi.ChainEpoch
Expiration abi.ChainEpoch Expiration abi.ChainEpoch
DealWeight abi.DealWeight DealWeight abi.DealWeight
VerifiedDealWeight abi.DealWeight VerifiedDealWeight abi.DealWeight
InitialPledge abi.TokenAmount InitialPledge abi.TokenAmount
ExpectedDayReward abi.TokenAmount ExpectedDayReward abi.TokenAmount
ExpectedStoragePledge abi.TokenAmount ExpectedStoragePledge abi.TokenAmount
} }
type SectorPreCommitInfo = miner0.SectorPreCommitInfo type SectorPreCommitInfo = miner0.SectorPreCommitInfo
type SectorPreCommitOnChainInfo struct { type SectorPreCommitOnChainInfo struct {
Info SectorPreCommitInfo Info SectorPreCommitInfo
PreCommitDeposit abi.TokenAmount PreCommitDeposit abi.TokenAmount
PreCommitEpoch abi.ChainEpoch PreCommitEpoch abi.ChainEpoch
DealWeight abi.DealWeight DealWeight abi.DealWeight
VerifiedDealWeight abi.DealWeight VerifiedDealWeight abi.DealWeight
} }
@ -231,17 +231,17 @@ func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi
} }
type MinerInfo struct { type MinerInfo struct {
Owner address.Address // Must be an ID-address. Owner address.Address // Must be an ID-address.
Worker address.Address // Must be an ID-address. Worker address.Address // Must be an ID-address.
NewWorker address.Address // Must be an ID-address. NewWorker address.Address // Must be an ID-address.
ControlAddresses []address.Address // Must be an ID-addresses. ControlAddresses []address.Address // Must be an ID-addresses.
WorkerChangeEpoch abi.ChainEpoch WorkerChangeEpoch abi.ChainEpoch
PeerId *peer.ID PeerId *peer.ID
Multiaddrs []abi.Multiaddrs Multiaddrs []abi.Multiaddrs
WindowPoStProofType abi.RegisteredPoStProof WindowPoStProofType abi.RegisteredPoStProof
SectorSize abi.SectorSize SectorSize abi.SectorSize
WindowPoStPartitionSectors uint64 WindowPoStPartitionSectors uint64
ConsensusFaultElapsed abi.ChainEpoch ConsensusFaultElapsed abi.ChainEpoch
} }
func (mi MinerInfo) IsController(addr address.Address) bool { func (mi MinerInfo) IsController(addr address.Address) bool {
@ -272,25 +272,25 @@ type SectorLocation struct {
} }
type SectorChanges struct { type SectorChanges struct {
Added []SectorOnChainInfo Added []SectorOnChainInfo
Extended []SectorExtensions Extended []SectorExtensions
Removed []SectorOnChainInfo Removed []SectorOnChainInfo
} }
type SectorExtensions struct { type SectorExtensions struct {
From SectorOnChainInfo From SectorOnChainInfo
To SectorOnChainInfo To SectorOnChainInfo
} }
type PreCommitChanges struct { type PreCommitChanges struct {
Added []SectorPreCommitOnChainInfo Added []SectorPreCommitOnChainInfo
Removed []SectorPreCommitOnChainInfo Removed []SectorPreCommitOnChainInfo
} }
type LockedFunds struct { type LockedFunds struct {
VestingFunds abi.TokenAmount VestingFunds abi.TokenAmount
InitialPledgeRequirement abi.TokenAmount InitialPledgeRequirement abi.TokenAmount
PreCommitDeposits abi.TokenAmount PreCommitDeposits abi.TokenAmount
} }
func (lf LockedFunds) TotalLockedFunds() abi.TokenAmount { func (lf LockedFunds) TotalLockedFunds() abi.TokenAmount {

View File

@ -30,6 +30,8 @@ import (
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
) )
func init() { func init() {
@ -50,9 +52,13 @@ func init() {
return load4(store, root) return load4(store, root)
}) })
builtin.RegisterActorState(builtin5.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load5(store, root)
})
} }
var Methods = builtin4.MethodsMiner var Methods = builtin5.MethodsMiner
// Unchanged between v0, v2, v3, and v4 actors // Unchanged between v0, v2, v3, and v4 actors
var WPoStProvingPeriod = miner0.WPoStProvingPeriod var WPoStProvingPeriod = miner0.WPoStProvingPeriod
@ -83,6 +89,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin4.StorageMinerActorCodeID: case builtin4.StorageMinerActorCodeID:
return load4(store, act.Head) return load4(store, act.Head)
case builtin5.StorageMinerActorCodeID:
return load5(store, act.Head)
} }
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
@ -102,6 +111,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version4: case actors.Version4:
return make4(store) return make4(store)
case actors.Version5:
return make5(store)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -121,6 +133,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.StorageMinerActorCodeID, nil return builtin4.StorageMinerActorCodeID, nil
case actors.Version5:
return builtin5.StorageMinerActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)

View File

@ -74,9 +74,9 @@ func (s *state{{.v}}) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error)
func (s *state{{.v}}) LockedFunds() (LockedFunds, error) { func (s *state{{.v}}) LockedFunds() (LockedFunds, error) {
return LockedFunds{ return LockedFunds{
VestingFunds: s.State.LockedFunds, VestingFunds: s.State.LockedFunds,
InitialPledgeRequirement: s.State.InitialPledge{{if (le .v 1)}}Requirement{{end}}, InitialPledgeRequirement: s.State.InitialPledge{{if (le .v 1)}}Requirement{{end}},
PreCommitDeposits: s.State.PreCommitDeposits, PreCommitDeposits: s.State.PreCommitDeposits,
}, nil }, nil
} }
@ -317,19 +317,19 @@ func (s *state{{.v}}) Info() (MinerInfo, error) {
} }
{{end}} {{end}}
mi := MinerInfo{ mi := MinerInfo{
Owner: info.Owner, Owner: info.Owner,
Worker: info.Worker, Worker: info.Worker,
ControlAddresses: info.ControlAddresses, ControlAddresses: info.ControlAddresses,
NewWorker: address.Undef, NewWorker: address.Undef,
WorkerChangeEpoch: -1, WorkerChangeEpoch: -1,
PeerId: pid, PeerId: pid,
Multiaddrs: info.Multiaddrs, Multiaddrs: info.Multiaddrs,
WindowPoStProofType: {{if (ge .v 3)}}info.WindowPoStProofType{{else}}wpp{{end}}, WindowPoStProofType: {{if (ge .v 3)}}info.WindowPoStProofType{{else}}wpp{{end}},
SectorSize: info.SectorSize, SectorSize: info.SectorSize,
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
ConsensusFaultElapsed: {{if (ge .v 2)}}info.ConsensusFaultElapsed{{else}}-1{{end}}, ConsensusFaultElapsed: {{if (ge .v 2)}}info.ConsensusFaultElapsed{{else}}-1{{end}},
} }
if info.PendingWorkerKey != nil { if info.PendingWorkerKey != nil {
@ -477,16 +477,16 @@ func (p *partition{{.v}}) RecoveringSectors() (bitfield.BitField, error) {
func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo { func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo {
{{if (ge .v 2)}} {{if (ge .v 2)}}
return SectorOnChainInfo{ return SectorOnChainInfo{
SectorNumber: v{{.v}}.SectorNumber, SectorNumber: v{{.v}}.SectorNumber,
SealProof: v{{.v}}.SealProof, SealProof: v{{.v}}.SealProof,
SealedCID: v{{.v}}.SealedCID, SealedCID: v{{.v}}.SealedCID,
DealIDs: v{{.v}}.DealIDs, DealIDs: v{{.v}}.DealIDs,
Activation: v{{.v}}.Activation, Activation: v{{.v}}.Activation,
Expiration: v{{.v}}.Expiration, Expiration: v{{.v}}.Expiration,
DealWeight: v{{.v}}.DealWeight, DealWeight: v{{.v}}.DealWeight,
VerifiedDealWeight: v{{.v}}.VerifiedDealWeight, VerifiedDealWeight: v{{.v}}.VerifiedDealWeight,
InitialPledge: v{{.v}}.InitialPledge, InitialPledge: v{{.v}}.InitialPledge,
ExpectedDayReward: v{{.v}}.ExpectedDayReward, ExpectedDayReward: v{{.v}}.ExpectedDayReward,
ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge, ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge,
} }
{{else}} {{else}}
@ -497,10 +497,10 @@ func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorO
func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
{{if (ge .v 2)}} {{if (ge .v 2)}}
return SectorPreCommitOnChainInfo{ return SectorPreCommitOnChainInfo{
Info: (SectorPreCommitInfo)(v{{.v}}.Info), Info: (SectorPreCommitInfo)(v{{.v}}.Info),
PreCommitDeposit: v{{.v}}.PreCommitDeposit, PreCommitDeposit: v{{.v}}.PreCommitDeposit,
PreCommitEpoch: v{{.v}}.PreCommitEpoch, PreCommitEpoch: v{{.v}}.PreCommitEpoch,
DealWeight: v{{.v}}.DealWeight, DealWeight: v{{.v}}.DealWeight,
VerifiedDealWeight: v{{.v}}.VerifiedDealWeight, VerifiedDealWeight: v{{.v}}.VerifiedDealWeight,
} }
{{else}} {{else}}
@ -510,4 +510,4 @@ func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOn
func (s *state{{.v}}) GetState() interface{} { func (s *state{{.v}}) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -0,0 +1,496 @@
package miner
import (
"bytes"
"errors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors/adt"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store) (State, error) {
out := state5{store: store}
out.State = miner5.State{}
return &out, nil
}
type state5 struct {
miner5.State
store adt.Store
}
type deadline5 struct {
miner5.Deadline
store adt.Store
}
type partition5 struct {
miner5.Partition
store adt.Store
}
func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
defer func() {
if r := recover(); r != nil {
err = xerrors.Errorf("failed to get available balance: %w", r)
available = abi.NewTokenAmount(0)
}
}()
// this panics if the miner doesn't have enough funds to cover its locked pledge
available, err = s.GetAvailableBalance(bal)
return available, err
}
func (s *state5) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
return s.CheckVestedFunds(s.store, epoch)
}
func (s *state5) LockedFunds() (LockedFunds, error) {
return LockedFunds{
VestingFunds: s.State.LockedFunds,
InitialPledgeRequirement: s.State.InitialPledge,
PreCommitDeposits: s.State.PreCommitDeposits,
}, nil
}
func (s *state5) FeeDebt() (abi.TokenAmount, error) {
return s.State.FeeDebt, nil
}
func (s *state5) InitialPledge() (abi.TokenAmount, error) {
return s.State.InitialPledge, nil
}
func (s *state5) PreCommitDeposits() (abi.TokenAmount, error) {
return s.State.PreCommitDeposits, nil
}
func (s *state5) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
info, ok, err := s.State.GetSector(s.store, num)
if !ok || err != nil {
return nil, err
}
ret := fromV5SectorOnChainInfo(*info)
return &ret, nil
}
func (s *state5) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
dlIdx, partIdx, err := s.State.FindSector(s.store, num)
if err != nil {
return nil, err
}
return &SectorLocation{
Deadline: dlIdx,
Partition: partIdx,
}, nil
}
func (s *state5) NumLiveSectors() (uint64, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return 0, err
}
var total uint64
if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error {
total += dl.LiveSectors
return nil
}); err != nil {
return 0, err
}
return total, nil
}
// GetSectorExpiration returns the effective expiration of the given sector.
//
// If the sector does not expire early, the Early expiration field is 0.
func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return nil, err
}
// NOTE: this can be optimized significantly.
// 1. If the sector is non-faulty, it will either expire on-time (can be
// learned from the sector info), or in the next quantized expiration
// epoch (i.e., the first element in the partition's expiration queue).
// 2. If it's faulty, it will expire early within the first 14 entries
// of the expiration queue.
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error {
partitions, err := dl.PartitionsArray(s.store)
if err != nil {
return err
}
quant := s.State.QuantSpecForDeadline(dlIdx)
var part miner5.Partition
return partitions.ForEach(&part, func(partIdx int64) error {
if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
return err
} else if !found {
return nil
}
if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
return err
} else if found {
// already terminated
return stopErr
}
q, err := miner5.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner5.PartitionExpirationAmtBitwidth)
if err != nil {
return err
}
var exp miner5.ExpirationSet
return q.ForEach(&exp, func(epoch int64) error {
if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
return err
} else if early {
out.Early = abi.ChainEpoch(epoch)
return nil
}
if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
return err
} else if onTime {
out.OnTime = abi.ChainEpoch(epoch)
return stopErr
}
return nil
})
})
})
if err == stopErr {
err = nil
}
if err != nil {
return nil, err
}
if out.Early == 0 && out.OnTime == 0 {
return nil, xerrors.Errorf("failed to find sector %d", num)
}
return &out, nil
}
func (s *state5) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
info, ok, err := s.State.GetPrecommittedSector(s.store, num)
if !ok || err != nil {
return nil, err
}
ret := fromV5SectorPreCommitOnChainInfo(*info)
return &ret, nil
}
func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner5.LoadSectors(s.store, s.State.Sectors)
if err != nil {
return nil, err
}
// If no sector numbers are specified, load all.
if snos == nil {
infos := make([]*SectorOnChainInfo, 0, sectors.Length())
var info5 miner5.SectorOnChainInfo
if err := sectors.ForEach(&info5, func(_ int64) error {
info := fromV5SectorOnChainInfo(info5)
infos = append(infos, &info)
return nil
}); err != nil {
return nil, err
}
return infos, nil
}
// Otherwise, load selected.
infos5, err := sectors.Load(*snos)
if err != nil {
return nil, err
}
infos := make([]*SectorOnChainInfo, len(infos5))
for i, info5 := range infos5 {
info := fromV5SectorOnChainInfo(*info5)
infos[i] = &info
}
return infos, nil
}
func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return false, err
}
return allocatedSectors.IsSet(uint64(num))
}
func (s *state5) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state5) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return nil, err
}
dl, err := dls.LoadDeadline(s.store, idx)
if err != nil {
return nil, err
}
return &deadline5{*dl, s.store}, nil
}
func (s *state5) ForEachDeadline(cb func(uint64, Deadline) error) error {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return err
}
return dls.ForEach(s.store, func(i uint64, dl *miner5.Deadline) error {
return cb(i, &deadline5{*dl, s.store})
})
}
func (s *state5) NumDeadlines() (uint64, error) {
return miner5.WPoStPeriodDeadlines, nil
}
func (s *state5) DeadlinesChanged(other State) (bool, error) {
other5, ok := other.(*state5)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Deadlines.Equals(other5.Deadlines), nil
}
func (s *state5) MinerInfoChanged(other State) (bool, error) {
other5, ok := other.(*state5)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Info.Equals(other5.State.Info), nil
}
func (s *state5) Info() (MinerInfo, error) {
info, err := s.State.GetInfo(s.store)
if err != nil {
return MinerInfo{}, err
}
var pid *peer.ID
if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
pid = &peerID
}
mi := MinerInfo{
Owner: info.Owner,
Worker: info.Worker,
ControlAddresses: info.ControlAddresses,
NewWorker: address.Undef,
WorkerChangeEpoch: -1,
PeerId: pid,
Multiaddrs: info.Multiaddrs,
WindowPoStProofType: info.WindowPoStProofType,
SectorSize: info.SectorSize,
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
}
if info.PendingWorkerKey != nil {
mi.NewWorker = info.PendingWorkerKey.NewWorker
mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
}
return mi, nil
}
func (s *state5) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
return s.State.RecordedDeadlineInfo(epoch), nil
}
func (s *state5) DeadlineCronActive() (bool, error) {
return s.State.DeadlineCronActive, nil
}
func (s *state5) sectors() (adt.Array, error) {
return adt5.AsArray(s.store, s.Sectors, miner5.SectorsAmtBitwidth)
}
func (s *state5) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
var si miner5.SectorOnChainInfo
err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return SectorOnChainInfo{}, err
}
return fromV5SectorOnChainInfo(si), nil
}
func (s *state5) precommits() (adt.Map, error) {
return adt5.AsMap(s.store, s.PreCommittedSectors, builtin5.DefaultHamtBitwidth)
}
func (s *state5) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
var sp miner5.SectorPreCommitOnChainInfo
err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return SectorPreCommitOnChainInfo{}, err
}
return fromV5SectorPreCommitOnChainInfo(sp), nil
}
func (s *state5) EraseAllUnproven() error {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return err
}
err = dls.ForEach(s.store, func(dindx uint64, dl *miner5.Deadline) error {
ps, err := dl.PartitionsArray(s.store)
if err != nil {
return err
}
var part miner5.Partition
err = ps.ForEach(&part, func(pindx int64) error {
_ = part.ActivateUnproven()
return ps.Set(uint64(pindx), &part)
})
if err != nil {
return err
}
dl.Partitions, err = ps.Root()
if err != nil {
return err
}
return dls.UpdateDeadline(s.store, dindx, dl)
})
if err != nil {
return err
}
return s.State.SaveDeadlines(s.store, dls)
}
func (d *deadline5) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil {
return nil, err
}
return &partition5{*p, d.store}, nil
}
func (d *deadline5) ForEachPartition(cb func(uint64, Partition) error) error {
ps, err := d.Deadline.PartitionsArray(d.store)
if err != nil {
return err
}
var part miner5.Partition
return ps.ForEach(&part, func(i int64) error {
return cb(uint64(i), &partition5{part, d.store})
})
}
func (d *deadline5) PartitionsChanged(other Deadline) (bool, error) {
other5, ok := other.(*deadline5)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !d.Deadline.Partitions.Equals(other5.Deadline.Partitions), nil
}
func (d *deadline5) PartitionsPoSted() (bitfield.BitField, error) {
return d.Deadline.PartitionsPoSted, nil
}
func (d *deadline5) DisputableProofCount() (uint64, error) {
ops, err := d.OptimisticProofsSnapshotArray(d.store)
if err != nil {
return 0, err
}
return ops.Length(), nil
}
func (p *partition5) AllSectors() (bitfield.BitField, error) {
return p.Partition.Sectors, nil
}
func (p *partition5) FaultySectors() (bitfield.BitField, error) {
return p.Partition.Faults, nil
}
func (p *partition5) RecoveringSectors() (bitfield.BitField, error) {
return p.Partition.Recoveries, nil
}
func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo {
return SectorOnChainInfo{
SectorNumber: v5.SectorNumber,
SealProof: v5.SealProof,
SealedCID: v5.SealedCID,
DealIDs: v5.DealIDs,
Activation: v5.Activation,
Expiration: v5.Expiration,
DealWeight: v5.DealWeight,
VerifiedDealWeight: v5.VerifiedDealWeight,
InitialPledge: v5.InitialPledge,
ExpectedDayReward: v5.ExpectedDayReward,
ExpectedStoragePledge: v5.ExpectedStoragePledge,
}
}
func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
return SectorPreCommitOnChainInfo{
Info: (SectorPreCommitInfo)(v5.Info),
PreCommitDeposit: v5.PreCommitDeposit,
PreCommitEpoch: v5.PreCommitEpoch,
DealWeight: v5.DealWeight,
VerifiedDealWeight: v5.VerifiedDealWeight,
}
}
func (s *state5) GetState() interface{} {
return &s.State
}
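The deadline/partition accessors above compose naturally; as a hedged sketch (countFaults is an assumed helper name, not part of the commit), this is the kind of walk they enable:

    package example

    import (
        "fmt"

        "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
    )

    // countFaults walks every deadline and partition and tallies faulty sectors.
    func countFaults(st miner.State) (uint64, error) {
        var total uint64
        err := st.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error {
            return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error {
                faults, err := part.FaultySectors()
                if err != nil {
                    return err
                }
                n, err := faults.Count()
                if err != nil {
                    return err
                }
                fmt.Printf("deadline %d, partition %d: %d faulty sectors\n", dlIdx, partIdx, n)
                total += n
                return nil
            })
        })
        return total, err
    }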

View File

@ -12,7 +12,8 @@ import (
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
msig{{.latestVersion}} "github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
msig{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/multisig"
{{range .versions}} {{range .versions}}
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
{{end}} {{end}}
@ -79,7 +80,7 @@ type State interface {
GetState() interface{} GetState() interface{}
} }
type Transaction = msig{{.latestVersion}}.Transaction type Transaction = msig0.Transaction
var Methods = builtin{{.latestVersion}}.MethodsMultisig var Methods = builtin{{.latestVersion}}.MethodsMultisig
@ -88,7 +89,7 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
{{range .versions}} {{range .versions}}
case actors.Version{{.}}: case actors.Version{{.}}:
return message{{.}}{{"{"}}{{if (ge . 2)}}message0{from}{{else}}from{{end}}} return message{{.}}{{"{"}}{{if (ge . 2)}}message0{from}{{else}}from{{end}}}
{{end}} default: {{end}} default:
panic(fmt.Sprintf("unsupported actors version: %d", version)) panic(fmt.Sprintf("unsupported actors version: %d", version))
} }
} }

View File

@ -43,10 +43,10 @@ func (m message{{.v}}) Create(
{{end}} {{end}}
// Set up constructor parameters for multisig // Set up constructor parameters for multisig
msigParams := &multisig{{.v}}.ConstructorParams{ msigParams := &multisig{{.v}}.ConstructorParams{
Signers: signers, Signers: signers,
NumApprovalsThreshold: threshold, NumApprovalsThreshold: threshold,
UnlockDuration: unlockDuration,{{if (ge .v 2)}} UnlockDuration: unlockDuration,{{if (ge .v 2)}}
StartEpoch: unlockStart,{{end}} StartEpoch: unlockStart,{{end}}
} }
enc, actErr := actors.SerializeParams(msigParams) enc, actErr := actors.SerializeParams(msigParams)
@ -56,7 +56,7 @@ func (m message{{.v}}) Create(
// new actors are created by invoking 'exec' on the init actor with the constructor params // new actors are created by invoking 'exec' on the init actor with the constructor params
execParams := &init{{.v}}.ExecParams{ execParams := &init{{.v}}.ExecParams{
CodeCID: builtin{{.v}}.MultisigActorCodeID, CodeCID: builtin{{.v}}.MultisigActorCodeID,
ConstructorParams: enc, ConstructorParams: enc,
} }
@ -66,11 +66,11 @@ func (m message{{.v}}) Create(
} }
return &types.Message{ return &types.Message{
To: init_.Address, To: init_.Address,
From: m.from, From: m.from,
Method: builtin{{.v}}.MethodsInit.Exec, Method: builtin{{.v}}.MethodsInit.Exec,
Params: enc, Params: enc,
Value: initialAmount, Value: initialAmount,
}, nil }, nil
} }
@ -96,8 +96,8 @@ func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount,
} }
enc, actErr := actors.SerializeParams(&multisig0.ProposeParams{ enc, actErr := actors.SerializeParams(&multisig0.ProposeParams{
To: to, To: to,
Value: amt, Value: amt,
Method: method, Method: method,
Params: params, Params: params,
}) })
@ -106,9 +106,9 @@ func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount,
} }
return &types.Message{ return &types.Message{
To: msig, To: msig,
From: m.from, From: m.from,
Value: abi.NewTokenAmount(0), Value: abi.NewTokenAmount(0),
Method: builtin0.MethodsMultisig.Propose, Method: builtin0.MethodsMultisig.Propose,
Params: enc, Params: enc,
}, nil }, nil
@ -121,9 +121,9 @@ func (m message0) Approve(msig address.Address, txID uint64, hashData *ProposalH
} }
return &types.Message{ return &types.Message{
To: msig, To: msig,
From: m.from, From: m.from,
Value: types.NewInt(0), Value: types.NewInt(0),
Method: builtin0.MethodsMultisig.Approve, Method: builtin0.MethodsMultisig.Approve,
Params: enc, Params: enc,
}, nil }, nil
@ -136,9 +136,9 @@ func (m message0) Cancel(msig address.Address, txID uint64, hashData *ProposalHa
} }
return &types.Message{ return &types.Message{
To: msig, To: msig,
From: m.from, From: m.from,
Value: types.NewInt(0), Value: types.NewInt(0),
Method: builtin0.MethodsMultisig.Cancel, Method: builtin0.MethodsMultisig.Cancel,
Params: enc, Params: enc,
}, nil }, nil

View File

@ -0,0 +1,71 @@
package multisig
import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init"
multisig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/actors"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/types"
)
type message5 struct{ message0 }
func (m message5) Create(
signers []address.Address, threshold uint64,
unlockStart, unlockDuration abi.ChainEpoch,
initialAmount abi.TokenAmount,
) (*types.Message, error) {
lenAddrs := uint64(len(signers))
if lenAddrs < threshold {
return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
}
if threshold == 0 {
threshold = lenAddrs
}
if m.from == address.Undef {
return nil, xerrors.Errorf("must provide source address")
}
// Set up constructor parameters for multisig
msigParams := &multisig5.ConstructorParams{
Signers: signers,
NumApprovalsThreshold: threshold,
UnlockDuration: unlockDuration,
StartEpoch: unlockStart,
}
enc, actErr := actors.SerializeParams(msigParams)
if actErr != nil {
return nil, actErr
}
// new actors are created by invoking 'exec' on the init actor with the constructor params
execParams := &init5.ExecParams{
CodeCID: builtin5.MultisigActorCodeID,
ConstructorParams: enc,
}
enc, actErr = actors.SerializeParams(execParams)
if actErr != nil {
return nil, actErr
}
return &types.Message{
To: init_.Address,
From: m.from,
Method: builtin5.MethodsInit.Exec,
Params: enc,
Value: initialAmount,
}, nil
}
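Callers reach this v5 builder through the version-dispatching Message constructor; a hedged sketch of the intended call pattern (proposeMsigCreation and its parameter choices are assumptions):

    package example

    import (
        "github.com/filecoin-project/go-address"
        "github.com/filecoin-project/go-state-types/abi"

        "github.com/filecoin-project/lotus/chain/actors"
        "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
        "github.com/filecoin-project/lotus/chain/types"
    )

    // proposeMsigCreation builds the init-actor exec message produced by Create above.
    func proposeMsigCreation(creator address.Address, signers []address.Address) (*types.Message, error) {
        return multisig.Message(actors.Version5, creator).Create(
            signers,
            2,                     // approvals threshold
            abi.ChainEpoch(0),     // unlock start
            abi.ChainEpoch(0),     // unlock duration (no vesting)
            abi.NewTokenAmount(0), // initial balance
        )
    }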

View File

@ -12,7 +12,8 @@ import (
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
msig4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/multisig" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -22,6 +23,8 @@ import (
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
@ -45,6 +48,10 @@ func init() {
builtin.RegisterActorState(builtin4.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { builtin.RegisterActorState(builtin4.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load4(store, root) return load4(store, root)
}) })
builtin.RegisterActorState(builtin5.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load5(store, root)
})
} }
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
@ -62,6 +69,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin4.MultisigActorCodeID: case builtin4.MultisigActorCodeID:
return load4(store, act.Head) return load4(store, act.Head)
case builtin5.MultisigActorCodeID:
return load5(store, act.Head)
} }
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
@ -81,6 +91,9 @@ func MakeState(store adt.Store, av actors.Version, signers []address.Address, th
case actors.Version4: case actors.Version4:
return make4(store, signers, threshold, startEpoch, unlockDuration, initialBalance) return make4(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
case actors.Version5:
return make5(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -100,6 +113,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.MultisigActorCodeID, nil return builtin4.MultisigActorCodeID, nil
case actors.Version5:
return builtin5.MultisigActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -123,9 +139,9 @@ type State interface {
GetState() interface{} GetState() interface{}
} }
type Transaction = msig4.Transaction type Transaction = msig0.Transaction
var Methods = builtin4.MethodsMultisig var Methods = builtin5.MethodsMultisig
func Message(version actors.Version, from address.Address) MessageBuilder { func Message(version actors.Version, from address.Address) MessageBuilder {
switch version { switch version {
@ -141,6 +157,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
case actors.Version4: case actors.Version4:
return message4{message0{from}} return message4{message0{from}}
case actors.Version5:
return message5{message0{from}}
default: default:
panic(fmt.Sprintf("unsupported actors version: %d", version)) panic(fmt.Sprintf("unsupported actors version: %d", version))
} }
@ -164,12 +183,12 @@ type MessageBuilder interface {
} }
// this type is the same between v0 and v2 // this type is the same between v0 and v2
type ProposalHashData = msig4.ProposalHashData type ProposalHashData = msig5.ProposalHashData
type ProposeReturn = msig4.ProposeReturn type ProposeReturn = msig5.ProposeReturn
type ProposeParams = msig4.ProposeParams type ProposeParams = msig5.ProposeParams
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
params := msig4.TxnIDParams{ID: msig4.TxnID(id)} params := msig5.TxnIDParams{ID: msig5.TxnID(id)}
if data != nil { if data != nil {
if data.Requester.Protocol() != address.ID { if data.Requester.Protocol() != address.ID {
return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)

View File

@ -124,4 +124,4 @@ func (s *state{{.v}}) decodeTransaction(val *cbg.Deferred) (Transaction, error)
func (s *state{{.v}}) GetState() interface{} { func (s *state{{.v}}) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -0,0 +1,119 @@
package multisig
import (
"bytes"
"encoding/binary"
adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/actors/adt"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
out := state5{store: store}
out.State = msig5.State{}
out.State.Signers = signers
out.State.NumApprovalsThreshold = threshold
out.State.StartEpoch = startEpoch
out.State.UnlockDuration = unlockDuration
out.State.InitialBalance = initialBalance
em, err := adt5.StoreEmptyMap(store, builtin5.DefaultHamtBitwidth)
if err != nil {
return nil, err
}
out.State.PendingTxns = em
return &out, nil
}
type state5 struct {
msig5.State
store adt.Store
}
func (s *state5) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
}
func (s *state5) StartEpoch() (abi.ChainEpoch, error) {
return s.State.StartEpoch, nil
}
func (s *state5) UnlockDuration() (abi.ChainEpoch, error) {
return s.State.UnlockDuration, nil
}
func (s *state5) InitialBalance() (abi.TokenAmount, error) {
return s.State.InitialBalance, nil
}
func (s *state5) Threshold() (uint64, error) {
return s.State.NumApprovalsThreshold, nil
}
func (s *state5) Signers() ([]address.Address, error) {
return s.State.Signers, nil
}
func (s *state5) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
arr, err := adt5.AsMap(s.store, s.State.PendingTxns, builtin5.DefaultHamtBitwidth)
if err != nil {
return err
}
var out msig5.Transaction
return arr.ForEach(&out, func(key string) error {
txid, n := binary.Varint([]byte(key))
if n <= 0 {
return xerrors.Errorf("invalid pending transaction key: %v", key)
}
return cb(txid, (Transaction)(out)) //nolint:unconvert
})
}
func (s *state5) PendingTxnChanged(other State) (bool, error) {
other5, ok := other.(*state5)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.PendingTxns.Equals(other5.PendingTxns), nil
}
func (s *state5) transactions() (adt.Map, error) {
return adt5.AsMap(s.store, s.PendingTxns, builtin5.DefaultHamtBitwidth)
}
func (s *state5) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
var tx msig5.Transaction
if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return Transaction{}, err
}
return tx, nil
}
func (s *state5) GetState() interface{} {
return &s.State
}
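A hedged sketch of how the pending-transaction accessors above are typically consumed (listPendingTxns is an assumed helper name):

    package example

    import (
        "fmt"

        "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
    )

    // listPendingTxns prints each queued proposal and how close it is to the threshold.
    func listPendingTxns(st multisig.State) error {
        threshold, err := st.Threshold()
        if err != nil {
            return err
        }
        return st.ForEachPendingTxn(func(id int64, txn multisig.Transaction) error {
            fmt.Printf("txn %d -> %s (%d of %d approvals)\n",
                id, txn.To, len(txn.Approved), threshold)
            return nil
        })
    }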

View File

@ -21,7 +21,7 @@ func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount)
return nil, aerr return nil, aerr
} }
enc, aerr := actors.SerializeParams(&init{{.v}}.ExecParams{ enc, aerr := actors.SerializeParams(&init{{.v}}.ExecParams{
CodeCID: builtin{{.v}}.PaymentChannelActorCodeID, CodeCID: builtin{{.v}}.PaymentChannelActorCodeID,
ConstructorParams: params, ConstructorParams: params,
}) })
if aerr != nil { if aerr != nil {
@ -29,9 +29,9 @@ func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount)
} }
return &types.Message{ return &types.Message{
To: init_.Address, To: init_.Address,
From: m.from, From: m.from,
Value: initialAmount, Value: initialAmount,
Method: builtin{{.v}}.MethodsInit.Exec, Method: builtin{{.v}}.MethodsInit.Exec,
Params: enc, Params: enc,
}, nil }, nil
@ -39,7 +39,7 @@ func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount)
func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych{{.v}}.UpdateChannelStateParams{ params, aerr := actors.SerializeParams(&paych{{.v}}.UpdateChannelStateParams{
Sv: *sv, Sv: *sv,
Secret: secret, Secret: secret,
}) })
if aerr != nil { if aerr != nil {
@ -47,9 +47,9 @@ func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret [
} }
return &types.Message{ return &types.Message{
To: paych, To: paych,
From: m.from, From: m.from,
Value: abi.NewTokenAmount(0), Value: abi.NewTokenAmount(0),
Method: builtin{{.v}}.MethodsPaych.UpdateChannelState, Method: builtin{{.v}}.MethodsPaych.UpdateChannelState,
Params: params, Params: params,
}, nil }, nil
@ -57,18 +57,18 @@ func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret [
func (m message{{.v}}) Settle(paych address.Address) (*types.Message, error) { func (m message{{.v}}) Settle(paych address.Address) (*types.Message, error) {
return &types.Message{ return &types.Message{
To: paych, To: paych,
From: m.from, From: m.from,
Value: abi.NewTokenAmount(0), Value: abi.NewTokenAmount(0),
Method: builtin{{.v}}.MethodsPaych.Settle, Method: builtin{{.v}}.MethodsPaych.Settle,
}, nil }, nil
} }
func (m message{{.v}}) Collect(paych address.Address) (*types.Message, error) { func (m message{{.v}}) Collect(paych address.Address) (*types.Message, error) {
return &types.Message{ return &types.Message{
To: paych, To: paych,
From: m.from, From: m.from,
Value: abi.NewTokenAmount(0), Value: abi.NewTokenAmount(0),
Method: builtin{{.v}}.MethodsPaych.Collect, Method: builtin{{.v}}.MethodsPaych.Collect,
}, nil }, nil
} }

View File

@ -0,0 +1,74 @@
package paych
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
init5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/init"
paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/actors"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/types"
)
type message5 struct{ from address.Address }
func (m message5) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych5.ConstructorParams{From: m.from, To: to})
if aerr != nil {
return nil, aerr
}
enc, aerr := actors.SerializeParams(&init5.ExecParams{
CodeCID: builtin5.PaymentChannelActorCodeID,
ConstructorParams: params,
})
if aerr != nil {
return nil, aerr
}
return &types.Message{
To: init_.Address,
From: m.from,
Value: initialAmount,
Method: builtin5.MethodsInit.Exec,
Params: enc,
}, nil
}
func (m message5) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
params, aerr := actors.SerializeParams(&paych5.UpdateChannelStateParams{
Sv: *sv,
Secret: secret,
})
if aerr != nil {
return nil, aerr
}
return &types.Message{
To: paych,
From: m.from,
Value: abi.NewTokenAmount(0),
Method: builtin5.MethodsPaych.UpdateChannelState,
Params: params,
}, nil
}
func (m message5) Settle(paych address.Address) (*types.Message, error) {
return &types.Message{
To: paych,
From: m.from,
Value: abi.NewTokenAmount(0),
Method: builtin5.MethodsPaych.Settle,
}, nil
}
func (m message5) Collect(paych address.Address) (*types.Message, error) {
return &types.Message{
To: paych,
From: m.from,
Value: abi.NewTokenAmount(0),
Method: builtin5.MethodsPaych.Collect,
}, nil
}
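As with multisig, this builder is reached through the version switch in paych.go; a hedged sketch of producing the v5 create and settle messages (openAndSettle and its arguments are assumptions):

    package example

    import (
        "github.com/filecoin-project/go-address"
        "github.com/filecoin-project/go-state-types/abi"

        "github.com/filecoin-project/lotus/chain/actors"
        "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
        "github.com/filecoin-project/lotus/chain/types"
    )

    // openAndSettle builds a channel-creation message and a settle message for an
    // already-known channel address.
    func openAndSettle(from, to, channel address.Address, amt abi.TokenAmount) (*types.Message, *types.Message, error) {
        b := paych.Message(actors.Version5, from)
        create, err := b.Create(to, amt)
        if err != nil {
            return nil, nil, err
        }
        settle, err := b.Settle(channel)
        if err != nil {
            return nil, nil, err
        }
        return create, settle, nil
    }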

View File

@ -23,6 +23,8 @@ import (
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
@ -46,6 +48,10 @@ func init() {
builtin.RegisterActorState(builtin4.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { builtin.RegisterActorState(builtin4.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load4(store, root) return load4(store, root)
}) })
builtin.RegisterActorState(builtin5.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load5(store, root)
})
} }
// Load returns an abstract copy of payment channel state, regardless of actor version // Load returns an abstract copy of payment channel state, regardless of actor version
@ -64,6 +70,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin4.PaymentChannelActorCodeID: case builtin4.PaymentChannelActorCodeID:
return load4(store, act.Head) return load4(store, act.Head)
case builtin5.PaymentChannelActorCodeID:
return load5(store, act.Head)
} }
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
@ -83,6 +92,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version4: case actors.Version4:
return make4(store) return make4(store)
case actors.Version5:
return make5(store)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -102,6 +114,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.PaymentChannelActorCodeID, nil return builtin4.PaymentChannelActorCodeID, nil
case actors.Version5:
return builtin5.PaymentChannelActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
@ -155,7 +170,7 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) {
return &sv, nil return &sv, nil
} }
var Methods = builtin4.MethodsPaych var Methods = builtin5.MethodsPaych
func Message(version actors.Version, from address.Address) MessageBuilder { func Message(version actors.Version, from address.Address) MessageBuilder {
switch version { switch version {
@ -172,6 +187,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
case actors.Version4: case actors.Version4:
return message4{from} return message4{from}
case actors.Version5:
return message5{from}
default: default:
panic(fmt.Sprintf("unsupported actors version: %d", version)) panic(fmt.Sprintf("unsupported actors version: %d", version))
} }
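
A short sketch of how callers stay version-agnostic: map the network version to an actors version and let the switch above return the matching builder. network.Version13 is the nv13 (Hyperdrive) upgrade this commit wires in; the channel address below is a placeholder.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
)

func main() {
	from, _ := address.NewFromString("t0100")
	ch, _ := address.NewFromString("t0102") // placeholder channel address

	// nv13 maps to actors.Version5, so this returns the message5 builder.
	av := actors.VersionForNetwork(network.Version13)
	mb := paych.Message(av, from)

	settle, err := mb.Settle(ch)
	if err != nil {
		panic(err)
	}
	fmt.Println("settle method:", settle.Method)
}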

View File

@ -0,0 +1,114 @@
package paych
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors/adt"
paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych"
adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store) (State, error) {
out := state5{store: store}
out.State = paych5.State{}
return &out, nil
}
type state5 struct {
paych5.State
store adt.Store
lsAmt *adt5.Array
}
// Channel owner, who has funded the actor
func (s *state5) From() (address.Address, error) {
return s.State.From, nil
}
// Recipient of payouts from channel
func (s *state5) To() (address.Address, error) {
return s.State.To, nil
}
// Height at which the channel can be `Collected`
func (s *state5) SettlingAt() (abi.ChainEpoch, error) {
return s.State.SettlingAt, nil
}
// Amount successfully redeemed through the payment channel, paid out on `Collect()`
func (s *state5) ToSend() (abi.TokenAmount, error) {
return s.State.ToSend, nil
}
func (s *state5) getOrLoadLsAmt() (*adt5.Array, error) {
if s.lsAmt != nil {
return s.lsAmt, nil
}
// Get the lane state from the chain
lsamt, err := adt5.AsArray(s.store, s.State.LaneStates, paych5.LaneStatesAmtBitwidth)
if err != nil {
return nil, err
}
s.lsAmt = lsamt
return lsamt, nil
}
// Get total number of lanes
func (s *state5) LaneCount() (uint64, error) {
lsamt, err := s.getOrLoadLsAmt()
if err != nil {
return 0, err
}
return lsamt.Length(), nil
}
func (s *state5) GetState() interface{} {
return &s.State
}
// Iterate lane states
func (s *state5) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
// Get the lane state from the chain
lsamt, err := s.getOrLoadLsAmt()
if err != nil {
return err
}
// Note: we use a map instead of an array to store laneStates because the
// client sets the lane ID (the index) and potentially they could use a
// very large index.
var ls paych5.LaneState
return lsamt.ForEach(&ls, func(i int64) error {
return cb(uint64(i), &laneState5{ls})
})
}
type laneState5 struct {
paych5.LaneState
}
func (ls *laneState5) Redeemed() (big.Int, error) {
return ls.LaneState.Redeemed, nil
}
func (ls *laneState5) Nonce() (uint64, error) {
return ls.LaneState.Nonce, nil
}
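
A hedged sketch of reading the abstracted channel state: paych.Load picks the state5 implementation above when the actor's code CID is the v5 payment channel actor. The store and actor arguments are assumed to come from the caller (for example a state-tree lookup); this is a compile-level illustration, not node code.

package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
	"github.com/filecoin-project/lotus/chain/types"
)

// dumpChannel prints the basic channel fields and every lane's nonce and
// redeemed amount through the version-agnostic State interface.
func dumpChannel(store adt.Store, act *types.Actor) error {
	st, err := paych.Load(store, act)
	if err != nil {
		return err
	}
	settlingAt, err := st.SettlingAt()
	if err != nil {
		return err
	}
	toSend, err := st.ToSend()
	if err != nil {
		return err
	}
	fmt.Println("settling at:", settlingAt, "to send:", toSend)

	return st.ForEachLaneState(func(idx uint64, ls paych.LaneState) error {
		nonce, err := ls.Nonce()
		if err != nil {
			return err
		}
		redeemed, err := ls.Redeemed()
		if err != nil {
			return err
		}
		fmt.Println("lane", idx, "nonce", nonce, "redeemed", redeemed)
		return nil
	})
}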

View File

@ -101,7 +101,7 @@ type Claim struct {
func AddClaims(a Claim, b Claim) Claim { func AddClaims(a Claim, b Claim) Claim {
return Claim{ return Claim{
RawBytePower: big.Add(a.RawBytePower, b.RawBytePower), RawBytePower: big.Add(a.RawBytePower, b.RawBytePower),
QualityAdjPower: big.Add(a.QualityAdjPower, b.QualityAdjPower), QualityAdjPower: big.Add(a.QualityAdjPower, b.QualityAdjPower),
} }
} }

View File

@ -22,6 +22,8 @@ import (
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
) )
func init() { func init() {
@ -41,11 +43,15 @@ func init() {
builtin.RegisterActorState(builtin4.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { builtin.RegisterActorState(builtin4.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load4(store, root) return load4(store, root)
}) })
builtin.RegisterActorState(builtin5.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load5(store, root)
})
} }
var ( var (
Address = builtin4.StoragePowerActorAddr Address = builtin5.StoragePowerActorAddr
Methods = builtin4.MethodsPower Methods = builtin5.MethodsPower
) )
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
@ -63,6 +69,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin4.StoragePowerActorCodeID: case builtin4.StoragePowerActorCodeID:
return load4(store, act.Head) return load4(store, act.Head)
case builtin5.StoragePowerActorCodeID:
return load5(store, act.Head)
} }
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
@ -82,6 +91,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version4: case actors.Version4:
return make4(store) return make4(store)
case actors.Version5:
return make5(store)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -101,6 +113,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.StoragePowerActorCodeID, nil return builtin4.StoragePowerActorCodeID, nil
case actors.Version5:
return builtin5.StoragePowerActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
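
For illustration, a minimal sketch (with a caller-provided store and actor, as above) that loads the power actor through this dispatcher and reads the network totals; on an nv13 chain the builtin5 branch added here is the one taken.

package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
	"github.com/filecoin-project/lotus/chain/types"
)

func printNetworkPower(store adt.Store, act *types.Actor) error {
	st, err := power.Load(store, act) // dispatches to load5 for the v5 code CID
	if err != nil {
		return err
	}
	total, err := st.TotalPower()
	if err != nil {
		return err
	}
	above, all, err := st.MinerCounts()
	if err != nil {
		return err
	}
	fmt.Println("raw:", total.RawBytePower, "qa:", total.QualityAdjPower)
	fmt.Println("miners above min power:", above, "of", all)
	return nil
}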

View File

@ -66,7 +66,7 @@ func (s *state{{.v}}) TotalLocked() (abi.TokenAmount, error) {
func (s *state{{.v}}) TotalPower() (Claim, error) { func (s *state{{.v}}) TotalPower() (Claim, error) {
return Claim{ return Claim{
RawBytePower: s.TotalRawBytePower, RawBytePower: s.TotalRawBytePower,
QualityAdjPower: s.TotalQualityAdjPower, QualityAdjPower: s.TotalQualityAdjPower,
}, nil }, nil
} }
@ -74,7 +74,7 @@ func (s *state{{.v}}) TotalPower() (Claim, error) {
// Committed power to the network. Includes miners below the minimum threshold. // Committed power to the network. Includes miners below the minimum threshold.
func (s *state{{.v}}) TotalCommitted() (Claim, error) { func (s *state{{.v}}) TotalCommitted() (Claim, error) {
return Claim{ return Claim{
RawBytePower: s.TotalBytesCommitted, RawBytePower: s.TotalBytesCommitted,
QualityAdjPower: s.TotalQABytesCommitted, QualityAdjPower: s.TotalQABytesCommitted,
}, nil }, nil
} }
@ -90,7 +90,7 @@ func (s *state{{.v}}) MinerPower(addr address.Address) (Claim, bool, error) {
return Claim{}, false, err return Claim{}, false, err
} }
return Claim{ return Claim{
RawBytePower: claim.RawBytePower, RawBytePower: claim.RawBytePower,
QualityAdjPower: claim.QualityAdjPower, QualityAdjPower: claim.QualityAdjPower,
}, ok, nil }, ok, nil
} }
@ -142,7 +142,7 @@ func (s *state{{.v}}) ForEachClaim(cb func(miner address.Address, claim Claim) e
return err return err
} }
return cb(a, Claim{ return cb(a, Claim{
RawBytePower: claim.RawBytePower, RawBytePower: claim.RawBytePower,
QualityAdjPower: claim.QualityAdjPower, QualityAdjPower: claim.QualityAdjPower,
}) })
}) })
@ -195,7 +195,7 @@ func (s *state{{.v}}) decodeClaim(val *cbg.Deferred) (Claim, error) {
func fromV{{.v}}Claim(v{{.v}} power{{.v}}.Claim) Claim { func fromV{{.v}}Claim(v{{.v}} power{{.v}}.Claim) Claim {
return Claim{ return Claim{
RawBytePower: v{{.v}}.RawBytePower, RawBytePower: v{{.v}}.RawBytePower,
QualityAdjPower: v{{.v}}.QualityAdjPower, QualityAdjPower: v{{.v}}.QualityAdjPower,
} }
} }

View File

@ -0,0 +1,187 @@
package power
import (
"bytes"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
power5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/power"
adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store) (State, error) {
out := state5{store: store}
s, err := power5.ConstructState(store)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state5 struct {
power5.State
store adt.Store
}
func (s *state5) TotalLocked() (abi.TokenAmount, error) {
return s.TotalPledgeCollateral, nil
}
func (s *state5) TotalPower() (Claim, error) {
return Claim{
RawBytePower: s.TotalRawBytePower,
QualityAdjPower: s.TotalQualityAdjPower,
}, nil
}
// Committed power to the network. Includes miners below the minimum threshold.
func (s *state5) TotalCommitted() (Claim, error) {
return Claim{
RawBytePower: s.TotalBytesCommitted,
QualityAdjPower: s.TotalQABytesCommitted,
}, nil
}
func (s *state5) MinerPower(addr address.Address) (Claim, bool, error) {
claims, err := s.claims()
if err != nil {
return Claim{}, false, err
}
var claim power5.Claim
ok, err := claims.Get(abi.AddrKey(addr), &claim)
if err != nil {
return Claim{}, false, err
}
return Claim{
RawBytePower: claim.RawBytePower,
QualityAdjPower: claim.QualityAdjPower,
}, ok, nil
}
func (s *state5) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
}
func (s *state5) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
return builtin.FromV5FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
}
func (s *state5) MinerCounts() (uint64, uint64, error) {
return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
}
func (s *state5) ListAllMiners() ([]address.Address, error) {
claims, err := s.claims()
if err != nil {
return nil, err
}
var miners []address.Address
err = claims.ForEach(nil, func(k string) error {
a, err := address.NewFromBytes([]byte(k))
if err != nil {
return err
}
miners = append(miners, a)
return nil
})
if err != nil {
return nil, err
}
return miners, nil
}
func (s *state5) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
claims, err := s.claims()
if err != nil {
return err
}
var claim power5.Claim
return claims.ForEach(&claim, func(k string) error {
a, err := address.NewFromBytes([]byte(k))
if err != nil {
return err
}
return cb(a, Claim{
RawBytePower: claim.RawBytePower,
QualityAdjPower: claim.QualityAdjPower,
})
})
}
func (s *state5) ClaimsChanged(other State) (bool, error) {
other5, ok := other.(*state5)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Claims.Equals(other5.State.Claims), nil
}
func (s *state5) SetTotalQualityAdjPower(p abi.StoragePower) error {
s.State.TotalQualityAdjPower = p
return nil
}
func (s *state5) SetTotalRawBytePower(p abi.StoragePower) error {
s.State.TotalRawBytePower = p
return nil
}
func (s *state5) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
s.State.ThisEpochQualityAdjPower = p
return nil
}
func (s *state5) SetThisEpochRawBytePower(p abi.StoragePower) error {
s.State.ThisEpochRawBytePower = p
return nil
}
func (s *state5) GetState() interface{} {
return &s.State
}
func (s *state5) claims() (adt.Map, error) {
return adt5.AsMap(s.store, s.Claims, builtin5.DefaultHamtBitwidth)
}
func (s *state5) decodeClaim(val *cbg.Deferred) (Claim, error) {
var ci power5.Claim
if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return Claim{}, err
}
return fromV5Claim(ci), nil
}
func fromV5Claim(v5 power5.Claim) Claim {
return Claim{
RawBytePower: v5.RawBytePower,
QualityAdjPower: v5.QualityAdjPower,
}
}
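
A brief sketch of enumerating per-miner claims through the interface implemented above; store and actor are again assumed inputs.

package example

import (
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
	"github.com/filecoin-project/lotus/chain/types"
)

func listClaims(store adt.Store, act *types.Actor) error {
	st, err := power.Load(store, act)
	if err != nil {
		return err
	}
	// ForEachClaim walks the claims HAMT, yielding each miner address
	// together with its raw-byte and quality-adjusted power.
	return st.ForEachClaim(func(miner address.Address, claim power.Claim) error {
		fmt.Println(miner, claim.RawBytePower, claim.QualityAdjPower)
		return nil
	})
}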

View File

@ -17,6 +17,8 @@ import (
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
@ -39,11 +41,15 @@ func init() {
builtin.RegisterActorState(builtin4.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { builtin.RegisterActorState(builtin4.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load4(store, root) return load4(store, root)
}) })
builtin.RegisterActorState(builtin5.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load5(store, root)
})
} }
var ( var (
Address = builtin4.RewardActorAddr Address = builtin5.RewardActorAddr
Methods = builtin4.MethodsReward Methods = builtin5.MethodsReward
) )
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
@ -61,6 +67,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin4.RewardActorCodeID: case builtin4.RewardActorCodeID:
return load4(store, act.Head) return load4(store, act.Head)
case builtin5.RewardActorCodeID:
return load5(store, act.Head)
} }
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
@ -80,6 +89,9 @@ func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.Storage
case actors.Version4: case actors.Version4:
return make4(store, currRealizedPower) return make4(store, currRealizedPower)
case actors.Version5:
return make5(store, currRealizedPower)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -99,6 +111,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.RewardActorCodeID, nil return builtin4.RewardActorCodeID, nil
case actors.Version5:
return builtin5.RewardActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
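
A minimal sketch of consuming the reward actor through this dispatcher; the store and actor are assumed to be supplied by the caller.

package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
	"github.com/filecoin-project/lotus/chain/types"
)

func printEpochReward(store adt.Store, act *types.Actor) error {
	st, err := reward.Load(store, act) // load5 on an nv13 chain
	if err != nil {
		return err
	}
	r, err := st.ThisEpochReward()
	if err != nil {
		return err
	}
	baseline, err := st.ThisEpochBaselinePower()
	if err != nil {
		return err
	}
	fmt.Println("this epoch reward:", r, "baseline power:", baseline)
	return nil
}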

View File

@ -110,4 +110,4 @@ func (s *state{{.v}}) PreCommitDepositForPower(networkQAPower builtin.FilterEsti
func (s *state{{.v}}) GetState() interface{} { func (s *state{{.v}}) GetState() interface{} {
return &s.State return &s.State
} }

View File

@ -0,0 +1,98 @@
package reward
import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
reward5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/reward"
smoothing5 "github.com/filecoin-project/specs-actors/v5/actors/util/smoothing"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
out := state5{store: store}
out.State = *reward5.ConstructState(currRealizedPower)
return &out, nil
}
type state5 struct {
reward5.State
store adt.Store
}
func (s *state5) ThisEpochReward() (abi.TokenAmount, error) {
return s.State.ThisEpochReward, nil
}
func (s *state5) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
return builtin.FilterEstimate{
PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
}, nil
}
func (s *state5) ThisEpochBaselinePower() (abi.StoragePower, error) {
return s.State.ThisEpochBaselinePower, nil
}
func (s *state5) TotalStoragePowerReward() (abi.TokenAmount, error) {
return s.State.TotalStoragePowerReward, nil
}
func (s *state5) EffectiveBaselinePower() (abi.StoragePower, error) {
return s.State.EffectiveBaselinePower, nil
}
func (s *state5) EffectiveNetworkTime() (abi.ChainEpoch, error) {
return s.State.EffectiveNetworkTime, nil
}
func (s *state5) CumsumBaseline() (reward5.Spacetime, error) {
return s.State.CumsumBaseline, nil
}
func (s *state5) CumsumRealized() (reward5.Spacetime, error) {
return s.State.CumsumRealized, nil
}
func (s *state5) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
return miner5.InitialPledgeForPower(
qaPower,
s.State.ThisEpochBaselinePower,
s.State.ThisEpochRewardSmoothed,
smoothing5.FilterEstimate{
PositionEstimate: networkQAPower.PositionEstimate,
VelocityEstimate: networkQAPower.VelocityEstimate,
},
circSupply,
), nil
}
func (s *state5) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
return miner5.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
smoothing5.FilterEstimate{
PositionEstimate: networkQAPower.PositionEstimate,
VelocityEstimate: networkQAPower.VelocityEstimate,
},
sectorWeight), nil
}
func (s *state5) GetState() interface{} {
return &s.State
}
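
A hedged sketch of the pledge math exposed above: the filter-estimate values and supply figures below are made-up placeholders purely to show the call shape, not realistic network numbers.

package example

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
	"github.com/filecoin-project/lotus/chain/types"
)

func examplePledge(store adt.Store, act *types.Actor) error {
	st, err := reward.Load(store, act)
	if err != nil {
		return err
	}
	// Placeholder network estimate; real values come from the power actor.
	qaPowerEstimate := builtin.FilterEstimate{
		PositionEstimate: big.NewInt(1 << 40),
		VelocityEstimate: big.NewInt(0),
	}
	pledge, err := st.InitialPledgeForPower(
		abi.NewStoragePower(32<<30),       // 32 GiB of QA power (illustrative)
		abi.NewTokenAmount(0),             // network total pledge (placeholder)
		&qaPowerEstimate,                  // network QA power estimate
		abi.NewTokenAmount(1_000_000_000), // circulating supply (placeholder)
	)
	if err != nil {
		return err
	}
	fmt.Println("initial pledge:", pledge)
	return nil
}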

View File

@ -13,10 +13,12 @@ import (
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
) )
var ( var (
Address = builtin4.SystemActorAddr Address = builtin5.SystemActorAddr
) )
func MakeState(store adt.Store, av actors.Version) (State, error) { func MakeState(store adt.Store, av actors.Version) (State, error) {
@ -34,6 +36,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version4: case actors.Version4:
return make4(store) return make4(store)
case actors.Version5:
return make5(store)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -53,6 +58,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.SystemActorCodeID, nil return builtin4.SystemActorCodeID, nil
case actors.Version5:
return builtin5.SystemActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)

View File

@ -0,0 +1,35 @@
package system
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
system5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/system"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store) (State, error) {
out := state5{store: store}
out.State = system5.State{}
return &out, nil
}
type state5 struct {
system5.State
store adt.Store
}
func (s *state5) GetState() interface{} {
return &s.State
}

View File

@ -9,7 +9,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
{{if (ge .v 3)}} builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin" {{if (ge .v 3)}} builtin{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin"
{{end}} verifreg{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/verifreg" {{end}} verifreg{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/verifreg"
adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt" adt{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/util/adt"
) )

View File

@ -0,0 +1,75 @@
package verifreg
import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg"
adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store, rootKeyAddress address.Address) (State, error) {
out := state5{store: store}
s, err := verifreg5.ConstructState(store, rootKeyAddress)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state5 struct {
verifreg5.State
store adt.Store
}
func (s *state5) RootKey() (address.Address, error) {
return s.State.RootKey, nil
}
func (s *state5) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return getDataCap(s.store, actors.Version5, s.verifiedClients, addr)
}
func (s *state5) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return getDataCap(s.store, actors.Version5, s.verifiers, addr)
}
func (s *state5) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version5, s.verifiers, cb)
}
func (s *state5) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version5, s.verifiedClients, cb)
}
func (s *state5) verifiedClients() (adt.Map, error) {
return adt5.AsMap(s.store, s.VerifiedClients, builtin5.DefaultHamtBitwidth)
}
func (s *state5) verifiers() (adt.Map, error) {
return adt5.AsMap(s.store, s.Verifiers, builtin5.DefaultHamtBitwidth)
}
func (s *state5) GetState() interface{} {
return &s.State
}
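
A minimal sketch of a datacap lookup against the state above; the client address is a placeholder and store/actor are assumed inputs.

package example

import (
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
	"github.com/filecoin-project/lotus/chain/types"
)

func printDataCap(store adt.Store, act *types.Actor) error {
	st, err := verifreg.Load(store, act)
	if err != nil {
		return err
	}
	client, _ := address.NewFromString("t0100") // placeholder client
	found, dcap, err := st.VerifiedClientDataCap(client)
	if err != nil {
		return err
	}
	if !found {
		fmt.Println("not a verified client")
		return nil
	}
	fmt.Println("remaining datacap:", dcap)
	return nil
}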

View File

@ -17,6 +17,8 @@ import (
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
@ -41,11 +43,15 @@ func init() {
return load4(store, root) return load4(store, root)
}) })
builtin.RegisterActorState(builtin5.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
return load5(store, root)
})
} }
var ( var (
Address = builtin4.VerifiedRegistryActorAddr Address = builtin5.VerifiedRegistryActorAddr
Methods = builtin4.MethodsVerifiedRegistry Methods = builtin5.MethodsVerifiedRegistry
) )
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
@ -63,6 +69,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case builtin4.VerifiedRegistryActorCodeID: case builtin4.VerifiedRegistryActorCodeID:
return load4(store, act.Head) return load4(store, act.Head)
case builtin5.VerifiedRegistryActorCodeID:
return load5(store, act.Head)
} }
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
@ -82,6 +91,9 @@ func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Addres
case actors.Version4: case actors.Version4:
return make4(store, rootKeyAddress) return make4(store, rootKeyAddress)
case actors.Version5:
return make5(store, rootKeyAddress)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -101,6 +113,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) {
case actors.Version4: case actors.Version4:
return builtin4.VerifiedRegistryActorCodeID, nil return builtin4.VerifiedRegistryActorCodeID, nil
case actors.Version5:
return builtin5.VerifiedRegistryActorCodeID, nil
} }
return cid.Undef, xerrors.Errorf("unknown actor version %d", av) return cid.Undef, xerrors.Errorf("unknown actor version %d", av)

View File

@ -27,14 +27,19 @@ import (
miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner" miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner"
verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg" verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
paych4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/paych" builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg"
paych5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/paych"
) )
const ( const (
ChainFinality = miner4.ChainFinality ChainFinality = miner5.ChainFinality
SealRandomnessLookback = ChainFinality SealRandomnessLookback = ChainFinality
PaychSettleDelay = paych4.SettleDelay PaychSettleDelay = paych5.SettleDelay
MaxPreCommitRandomnessLookback = builtin4.EpochsInDay + SealRandomnessLookback MaxPreCommitRandomnessLookback = builtin5.EpochsInDay + SealRandomnessLookback
) )
// SetSupportedProofTypes sets supported proof types, across all actor versions. // SetSupportedProofTypes sets supported proof types, across all actor versions.
@ -55,6 +60,8 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
miner4.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) miner4.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
miner4.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) miner4.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner5.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
AddSupportedProofTypes(types...) AddSupportedProofTypes(types...)
} }
@ -84,6 +91,15 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
miner4.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} miner4.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner4.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} miner4.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner5.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
wpp, err := t.RegisteredWindowPoStProof()
if err != nil {
// Fine to panic, this is a test-only method
panic(err)
}
miner5.WindowPoStProofTypes[wpp] = struct{}{}
} }
} }
@ -100,11 +116,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
miner4.PreCommitChallengeDelay = delay miner4.PreCommitChallengeDelay = delay
miner5.PreCommitChallengeDelay = delay
} }
// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
func GetPreCommitChallengeDelay() abi.ChainEpoch { func GetPreCommitChallengeDelay() abi.ChainEpoch {
return miner4.PreCommitChallengeDelay return miner5.PreCommitChallengeDelay
} }
// SetConsensusMinerMinPower sets the minimum power an individual miner must // SetConsensusMinerMinPower sets the minimum power an individual miner must
@ -126,6 +144,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
policy.ConsensusMinerMinPower = p policy.ConsensusMinerMinPower = p
} }
for _, policy := range builtin5.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
} }
// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should // SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
@ -140,6 +162,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) {
verifreg4.MinVerifiedDealSize = size verifreg4.MinVerifiedDealSize = size
verifreg5.MinVerifiedDealSize = size
} }
func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch { func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch {
@ -161,6 +185,10 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) ab
return miner4.MaxProveCommitDuration[t] return miner4.MaxProveCommitDuration[t]
case actors.Version5:
return miner5.MaxProveCommitDuration[t]
default: default:
panic("unsupported actors version") panic("unsupported actors version")
} }
@ -189,13 +217,17 @@ func DealProviderCollateralBounds(
return market4.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) return market4.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
case actors.Version5:
return market5.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
default: default:
panic("unsupported actors version") panic("unsupported actors version")
} }
} }
func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
return market4.DealDurationBounds(pieceSize) return market5.DealDurationBounds(pieceSize)
} }
// Sets the challenge window and scales the proving period to match (such that // Sets the challenge window and scales the proving period to match (such that
@ -222,6 +254,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) {
// scale it if we're scaling the challenge period. // scale it if we're scaling the challenge period.
miner4.WPoStDisputeWindow = period * 30 miner4.WPoStDisputeWindow = period * 30
miner5.WPoStChallengeWindow = period
miner5.WPoStProvingPeriod = period * abi.ChainEpoch(miner5.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner5.WPoStDisputeWindow = period * 30
} }
func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
@ -234,22 +273,22 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
} }
func GetMaxSectorExpirationExtension() abi.ChainEpoch { func GetMaxSectorExpirationExtension() abi.ChainEpoch {
return miner4.MaxSectorExpirationExtension return miner5.MaxSectorExpirationExtension
} }
// TODO: we'll probably need to abstract over this better in the future. // TODO: we'll probably need to abstract over this better in the future.
func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) { func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) {
sectorsPerPart, err := builtin4.PoStProofWindowPoStPartitionSectors(p) sectorsPerPart, err := builtin5.PoStProofWindowPoStPartitionSectors(p)
if err != nil { if err != nil {
return 0, err return 0, err
} }
return int(miner4.AddressedSectorsMax / sectorsPerPart), nil return int(miner5.AddressedSectorsMax / sectorsPerPart), nil
} }
func GetDefaultSectorSize() abi.SectorSize { func GetDefaultSectorSize() abi.SectorSize {
// supported sector sizes are the same across versions. // supported sector sizes are the same across versions.
szs := make([]abi.SectorSize, 0, len(miner4.PreCommitSealProofTypesV8)) szs := make([]abi.SectorSize, 0, len(miner5.PreCommitSealProofTypesV8))
for spt := range miner4.PreCommitSealProofTypesV8 { for spt := range miner5.PreCommitSealProofTypesV8 {
ss, err := spt.SectorSize() ss, err := spt.SectorSize()
if err != nil { if err != nil {
panic(err) panic(err)
@ -265,12 +304,16 @@ func GetDefaultSectorSize() abi.SectorSize {
return szs[0] return szs[0]
} }
func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
return abi.RegisteredAggregationProof_SnarkPackV1
}
func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch {
if nwVer <= network.Version10 { if nwVer <= network.Version10 {
return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
} }
return builtin4.SealProofPoliciesV11[proof].SectorMaxLifetime return builtin5.SealProofPoliciesV11[proof].SectorMaxLifetime
} }
func GetAddressedSectorsMax(nwVer network.Version) int { func GetAddressedSectorsMax(nwVer network.Version) int {
@ -288,6 +331,9 @@ func GetAddressedSectorsMax(nwVer network.Version) int {
case actors.Version4: case actors.Version4:
return miner4.AddressedSectorsMax return miner4.AddressedSectorsMax
case actors.Version5:
return miner5.AddressedSectorsMax
default: default:
panic("unsupported network version") panic("unsupported network version")
} }
@ -313,6 +359,10 @@ func GetDeclarationsMax(nwVer network.Version) int {
return miner4.DeclarationsMax return miner4.DeclarationsMax
case actors.Version5:
return miner5.DeclarationsMax
default: default:
panic("unsupported network version") panic("unsupported network version")
} }
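
A small sketch exercising the nv13-relevant policy getters touched here, including the new GetDefaultAggregationProof; the proof types chosen below are illustrative.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/policy"
)

func main() {
	// Aggregated ProveCommits in v5 use SnarkPack.
	fmt.Println("aggregation proof:", policy.GetDefaultAggregationProof())

	// Per-version prove-commit window; Version5 reads miner5.MaxProveCommitDuration.
	d := policy.GetMaxProveCommitDuration(actors.Version5, abi.RegisteredSealProof_StackedDrg32GiBV1_1)
	fmt.Println("max prove-commit duration:", d)

	// Partition limit is now derived from the v5 builtin policy.
	parts, err := policy.GetMaxPoStPartitions(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
	if err != nil {
		panic(err)
	}
	fmt.Println("max PoSt partitions:", parts)
}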

View File

@ -19,9 +19,9 @@ import (
) )
const ( const (
ChainFinality = miner{{.latestVersion}}.ChainFinality ChainFinality = miner{{.latestVersion}}.ChainFinality
SealRandomnessLookback = ChainFinality SealRandomnessLookback = ChainFinality
PaychSettleDelay = paych{{.latestVersion}}.SettleDelay PaychSettleDelay = paych{{.latestVersion}}.SettleDelay
MaxPreCommitRandomnessLookback = builtin{{.latestVersion}}.EpochsInDay + SealRandomnessLookback MaxPreCommitRandomnessLookback = builtin{{.latestVersion}}.EpochsInDay + SealRandomnessLookback
) )
@ -31,10 +31,12 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
{{range .versions}} {{range .versions}}
{{if (eq . 0)}} {{if (eq . 0)}}
miner{{.}}.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types)) miner{{.}}.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
{{else}} {{else if (le . 4)}}
miner{{.}}.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types)) miner{{.}}.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner{{.}}.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2) miner{{.}}.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
{{else}}
miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
{{end}} {{end}}
{{end}} {{end}}
@ -51,15 +53,24 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
// Set for all miner versions. // Set for all miner versions.
{{range .versions}} {{range .versions}}
{{if (eq . 0)}} {{if (eq . 0)}}
miner{{.}}.SupportedProofTypes[t] = struct{}{} miner{{.}}.SupportedProofTypes[t] = struct{}{}
{{else}} {{else if (le . 4)}}
miner{{.}}.PreCommitSealProofTypesV0[t] = struct{}{} miner{{.}}.PreCommitSealProofTypesV0[t] = struct{}{}
miner{{.}}.PreCommitSealProofTypesV7[t] = struct{}{} miner{{.}}.PreCommitSealProofTypesV7[t] = struct{}{}
miner{{.}}.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} miner{{.}}.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
{{end}} {{else}}
{{end}} miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
wpp, err := t.RegisteredWindowPoStProof()
if err != nil {
// Fine to panic, this is a test-only method
panic(err)
}
miner{{.}}.WindowPoStProofTypes[wpp] = struct{}{}
{{end}}
{{end}}
} }
} }
@ -197,9 +208,13 @@ func GetDefaultSectorSize() abi.SectorSize {
return szs[0] return szs[0]
} }
func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
return abi.RegisteredAggregationProof_SnarkPackV1
}
func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch { func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch {
if nwVer <= network.Version10 { if nwVer <= network.Version10 {
return builtin{{.latestVersion}}.SealProofPoliciesV0[proof].SectorMaxLifetime return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
} }
return builtin{{.latestVersion}}.SealProofPoliciesV11[proof].SectorMaxLifetime return builtin{{.latestVersion}}.SealProofPoliciesV11[proof].SectorMaxLifetime

View File

View File

@ -8,15 +8,16 @@ import (
type Version int type Version int
var LatestVersion = 4 var LatestVersion = 5
var Versions = []int{0, 2, 3, LatestVersion} var Versions = []int{0, 2, 3, 4, LatestVersion}
const ( const (
Version0 Version = 0 Version0 Version = 0
Version2 Version = 2 Version2 Version = 2
Version3 Version = 3 Version3 Version = 3
Version4 Version = 4 Version4 Version = 4
Version5 Version = 5
) )
// Converts a network version into an actors adt version. // Converts a network version into an actors adt version.
@ -30,6 +31,8 @@ func VersionForNetwork(version network.Version) Version {
return Version3 return Version3
case network.Version12: case network.Version12:
return Version4 return Version4
case network.Version13:
return Version5
default: default:
panic(fmt.Sprintf("unsupported network version %d", version)) panic(fmt.Sprintf("unsupported network version %d", version))
} }

View File

@ -26,7 +26,7 @@ import (
"go.opencensus.io/trace" "go.opencensus.io/trace"
"golang.org/x/xerrors" "golang.org/x/xerrors"
proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/blockstore"
@ -52,7 +52,7 @@ const msgsPerBlock = 20
//nolint:deadcode,varcheck //nolint:deadcode,varcheck
var log = logging.Logger("gen") var log = logging.Logger("gen")
var ValidWpostForTesting = []proof2.PoStProof{{ var ValidWpostForTesting = []proof5.PoStProof{{
ProofBytes: []byte("valid proof"), ProofBytes: []byte("valid proof"),
}} }}
@ -76,9 +76,10 @@ type ChainGen struct {
w *wallet.LocalWallet w *wallet.LocalWallet
eppProvs map[address.Address]WinningPoStProver eppProvs map[address.Address]WinningPoStProver
Miners []address.Address Miners []address.Address
receivers []address.Address receivers []address.Address
// a SecP address
banker address.Address banker address.Address
bankerNonce uint64 bankerNonce uint64
@ -111,7 +112,7 @@ var DefaultRemainderAccountActor = genesis.Actor{
Meta: remAccMeta.ActorMeta(), Meta: remAccMeta.ActorMeta(),
} }
func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeSchedule) (*ChainGen, error) {
j := journal.NilJournal() j := journal.NilJournal()
// TODO: we really shouldn't modify a global variable here. // TODO: we really shouldn't modify a global variable here.
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
@ -246,7 +247,10 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{} mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{}
} }
sm := stmgr.NewStateManager(cs) sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, us)
if err != nil {
return nil, xerrors.Errorf("initing stmgr: %w", err)
}
miners := []address.Address{maddr1, maddr2} miners := []address.Address{maddr1, maddr2}
@ -284,6 +288,14 @@ func NewGenerator() (*ChainGen, error) {
return NewGeneratorWithSectors(1) return NewGeneratorWithSectors(1)
} }
func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return NewGeneratorWithSectorsAndUpgradeSchedule(numSectors, stmgr.DefaultUpgradeSchedule())
}
func NewGeneratorWithUpgradeSchedule(us stmgr.UpgradeSchedule) (*ChainGen, error) {
return NewGeneratorWithSectorsAndUpgradeSchedule(1, us)
}
func (cg *ChainGen) StateManager() *stmgr.StateManager { func (cg *ChainGen) StateManager() *stmgr.StateManager {
return cg.sm return cg.sm
} }
@ -386,7 +398,7 @@ type MinedTipSet struct {
} }
func (cg *ChainGen) NextTipSet() (*MinedTipSet, error) { func (cg *ChainGen) NextTipSet() (*MinedTipSet, error) {
mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners) mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners, 0)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -399,7 +411,7 @@ func (cg *ChainGen) SetWinningPoStProver(m address.Address, wpp WinningPoStProve
cg.eppProvs[m] = wpp cg.eppProvs[m] = wpp
} }
func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) { func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address, nulls abi.ChainEpoch) (*MinedTipSet, error) {
ms, err := cg.GetMessages(cg) ms, err := cg.GetMessages(cg)
if err != nil { if err != nil {
return nil, xerrors.Errorf("get random messages: %w", err) return nil, xerrors.Errorf("get random messages: %w", err)
@ -410,21 +422,23 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad
msgs[i] = ms msgs[i] = ms
} }
fts, err := cg.NextTipSetFromMinersWithMessages(base, miners, msgs) fts, err := cg.NextTipSetFromMinersWithMessagesAndNulls(base, miners, msgs, nulls)
if err != nil { if err != nil {
return nil, err return nil, err
} }
cg.CurTipset = fts
return &MinedTipSet{ return &MinedTipSet{
TipSet: fts, TipSet: fts,
Messages: ms, Messages: ms,
}, nil }, nil
} }
func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage) (*store.FullTipSet, error) { func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) (*store.FullTipSet, error) {
var blks []*types.FullBlock var blks []*types.FullBlock
for round := base.Height() + 1; len(blks) == 0; round++ { for round := base.Height() + nulls + 1; len(blks) == 0; round++ {
for mi, m := range miners { for mi, m := range miners {
bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round) bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round)
if err != nil { if err != nil {
@ -457,12 +471,14 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners
return nil, err return nil, err
} }
cg.CurTipset = fts
return fts, nil return fts, nil
} }
func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket, func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket,
eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch, eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch,
wpost []proof2.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { wpost []proof5.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) {
var ts uint64 var ts uint64
if cg.Timestamper != nil { if cg.Timestamper != nil {
@ -576,7 +592,11 @@ func (mca mca) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipS
return nil, xerrors.Errorf("loading tipset key: %w", err) return nil, xerrors.Errorf("loading tipset key: %w", err)
} }
return mca.sm.ChainStore().GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) if randEpoch > build.UpgradeHyperdriveHeight {
return mca.sm.ChainStore().GetChainRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy)
}
return mca.sm.ChainStore().GetChainRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy)
} }
func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
@ -585,7 +605,11 @@ func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSe
return nil, xerrors.Errorf("loading tipset key: %w", err) return nil, xerrors.Errorf("loading tipset key: %w", err)
} }
return mca.sm.ChainStore().GetBeaconRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) if randEpoch > build.UpgradeHyperdriveHeight {
return mca.sm.ChainStore().GetBeaconRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy)
}
return mca.sm.ChainStore().GetBeaconRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy)
} }
func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) {
@ -600,7 +624,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr
type WinningPoStProver interface { type WinningPoStProver interface {
GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error)
ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error)
} }
type wppProvider struct{} type wppProvider struct{}
@ -609,7 +633,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
return []uint64{0}, nil return []uint64{0}, nil
} }
func (wpp *wppProvider) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) {
return ValidWpostForTesting, nil return ValidWpostForTesting, nil
} }
@ -676,15 +700,19 @@ type genFakeVerifier struct{}
var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil) var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil)
func (m genFakeVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { func (m genFakeVerifier) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) {
return true, nil return true, nil
} }
func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
panic("not supported") panic("not supported")
} }
func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
panic("not supported")
}
func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
panic("not supported") panic("not supported")
} }
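
A hedged sketch of how a test might drive the updated generator: build it with the default upgrade schedule and mine a tipset after a couple of null rounds via the new nulls parameter. This is test-style illustration code with minimal error handling.

package example

import (
	"testing"

	"github.com/filecoin-project/lotus/chain/gen"
	"github.com/filecoin-project/lotus/chain/stmgr"
)

func TestMineWithNullRounds(t *testing.T) {
	cg, err := gen.NewGeneratorWithUpgradeSchedule(stmgr.DefaultUpgradeSchedule())
	if err != nil {
		t.Fatal(err)
	}

	// Skip two epochs (null rounds) before the next mined tipset.
	mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners, 2)
	if err != nil {
		t.Fatal(err)
	}
	t.Log("mined at height", mts.TipSet.TipSet().Height())
}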

View File

@ -43,7 +43,7 @@ import (
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
runtime2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
"github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
@ -62,7 +62,7 @@ func MinerAddress(genesisIndex uint64) address.Address {
} }
type fakedSigSyscalls struct { type fakedSigSyscalls struct {
runtime2.Syscalls runtime5.Syscalls
} }
func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error {
@ -70,7 +70,7 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer
} }
func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
return func(ctx context.Context, rt *vm.Runtime) runtime2.Syscalls { return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls {
return &fakedSigSyscalls{ return &fakedSigSyscalls{
base(ctx, rt), base(ctx, rt),
} }
@ -488,13 +488,25 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
// TODO: copied from actors test harness, deduplicate or remove from here // TODO: copied from actors test harness, deduplicate or remove from here
type fakeRand struct{} type fakeRand struct{}
func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { func (fr *fakeRand) GetChainRandomnessLookingForward(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32) out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
return out, nil return out, nil
} }
func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { func (fr *fakeRand) GetChainRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
return out, nil
}
func (fr *fakeRand) GetBeaconRandomnessLookingForward(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
return out, nil
}
func (fr *fakeRand) GetBeaconRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32) out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
return out, nil return out, nil

View File

@ -126,10 +126,14 @@ type MessagePool struct {
republished map[cid.Cid]struct{} republished map[cid.Cid]struct{}
// do NOT access this map directly, use isLocal, setLocal, and forEachLocal respectively
localAddrs map[address.Address]struct{} localAddrs map[address.Address]struct{}
// do NOT access this map directly, use getPendingMset, setPendingMset, deletePendingMset, forEachPending, and clearPending respectively
pending map[address.Address]*msgSet pending map[address.Address]*msgSet
keyCache map[address.Address]address.Address
curTsLk sync.Mutex // DO NOT LOCK INSIDE lk curTsLk sync.Mutex // DO NOT LOCK INSIDE lk
curTs *types.TipSet curTs *types.TipSet
@ -329,6 +333,20 @@ func (ms *msgSet) getRequiredFunds(nonce uint64) types.BigInt {
return types.BigInt{Int: requiredFunds} return types.BigInt{Int: requiredFunds}
} }
func (ms *msgSet) toSlice() []*types.SignedMessage {
set := make([]*types.SignedMessage, 0, len(ms.msgs))
for _, m := range ms.msgs {
set = append(set, m)
}
sort.Slice(set, func(i, j int) bool {
return set[i].Message.Nonce < set[j].Message.Nonce
})
return set
}
func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
cache, _ := lru.New2Q(build.BlsSignatureCacheSize) cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q(build.VerifSigCacheSize) verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
@ -350,6 +368,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
repubTrigger: make(chan struct{}, 1), repubTrigger: make(chan struct{}, 1),
localAddrs: make(map[address.Address]struct{}), localAddrs: make(map[address.Address]struct{}),
pending: make(map[address.Address]*msgSet), pending: make(map[address.Address]*msgSet),
keyCache: make(map[address.Address]address.Address),
minGasPrice: types.NewInt(0), minGasPrice: types.NewInt(0),
pruneTrigger: make(chan struct{}, 1), pruneTrigger: make(chan struct{}, 1),
pruneCooldown: make(chan struct{}, 1), pruneCooldown: make(chan struct{}, 1),
@ -371,9 +390,11 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
// enable initial prunes // enable initial prunes
mp.pruneCooldown <- struct{}{} mp.pruneCooldown <- struct{}{}
ctx, cancel := context.WithCancel(context.TODO())
// load the current tipset and subscribe to head changes _before_ loading local messages // load the current tipset and subscribe to head changes _before_ loading local messages
mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error { mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error {
err := mp.HeadChange(rev, app) err := mp.HeadChange(ctx, rev, app)
if err != nil { if err != nil {
log.Errorf("mpool head notif handler error: %+v", err) log.Errorf("mpool head notif handler error: %+v", err)
} }
@ -384,7 +405,8 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
mp.lk.Lock() mp.lk.Lock()
go func() { go func() {
err := mp.loadLocal() defer cancel()
err := mp.loadLocal(ctx)
mp.lk.Unlock() mp.lk.Unlock()
mp.curTsLk.Unlock() mp.curTsLk.Unlock()
@ -395,12 +417,106 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
log.Info("mpool ready") log.Info("mpool ready")
mp.runLoop() mp.runLoop(ctx)
}() }()
return mp, nil return mp, nil
} }
func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) {
// check the cache
a, f := mp.keyCache[addr]
if f {
return a, nil
}
// resolve the address
ka, err := mp.api.StateAccountKeyAtFinality(ctx, addr, mp.curTs)
if err != nil {
return address.Undef, err
}
// place both entries in the cache (may both be key addresses, which is fine)
mp.keyCache[addr] = ka
mp.keyCache[ka] = ka
return ka, nil
}
func (mp *MessagePool) getPendingMset(ctx context.Context, addr address.Address) (*msgSet, bool, error) {
ra, err := mp.resolveToKey(ctx, addr)
if err != nil {
return nil, false, err
}
ms, f := mp.pending[ra]
return ms, f, nil
}
func (mp *MessagePool) setPendingMset(ctx context.Context, addr address.Address, ms *msgSet) error {
ra, err := mp.resolveToKey(ctx, addr)
if err != nil {
return err
}
mp.pending[ra] = ms
return nil
}
// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have
func (mp *MessagePool) forEachPending(f func(address.Address, *msgSet)) {
for la, ms := range mp.pending {
f(la, ms)
}
}
func (mp *MessagePool) deletePendingMset(ctx context.Context, addr address.Address) error {
ra, err := mp.resolveToKey(ctx, addr)
if err != nil {
return err
}
delete(mp.pending, ra)
return nil
}
// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have
func (mp *MessagePool) clearPending() {
mp.pending = make(map[address.Address]*msgSet)
}
func (mp *MessagePool) isLocal(ctx context.Context, addr address.Address) (bool, error) {
ra, err := mp.resolveToKey(ctx, addr)
if err != nil {
return false, err
}
_, f := mp.localAddrs[ra]
return f, nil
}
func (mp *MessagePool) setLocal(ctx context.Context, addr address.Address) error {
ra, err := mp.resolveToKey(ctx, addr)
if err != nil {
return err
}
mp.localAddrs[ra] = struct{}{}
return nil
}
// This method isn't strictly necessary, since it doesn't resolve any addresses, but it's safer to have
func (mp *MessagePool) forEachLocal(ctx context.Context, f func(context.Context, address.Address)) {
for la := range mp.localAddrs {
f(ctx, la)
}
}
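Taken together, these accessors ensure localAddrs and pending are only ever keyed by the reorg-stable key address that resolveToKey returns, never by an ID address. The note in addLocked below mentions a possible getOrCreatePendingMset; a rough sketch of such a helper, assuming it would sit in this same file and, like addLocked, be called with mp.lk held:

// Hypothetical helper, not part of this commit: resolve the sender once and
// return its msgSet, creating one seeded with the current state nonce if missing.
// The caller must hold mp.lk.
func (mp *MessagePool) getOrCreatePendingMset(ctx context.Context, addr address.Address) (*msgSet, error) {
	ra, err := mp.resolveToKey(ctx, addr)
	if err != nil {
		return nil, err
	}
	if ms, ok := mp.pending[ra]; ok {
		return ms, nil
	}
	nonce, err := mp.getStateNonce(addr, mp.curTs)
	if err != nil {
		return nil, err
	}
	ms := newMsgSet(nonce)
	mp.pending[ra] = ms
	return ms, nil
}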
func (mp *MessagePool) Close() error { func (mp *MessagePool) Close() error {
close(mp.closer) close(mp.closer)
return nil return nil
@ -418,15 +534,15 @@ func (mp *MessagePool) Prune() {
mp.pruneTrigger <- struct{}{} mp.pruneTrigger <- struct{}{}
} }
func (mp *MessagePool) runLoop() { func (mp *MessagePool) runLoop(ctx context.Context) {
for { for {
select { select {
case <-mp.repubTk.C: case <-mp.repubTk.C:
if err := mp.republishPendingMessages(); err != nil { if err := mp.republishPendingMessages(ctx); err != nil {
log.Errorf("error while republishing messages: %s", err) log.Errorf("error while republishing messages: %s", err)
} }
case <-mp.repubTrigger: case <-mp.repubTrigger:
if err := mp.republishPendingMessages(); err != nil { if err := mp.republishPendingMessages(ctx); err != nil {
log.Errorf("error while republishing messages: %s", err) log.Errorf("error while republishing messages: %s", err)
} }
@ -442,8 +558,10 @@ func (mp *MessagePool) runLoop() {
} }
} }
func (mp *MessagePool) addLocal(m *types.SignedMessage) error { func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) error {
mp.localAddrs[m.Message.From] = struct{}{} if err := mp.setLocal(ctx, m.Message.From); err != nil {
return err
}
msgb, err := m.Serialize() msgb, err := m.Serialize()
if err != nil { if err != nil {
@ -475,7 +593,7 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
return false, xerrors.Errorf("message will not be included in a block: %w", err) return false, xerrors.Errorf("message will not be included in a block: %w", err)
} }
// this checks if the GasFeeCap is suffisciently high for inclusion in the next 20 blocks // this checks if the GasFeeCap is sufficiently high for inclusion in the next 20 blocks
// if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely // if the GasFeeCap is too low, we soft reject the message (Ignore in pubsub) and rely
// on republish to push it through later, if the baseFee has fallen. // on republish to push it through later, if the baseFee has fallen.
// this is a defensive check that stops minimum baseFee spam attacks from overloading validation // this is a defensive check that stops minimum baseFee spam attacks from overloading validation
@ -510,7 +628,7 @@ func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.T
return publish, nil return publish, nil
} }
func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
err := mp.checkMessage(m) err := mp.checkMessage(m)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
@ -523,7 +641,7 @@ func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) {
}() }()
mp.curTsLk.Lock() mp.curTsLk.Lock()
publish, err := mp.addTs(m, mp.curTs, true, false) publish, err := mp.addTs(ctx, m, mp.curTs, true, false)
if err != nil { if err != nil {
mp.curTsLk.Unlock() mp.curTsLk.Unlock()
return cid.Undef, err return cid.Undef, err
@ -576,7 +694,7 @@ func (mp *MessagePool) checkMessage(m *types.SignedMessage) error {
return nil return nil
} }
func (mp *MessagePool) Add(m *types.SignedMessage) error { func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
err := mp.checkMessage(m) err := mp.checkMessage(m)
if err != nil { if err != nil {
return err return err
@ -591,7 +709,7 @@ func (mp *MessagePool) Add(m *types.SignedMessage) error {
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
_, err = mp.addTs(m, mp.curTs, false, false) _, err = mp.addTs(ctx, m, mp.curTs, false, false)
return err return err
} }
@ -631,7 +749,7 @@ func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error {
return nil return nil
} }
func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet) error { func (mp *MessagePool) checkBalance(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet) error {
balance, err := mp.getStateBalance(m.Message.From, curTs) balance, err := mp.getStateBalance(m.Message.From, curTs)
if err != nil { if err != nil {
return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure) return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrSoftValidationFailure)
@ -645,7 +763,12 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet)
// add Value for soft failure check // add Value for soft failure check
//requiredFunds = types.BigAdd(requiredFunds, m.Message.Value) //requiredFunds = types.BigAdd(requiredFunds, m.Message.Value)
mset, ok := mp.pending[m.Message.From] mset, ok, err := mp.getPendingMset(ctx, m.Message.From)
if err != nil {
log.Debugf("mpoolcheckbalance failed to get pending mset: %s", err)
return err
}
if ok { if ok {
requiredFunds = types.BigAdd(requiredFunds, mset.getRequiredFunds(m.Message.Nonce)) requiredFunds = types.BigAdd(requiredFunds, mset.getRequiredFunds(m.Message.Nonce))
} }
@ -659,7 +782,7 @@ func (mp *MessagePool) checkBalance(m *types.SignedMessage, curTs *types.TipSet)
return nil return nil
} }
func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) { func (mp *MessagePool) addTs(ctx context.Context, m *types.SignedMessage, curTs *types.TipSet, local, untrusted bool) (bool, error) {
snonce, err := mp.getStateNonce(m.Message.From, curTs) snonce, err := mp.getStateNonce(m.Message.From, curTs)
if err != nil { if err != nil {
return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure) return false, xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrSoftValidationFailure)
@ -677,17 +800,17 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local,
return false, err return false, err
} }
if err := mp.checkBalance(m, curTs); err != nil { if err := mp.checkBalance(ctx, m, curTs); err != nil {
return false, err return false, err
} }
err = mp.addLocked(m, !local, untrusted) err = mp.addLocked(ctx, m, !local, untrusted)
if err != nil { if err != nil {
return false, err return false, err
} }
if local { if local {
err = mp.addLocal(m) err = mp.addLocal(ctx, m)
if err != nil { if err != nil {
return false, xerrors.Errorf("error persisting local message: %w", err) return false, xerrors.Errorf("error persisting local message: %w", err)
} }
@ -696,7 +819,7 @@ func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet, local,
return publish, nil return publish, nil
} }
func (mp *MessagePool) addLoaded(m *types.SignedMessage) error { func (mp *MessagePool) addLoaded(ctx context.Context, m *types.SignedMessage) error {
err := mp.checkMessage(m) err := mp.checkMessage(m)
if err != nil { if err != nil {
return err return err
@ -722,21 +845,21 @@ func (mp *MessagePool) addLoaded(m *types.SignedMessage) error {
return err return err
} }
if err := mp.checkBalance(m, curTs); err != nil { if err := mp.checkBalance(ctx, m, curTs); err != nil {
return err return err
} }
return mp.addLocked(m, false, false) return mp.addLocked(ctx, m, false, false)
} }
func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error { func (mp *MessagePool) addSkipChecks(ctx context.Context, m *types.SignedMessage) error {
mp.lk.Lock() mp.lk.Lock()
defer mp.lk.Unlock() defer mp.lk.Unlock()
return mp.addLocked(m, false, false) return mp.addLocked(ctx, m, false, false)
} }
func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool) error { func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, strict, untrusted bool) error {
log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce) log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce)
if m.Signature.Type == crypto.SigTypeBLS { if m.Signature.Type == crypto.SigTypeBLS {
mp.blsSigCache.Add(m.Cid(), m.Signature) mp.blsSigCache.Add(m.Cid(), m.Signature)
@ -752,7 +875,13 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool)
return err return err
} }
mset, ok := mp.pending[m.Message.From] // Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work
mset, ok, err := mp.getPendingMset(ctx, m.Message.From)
if err != nil {
log.Debug(err)
return err
}
if !ok { if !ok {
nonce, err := mp.getStateNonce(m.Message.From, mp.curTs) nonce, err := mp.getStateNonce(m.Message.From, mp.curTs)
if err != nil { if err != nil {
@ -760,7 +889,9 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool)
} }
mset = newMsgSet(nonce) mset = newMsgSet(nonce)
mp.pending[m.Message.From] = mset if err = mp.setPendingMset(ctx, m.Message.From, mset); err != nil {
return xerrors.Errorf("failed to set pending mset: %w", err)
}
} }
incr, err := mset.add(m, mp, strict, untrusted) incr, err := mset.add(m, mp, strict, untrusted)
@ -795,14 +926,14 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool)
return nil return nil
} }
func (mp *MessagePool) GetNonce(_ context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) { func (mp *MessagePool) GetNonce(ctx context.Context, addr address.Address, _ types.TipSetKey) (uint64, error) {
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
mp.lk.Lock() mp.lk.Lock()
defer mp.lk.Unlock() defer mp.lk.Unlock()
return mp.getNonceLocked(addr, mp.curTs) return mp.getNonceLocked(ctx, addr, mp.curTs)
} }
// GetActor should not be used. It is only here to satisfy interface mess caused by lite node handling // GetActor should not be used. It is only here to satisfy interface mess caused by lite node handling
@ -812,13 +943,18 @@ func (mp *MessagePool) GetActor(_ context.Context, addr address.Address, _ types
return mp.api.GetActorAfter(addr, mp.curTs) return mp.api.GetActorAfter(addr, mp.curTs)
} }
func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet) (uint64, error) { func (mp *MessagePool) getNonceLocked(ctx context.Context, addr address.Address, curTs *types.TipSet) (uint64, error) {
stateNonce, err := mp.getStateNonce(addr, curTs) // sanity check stateNonce, err := mp.getStateNonce(addr, curTs) // sanity check
if err != nil { if err != nil {
return 0, err return 0, err
} }
mset, ok := mp.pending[addr] mset, ok, err := mp.getPendingMset(ctx, addr)
if err != nil {
log.Debugf("mpoolgetnonce failed to get mset: %s", err)
return 0, err
}
if ok { if ok {
if stateNonce > mset.nextNonce { if stateNonce > mset.nextNonce {
log.Errorf("state nonce was larger than mset.nextNonce (%d > %d)", stateNonce, mset.nextNonce) log.Errorf("state nonce was larger than mset.nextNonce (%d > %d)", stateNonce, mset.nextNonce)
@ -855,7 +991,7 @@ func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (
// - strict checks are enabled // - strict checks are enabled
// - extra strict add checks are used when adding the messages to the msgSet // - extra strict add checks are used when adding the messages to the msgSet
// that means: no nonce gaps, at most 10 pending messages for the actor // that means: no nonce gaps, at most 10 pending messages for the actor
func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) { func (mp *MessagePool) PushUntrusted(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
err := mp.checkMessage(m) err := mp.checkMessage(m)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
@ -868,7 +1004,7 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) {
}() }()
mp.curTsLk.Lock() mp.curTsLk.Lock()
publish, err := mp.addTs(m, mp.curTs, true, true) publish, err := mp.addTs(ctx, m, mp.curTs, true, true)
if err != nil { if err != nil {
mp.curTsLk.Unlock() mp.curTsLk.Unlock()
return cid.Undef, err return cid.Undef, err
@ -890,15 +1026,20 @@ func (mp *MessagePool) PushUntrusted(m *types.SignedMessage) (cid.Cid, error) {
return m.Cid(), nil return m.Cid(), nil
} }
func (mp *MessagePool) Remove(from address.Address, nonce uint64, applied bool) { func (mp *MessagePool) Remove(ctx context.Context, from address.Address, nonce uint64, applied bool) {
mp.lk.Lock() mp.lk.Lock()
defer mp.lk.Unlock() defer mp.lk.Unlock()
mp.remove(from, nonce, applied) mp.remove(ctx, from, nonce, applied)
} }
func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool) { func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce uint64, applied bool) {
mset, ok := mp.pending[from] mset, ok, err := mp.getPendingMset(ctx, from)
if err != nil {
log.Debugf("mpoolremove failed to get mset: %s", err)
return
}
if !ok { if !ok {
return return
} }
@ -923,58 +1064,57 @@ func (mp *MessagePool) remove(from address.Address, nonce uint64, applied bool)
mset.rm(nonce, applied) mset.rm(nonce, applied)
if len(mset.msgs) == 0 { if len(mset.msgs) == 0 {
delete(mp.pending, from) if err = mp.deletePendingMset(ctx, from); err != nil {
log.Debugf("mpoolremove failed to delete mset: %s", err)
return
}
} }
} }
func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) { func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
mp.lk.Lock() mp.lk.Lock()
defer mp.lk.Unlock() defer mp.lk.Unlock()
return mp.allPending() return mp.allPending(ctx)
} }
func (mp *MessagePool) allPending() ([]*types.SignedMessage, *types.TipSet) { func (mp *MessagePool) allPending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
out := make([]*types.SignedMessage, 0) out := make([]*types.SignedMessage, 0)
for a := range mp.pending {
out = append(out, mp.pendingFor(a)...) mp.forEachPending(func(a address.Address, mset *msgSet) {
} out = append(out, mset.toSlice()...)
})
return out, mp.curTs return out, mp.curTs
} }
func (mp *MessagePool) PendingFor(a address.Address) ([]*types.SignedMessage, *types.TipSet) { func (mp *MessagePool) PendingFor(ctx context.Context, a address.Address) ([]*types.SignedMessage, *types.TipSet) {
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
mp.lk.Lock() mp.lk.Lock()
defer mp.lk.Unlock() defer mp.lk.Unlock()
return mp.pendingFor(a), mp.curTs return mp.pendingFor(ctx, a), mp.curTs
} }
func (mp *MessagePool) pendingFor(a address.Address) []*types.SignedMessage { func (mp *MessagePool) pendingFor(ctx context.Context, a address.Address) []*types.SignedMessage {
mset := mp.pending[a] mset, ok, err := mp.getPendingMset(ctx, a)
if mset == nil || len(mset.msgs) == 0 { if err != nil {
log.Debugf("mpoolpendingfor failed to get mset: %s", err)
return nil return nil
} }
set := make([]*types.SignedMessage, 0, len(mset.msgs)) if mset == nil || !ok || len(mset.msgs) == 0 {
return nil
for _, m := range mset.msgs {
set = append(set, m)
} }
sort.Slice(set, func(i, j int) bool { return mset.toSlice()
return set[i].Message.Nonce < set[j].Message.Nonce
})
return set
} }
func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) error { func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, apply []*types.TipSet) error {
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
@ -991,7 +1131,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
rm := func(from address.Address, nonce uint64) { rm := func(from address.Address, nonce uint64) {
s, ok := rmsgs[from] s, ok := rmsgs[from]
if !ok { if !ok {
mp.Remove(from, nonce, true) mp.Remove(ctx, from, nonce, true)
return return
} }
@ -1000,7 +1140,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
return return
} }
mp.Remove(from, nonce, true) mp.Remove(ctx, from, nonce, true)
} }
maybeRepub := func(cid cid.Cid) { maybeRepub := func(cid cid.Cid) {
@ -1071,7 +1211,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
for _, s := range rmsgs { for _, s := range rmsgs {
for _, msg := range s { for _, msg := range s {
if err := mp.addSkipChecks(msg); err != nil { if err := mp.addSkipChecks(ctx, msg); err != nil {
log.Errorf("Failed to readd message from reorg to mpool: %s", err) log.Errorf("Failed to readd message from reorg to mpool: %s", err)
} }
} }
@ -1079,7 +1219,7 @@ func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet)
if len(revert) > 0 && futureDebug { if len(revert) > 0 && futureDebug {
mp.lk.Lock() mp.lk.Lock()
msgs, ts := mp.allPending() msgs, ts := mp.allPending(ctx)
mp.lk.Unlock() mp.lk.Unlock()
buckets := map[address.Address]*statBucket{} buckets := map[address.Address]*statBucket{}
@ -1286,7 +1426,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err
return out, nil return out, nil
} }
func (mp *MessagePool) loadLocal() error { func (mp *MessagePool) loadLocal(ctx context.Context) error {
res, err := mp.localMsgs.Query(query.Query{}) res, err := mp.localMsgs.Query(query.Query{})
if err != nil { if err != nil {
return xerrors.Errorf("query local messages: %w", err) return xerrors.Errorf("query local messages: %w", err)
@ -1302,7 +1442,7 @@ func (mp *MessagePool) loadLocal() error {
return xerrors.Errorf("unmarshaling local message: %w", err) return xerrors.Errorf("unmarshaling local message: %w", err)
} }
if err := mp.addLoaded(&sm); err != nil { if err := mp.addLoaded(ctx, &sm); err != nil {
if xerrors.Is(err, ErrNonceTooLow) { if xerrors.Is(err, ErrNonceTooLow) {
continue // todo: drop the message from local cache (if above certain confidence threshold) continue // todo: drop the message from local cache (if above certain confidence threshold)
} }
@ -1310,47 +1450,61 @@ func (mp *MessagePool) loadLocal() error {
log.Errorf("adding local message: %+v", err) log.Errorf("adding local message: %+v", err)
} }
mp.localAddrs[sm.Message.From] = struct{}{} if err = mp.setLocal(ctx, sm.Message.From); err != nil {
log.Debugf("mpoolloadLocal errored: %s", err)
return err
}
} }
return nil return nil
} }
func (mp *MessagePool) Clear(local bool) { func (mp *MessagePool) Clear(ctx context.Context, local bool) {
mp.lk.Lock() mp.lk.Lock()
defer mp.lk.Unlock() defer mp.lk.Unlock()
// remove everything if local is true, including removing local messages from // remove everything if local is true, including removing local messages from
// the datastore // the datastore
if local { if local {
for a := range mp.localAddrs { mp.forEachLocal(ctx, func(ctx context.Context, la address.Address) {
mset, ok := mp.pending[a] mset, ok, err := mp.getPendingMset(ctx, la)
if !ok { if err != nil {
continue log.Warnf("errored while getting pending mset: %w", err)
return
} }
for _, m := range mset.msgs { if ok {
err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes()))) for _, m := range mset.msgs {
if err != nil { err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes())))
log.Warnf("error deleting local message: %s", err) if err != nil {
log.Warnf("error deleting local message: %s", err)
}
} }
} }
} })
mp.pending = make(map[address.Address]*msgSet) mp.clearPending()
mp.republished = nil mp.republished = nil
return return
} }
// remove everything except the local messages mp.forEachPending(func(a address.Address, ms *msgSet) {
for a := range mp.pending { isLocal, err := mp.isLocal(ctx, a)
_, isLocal := mp.localAddrs[a] if err != nil {
if isLocal { log.Warnf("errored while determining isLocal: %w", err)
continue return
} }
delete(mp.pending, a)
} if isLocal {
return
}
if err = mp.deletePendingMset(ctx, a); err != nil {
log.Warnf("errored while deleting mset: %w", err)
return
}
})
} }
func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt { func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {

View File

@ -153,7 +153,7 @@ func (tma *testMpoolAPI) GetActorAfter(addr address.Address, ts *types.TipSet) (
}, nil }, nil
} }
func (tma *testMpoolAPI) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { func (tma *testMpoolAPI) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 { if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 {
return address.Undef, fmt.Errorf("given address was not a key addr") return address.Undef, fmt.Errorf("given address was not a key addr")
} }
@ -202,7 +202,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS
func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) { func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) {
t.Helper() t.Helper()
n, err := mp.GetNonce(context.Background(), addr, types.EmptyTSK) n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -214,7 +214,7 @@ func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64
func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) { func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) {
t.Helper() t.Helper()
if err := mp.Add(msg); err != nil { if err := mp.Add(context.TODO(), msg); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -296,9 +296,9 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) {
tma.applyBlock(t, a) tma.applyBlock(t, a)
tsa := mock.TipSet(a) tsa := mock.TipSet(a)
_, _ = mp.Pending() _, _ = mp.Pending(context.TODO())
selm, _ := mp.SelectMessages(tsa, 1) selm, _ := mp.SelectMessages(context.Background(), tsa, 1)
if len(selm) == 0 { if len(selm) == 0 {
t.Fatal("should have returned the rest of the messages") t.Fatal("should have returned the rest of the messages")
} }
@ -358,7 +358,7 @@ func TestRevertMessages(t *testing.T) {
assertNonce(t, mp, sender, 4) assertNonce(t, mp, sender, 4)
p, _ := mp.Pending() p, _ := mp.Pending(context.TODO())
fmt.Printf("%+v\n", p) fmt.Printf("%+v\n", p)
if len(p) != 3 { if len(p) != 3 {
t.Fatal("expected three messages in mempool") t.Fatal("expected three messages in mempool")
@ -399,14 +399,14 @@ func TestPruningSimple(t *testing.T) {
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
smsg := mock.MkMessage(sender, target, uint64(i), w) smsg := mock.MkMessage(sender, target, uint64(i), w)
if err := mp.Add(smsg); err != nil { if err := mp.Add(context.TODO(), smsg); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
for i := 10; i < 50; i++ { for i := 10; i < 50; i++ {
smsg := mock.MkMessage(sender, target, uint64(i), w) smsg := mock.MkMessage(sender, target, uint64(i), w)
if err := mp.Add(smsg); err != nil { if err := mp.Add(context.TODO(), smsg); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -416,7 +416,7 @@ func TestPruningSimple(t *testing.T) {
mp.Prune() mp.Prune()
msgs, _ := mp.Pending() msgs, _ := mp.Pending(context.TODO())
if len(msgs) != 5 { if len(msgs) != 5 {
t.Fatal("expected only 5 messages in pool, got: ", len(msgs)) t.Fatal("expected only 5 messages in pool, got: ", len(msgs))
} }
@ -458,7 +458,7 @@ func TestLoadLocal(t *testing.T) {
msgs := make(map[cid.Cid]struct{}) msgs := make(map[cid.Cid]struct{})
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
cid, err := mp.Push(m) cid, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -474,7 +474,7 @@ func TestLoadLocal(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
pmsgs, _ := mp.Pending() pmsgs, _ := mp.Pending(context.TODO())
if len(msgs) != len(pmsgs) { if len(msgs) != len(pmsgs) {
t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs)) t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs))
} }
@ -529,7 +529,7 @@ func TestClearAll(t *testing.T) {
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
_, err := mp.Push(m) _, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -540,9 +540,9 @@ func TestClearAll(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
mp.Clear(true) mp.Clear(context.Background(), true)
pending, _ := mp.Pending() pending, _ := mp.Pending(context.TODO())
if len(pending) > 0 { if len(pending) > 0 {
t.Fatalf("cleared the mpool, but got %d pending messages", len(pending)) t.Fatalf("cleared the mpool, but got %d pending messages", len(pending))
} }
@ -584,7 +584,7 @@ func TestClearNonLocal(t *testing.T) {
gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}]
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
_, err := mp.Push(m) _, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -595,9 +595,9 @@ func TestClearNonLocal(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
mp.Clear(false) mp.Clear(context.Background(), false)
pending, _ := mp.Pending() pending, _ := mp.Pending(context.TODO())
if len(pending) != 10 { if len(pending) != 10 {
t.Fatalf("expected 10 pending messages, but got %d instead", len(pending)) t.Fatalf("expected 10 pending messages, but got %d instead", len(pending))
} }
@ -654,7 +654,7 @@ func TestUpdates(t *testing.T) {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
_, err := mp.Push(m) _, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -26,7 +26,7 @@ type Provider interface {
PutMessage(m types.ChainMsg) (cid.Cid, error) PutMessage(m types.ChainMsg) (cid.Cid, error)
PubSubPublish(string, []byte) error PubSubPublish(string, []byte) error
GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error) GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error)
StateAccountKey(context.Context, address.Address, *types.TipSet) (address.Address, error) StateAccountKeyAtFinality(context.Context, address.Address, *types.TipSet) (address.Address, error)
MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error)
MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error) MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error)
LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error)
@ -41,6 +41,8 @@ type mpoolProvider struct {
lite messagesigner.MpoolNonceAPI lite messagesigner.MpoolNonceAPI
} }
var _ Provider = (*mpoolProvider)(nil)
func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider { func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
return &mpoolProvider{sm: sm, ps: ps} return &mpoolProvider{sm: sm, ps: ps}
} }
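The new var _ Provider = (*mpoolProvider)(nil) line (and the matching var _ MpoolNonceAPI assertion in the messagesigner mock further down) is the usual Go compile-time check that a concrete type still satisfies an interface, worth having here because the interface method was just renamed from StateAccountKey to StateAccountKeyAtFinality. The pattern in isolation, as a self-contained illustration unrelated to lotus:

package main

import "io"

type nopWriter struct{}

func (nopWriter) Write(p []byte) (int, error) { return len(p), nil }

// Compile-time assertion: the build breaks right here if nopWriter ever
// stops implementing io.Writer, e.g. after a method rename.
var _ io.Writer = nopWriter{}

func main() {}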
@ -97,8 +99,8 @@ func (mpp *mpoolProvider) GetActorAfter(addr address.Address, ts *types.TipSet)
return st.GetActor(addr) return st.GetActor(addr)
} }
func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { func (mpp *mpoolProvider) StateAccountKeyAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
return mpp.sm.ResolveToKeyAddress(ctx, addr, ts) return mpp.sm.ResolveToKeyAddressAtFinality(ctx, addr, ts)
} }
func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {

View File

@ -57,13 +57,18 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
mpCfg := mp.getConfig() mpCfg := mp.getConfig()
// we never prune priority addresses // we never prune priority addresses
for _, actor := range mpCfg.PriorityAddrs { for _, actor := range mpCfg.PriorityAddrs {
protected[actor] = struct{}{} pk, err := mp.resolveToKey(ctx, actor)
if err != nil {
log.Debugf("pruneMessages failed to resolve priority address: %s", err)
}
protected[pk] = struct{}{}
} }
// we also never prune locally published messages // we also never prune locally published messages
for actor := range mp.localAddrs { mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) {
protected[actor] = struct{}{} protected[actor] = struct{}{}
} })
// Collect all messages to track which ones to remove and create chains for block inclusion // Collect all messages to track which ones to remove and create chains for block inclusion
pruneMsgs := make(map[cid.Cid]*types.SignedMessage, mp.currentSize) pruneMsgs := make(map[cid.Cid]*types.SignedMessage, mp.currentSize)
@ -108,7 +113,7 @@ keepLoop:
// and remove all messages that are still in pruneMsgs after processing the chains // and remove all messages that are still in pruneMsgs after processing the chains
log.Infof("Pruning %d messages", len(pruneMsgs)) log.Infof("Pruning %d messages", len(pruneMsgs))
for _, m := range pruneMsgs { for _, m := range pruneMsgs {
mp.remove(m.Message.From, m.Message.Nonce, false) mp.remove(ctx, m.Message.From, m.Message.Nonce, false)
} }
return nil return nil

View File

@ -18,7 +18,7 @@ const repubMsgLimit = 30
var RepublishBatchDelay = 100 * time.Millisecond var RepublishBatchDelay = 100 * time.Millisecond
func (mp *MessagePool) republishPendingMessages() error { func (mp *MessagePool) republishPendingMessages(ctx context.Context) error {
mp.curTsLk.Lock() mp.curTsLk.Lock()
ts := mp.curTs ts := mp.curTs
@ -32,13 +32,18 @@ func (mp *MessagePool) republishPendingMessages() error {
pending := make(map[address.Address]map[uint64]*types.SignedMessage) pending := make(map[address.Address]map[uint64]*types.SignedMessage)
mp.lk.Lock() mp.lk.Lock()
mp.republished = nil // clear this to avoid races triggering an early republish mp.republished = nil // clear this to avoid races triggering an early republish
for actor := range mp.localAddrs { mp.forEachLocal(ctx, func(ctx context.Context, actor address.Address) {
mset, ok := mp.pending[actor] mset, ok, err := mp.getPendingMset(ctx, actor)
if err != nil {
log.Debugf("failed to get mset: %w", err)
return
}
if !ok { if !ok {
continue return
} }
if len(mset.msgs) == 0 { if len(mset.msgs) == 0 {
continue return
} }
// we need to copy this while holding the lock to avoid races with concurrent modification // we need to copy this while holding the lock to avoid races with concurrent modification
pend := make(map[uint64]*types.SignedMessage, len(mset.msgs)) pend := make(map[uint64]*types.SignedMessage, len(mset.msgs))
@ -46,7 +51,8 @@ func (mp *MessagePool) republishPendingMessages() error {
pend[nonce] = m pend[nonce] = m
} }
pending[actor] = pend pending[actor] = pend
} })
mp.lk.Unlock() mp.lk.Unlock()
mp.curTsLk.Unlock() mp.curTsLk.Unlock()

View File

@ -56,7 +56,7 @@ func TestRepubMessages(t *testing.T) {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
_, err := mp.Push(m) _, err := mp.Push(context.TODO(), m)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -38,7 +38,7 @@ type msgChain struct {
prev *msgChain prev *msgChain
} }
func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) { func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) {
mp.curTsLk.Lock() mp.curTsLk.Lock()
defer mp.curTsLk.Unlock() defer mp.curTsLk.Unlock()
@ -49,9 +49,9 @@ func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) (msgs []*typ
// than any other block, then we don't bother with optimal selection because the // than any other block, then we don't bother with optimal selection because the
// first block will always have higher effective performance // first block will always have higher effective performance
if tq > 0.84 { if tq > 0.84 {
msgs, err = mp.selectMessagesGreedy(mp.curTs, ts) msgs, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts)
} else { } else {
msgs, err = mp.selectMessagesOptimal(mp.curTs, ts, tq) msgs, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq)
} }
if err != nil { if err != nil {
@ -65,7 +65,7 @@ func (mp *MessagePool) SelectMessages(ts *types.TipSet, tq float64) (msgs []*typ
return msgs, nil return msgs, nil
} }
func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) {
start := time.Now() start := time.Now()
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
@ -91,7 +91,7 @@ func (mp *MessagePool) selectMessagesOptimal(curTs, ts *types.TipSet, tq float64
// 0b. Select all priority messages that fit in the block // 0b. Select all priority messages that fit in the block
minGas := int64(gasguess.MinGas) minGas := int64(gasguess.MinGas)
result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts) result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts)
// have we filled the block? // have we filled the block?
if gasLimit < minGas { if gasLimit < minGas {
@ -389,7 +389,7 @@ tailLoop:
return result, nil return result, nil
} }
func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) ([]*types.SignedMessage, error) {
start := time.Now() start := time.Now()
baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts)
@ -415,7 +415,7 @@ func (mp *MessagePool) selectMessagesGreedy(curTs, ts *types.TipSet) ([]*types.S
// 0b. Select all priority messages that fit in the block // 0b. Select all priority messages that fit in the block
minGas := int64(gasguess.MinGas) minGas := int64(gasguess.MinGas)
result, gasLimit := mp.selectPriorityMessages(pending, baseFee, ts) result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts)
// have we filled the block? // have we filled the block?
if gasLimit < minGas { if gasLimit < minGas {
@ -525,7 +525,7 @@ tailLoop:
return result, nil return result, nil
} }
func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) {
start := time.Now() start := time.Now()
defer func() { defer func() {
if dt := time.Since(start); dt > time.Millisecond { if dt := time.Since(start); dt > time.Millisecond {
@ -541,10 +541,16 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
var chains []*msgChain var chains []*msgChain
priority := mpCfg.PriorityAddrs priority := mpCfg.PriorityAddrs
for _, actor := range priority { for _, actor := range priority {
mset, ok := pending[actor] pk, err := mp.resolveToKey(ctx, actor)
if err != nil {
log.Debugf("mpooladdlocal failed to resolve sender: %s", err)
return nil, gasLimit
}
mset, ok := pending[pk]
if ok { if ok {
// remove actor from pending set as we are already processed these messages // remove actor from pending set as we are already processed these messages
delete(pending, actor) delete(pending, pk)
// create chains for the priority actor // create chains for the priority actor
next := mp.createMessageChains(actor, mset, baseFee, ts) next := mp.createMessageChains(actor, mset, baseFee, ts)
chains = append(chains, next...) chains = append(chains, next...)
@ -646,8 +652,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
inSync = true inSync = true
} }
// first add our current pending messages mp.forEachPending(func(a address.Address, mset *msgSet) {
for a, mset := range mp.pending {
if inSync { if inSync {
// no need to copy the map // no need to copy the map
result[a] = mset.msgs result[a] = mset.msgs
@ -660,7 +665,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.
result[a] = msetCopy result[a] = msetCopy
} }
} })
// we are in sync, that's the happy path // we are in sync, that's the happy path
if inSync { if inSync {

View File

@ -427,7 +427,7 @@ func TestBasicMessageSelection(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
msgs, err := mp.SelectMessages(ts, 1.0) msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -464,7 +464,7 @@ func TestBasicMessageSelection(t *testing.T) {
tma.applyBlock(t, block2) tma.applyBlock(t, block2)
// we should have no pending messages in the mpool // we should have no pending messages in the mpool
pend, _ := mp.Pending() pend, _ := mp.Pending(context.TODO())
if len(pend) != 0 { if len(pend) != 0 {
t.Fatalf("expected no pending messages, but got %d", len(pend)) t.Fatalf("expected no pending messages, but got %d", len(pend))
} }
@ -495,7 +495,7 @@ func TestBasicMessageSelection(t *testing.T) {
tma.setStateNonce(a1, 10) tma.setStateNonce(a1, 10)
tma.setStateNonce(a2, 10) tma.setStateNonce(a2, 10)
msgs, err = mp.SelectMessages(ts3, 1.0) msgs, err = mp.SelectMessages(context.Background(), ts3, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -569,7 +569,7 @@ func TestMessageSelectionTrimming(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
msgs, err := mp.SelectMessages(ts, 1.0) msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -633,7 +633,7 @@ func TestPriorityMessageSelection(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
msgs, err := mp.SelectMessages(ts, 1.0) msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -712,7 +712,7 @@ func TestPriorityMessageSelection2(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
msgs, err := mp.SelectMessages(ts, 1.0) msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -782,7 +782,7 @@ func TestPriorityMessageSelection3(t *testing.T) {
} }
// test greedy selection // test greedy selection
msgs, err := mp.SelectMessages(ts, 1.0) msgs, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -805,7 +805,7 @@ func TestPriorityMessageSelection3(t *testing.T) {
} }
// test optimal selection // test optimal selection
msgs, err = mp.SelectMessages(ts, 0.1) msgs, err = mp.SelectMessages(context.Background(), ts, 0.1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -872,7 +872,7 @@ func TestOptimalMessageSelection1(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
msgs, err := mp.SelectMessages(ts, 0.25) msgs, err := mp.SelectMessages(context.Background(), ts, 0.25)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -941,7 +941,7 @@ func TestOptimalMessageSelection2(t *testing.T) {
mustAdd(t, mp, m) mustAdd(t, mp, m)
} }
msgs, err := mp.SelectMessages(ts, 0.1) msgs, err := mp.SelectMessages(context.Background(), ts, 0.1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1020,7 +1020,7 @@ func TestOptimalMessageSelection3(t *testing.T) {
} }
} }
msgs, err := mp.SelectMessages(ts, 0.1) msgs, err := mp.SelectMessages(context.Background(), ts, 0.1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1108,7 +1108,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
logging.SetLogLevel("messagepool", "error") logging.SetLogLevel("messagepool", "error")
// 1. greedy selection // 1. greedy selection
greedyMsgs, err := mp.selectMessagesGreedy(ts, ts) greedyMsgs, err := mp.selectMessagesGreedy(context.Background(), ts, ts)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1137,7 +1137,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
var bestMsgs []*types.SignedMessage var bestMsgs []*types.SignedMessage
for j := 0; j < nMiners; j++ { for j := 0; j < nMiners; j++ {
tq := rng.Float64() tq := rng.Float64()
msgs, err := mp.SelectMessages(ts, tq) msgs, err := mp.SelectMessages(context.Background(), ts, tq)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1396,7 +1396,7 @@ readLoop:
minGasLimit := int64(0.9 * float64(build.BlockGasLimit)) minGasLimit := int64(0.9 * float64(build.BlockGasLimit))
// greedy first // greedy first
selected, err := mp.SelectMessages(ts, 1.0) selected, err := mp.SelectMessages(context.Background(), ts, 1.0)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1410,7 +1410,7 @@ readLoop:
} }
// high quality ticket // high quality ticket
selected, err = mp.SelectMessages(ts, .8) selected, err = mp.SelectMessages(context.Background(), ts, .8)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1424,7 +1424,7 @@ readLoop:
} }
// mid quality ticket // mid quality ticket
selected, err = mp.SelectMessages(ts, .4) selected, err = mp.SelectMessages(context.Background(), ts, .4)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1438,7 +1438,7 @@ readLoop:
} }
// low quality ticket // low quality ticket
selected, err = mp.SelectMessages(ts, .1) selected, err = mp.SelectMessages(context.Background(), ts, .1)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -1452,7 +1452,7 @@ readLoop:
} }
// very low quality ticket // very low quality ticket
selected, err = mp.SelectMessages(ts, .01) selected, err = mp.SelectMessages(context.Background(), ts, .01)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -24,6 +24,8 @@ type mockMpool struct {
nonces map[address.Address]uint64 nonces map[address.Address]uint64
} }
var _ MpoolNonceAPI = (*mockMpool)(nil)
func newMockMpool() *mockMpool { func newMockMpool() *mockMpool {
return &mockMpool{nonces: make(map[address.Address]uint64)} return &mockMpool{nonces: make(map[address.Address]uint64)}
} }

View File

@ -24,6 +24,7 @@ import (
states2 "github.com/filecoin-project/specs-actors/v2/actors/states" states2 "github.com/filecoin-project/specs-actors/v2/actors/states"
states3 "github.com/filecoin-project/specs-actors/v3/actors/states" states3 "github.com/filecoin-project/specs-actors/v3/actors/states"
states4 "github.com/filecoin-project/specs-actors/v4/actors/states" states4 "github.com/filecoin-project/specs-actors/v4/actors/states"
states5 "github.com/filecoin-project/specs-actors/v5/actors/states"
) )
var log = logging.Logger("statetree") var log = logging.Logger("statetree")
@ -151,6 +152,8 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) {
return types.StateTreeVersion2, nil return types.StateTreeVersion2, nil
case network.Version12: case network.Version12:
return types.StateTreeVersion3, nil return types.StateTreeVersion3, nil
case network.Version13:
return types.StateTreeVersion4, nil
default: default:
panic(fmt.Sprintf("unsupported network version %d", ver)) panic(fmt.Sprintf("unsupported network version %d", ver))
} }
@ -161,7 +164,7 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e
switch ver { switch ver {
case types.StateTreeVersion0: case types.StateTreeVersion0:
// info is undefined // info is undefined
case types.StateTreeVersion1, types.StateTreeVersion2, types.StateTreeVersion3: case types.StateTreeVersion1, types.StateTreeVersion2, types.StateTreeVersion3, types.StateTreeVersion4:
var err error var err error
info, err = cst.Put(context.TODO(), new(types.StateInfo0)) info, err = cst.Put(context.TODO(), new(types.StateInfo0))
if err != nil { if err != nil {
@ -198,6 +201,12 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e
return nil, xerrors.Errorf("failed to create state tree: %w", err) return nil, xerrors.Errorf("failed to create state tree: %w", err)
} }
hamt = tree.Map hamt = tree.Map
case types.StateTreeVersion4:
tree, err := states5.NewTree(store)
if err != nil {
return nil, xerrors.Errorf("failed to create state tree: %w", err)
}
hamt = tree.Map
default: default:
return nil, xerrors.Errorf("unsupported state tree version: %d", ver) return nil, xerrors.Errorf("unsupported state tree version: %d", ver)
} }
@ -253,6 +262,12 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) {
if tree != nil { if tree != nil {
hamt = tree.Map hamt = tree.Map
} }
case types.StateTreeVersion4:
var tree *states5.Tree
tree, err = states5.LoadTree(store, root.Actors)
if tree != nil {
hamt = tree.Map
}
default: default:
return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version) return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version)
} }

View File

@ -155,11 +155,6 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
return nil, xerrors.Errorf("computing tipset state: %w", err) return nil, xerrors.Errorf("computing tipset state: %w", err)
} }
state, err = sm.handleStateForks(ctx, state, ts.Height(), nil, ts)
if err != nil {
return nil, fmt.Errorf("failed to handle fork: %w", err)
}
r := store.NewChainRand(sm.cs, ts.Cids()) r := store.NewChainRand(sm.cs, ts.Cids())
if span.IsRecordingEvents() { if span.IsRecordingEvents() {
@ -172,7 +167,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
vmopt := &vm.VMOpts{ vmopt := &vm.VMOpts{
StateBase: state, StateBase: state,
Epoch: ts.Height() + 1, Epoch: ts.Height(),
Rand: r, Rand: r,
Bstore: sm.cs.StateBlockstore(), Bstore: sm.cs.StateBlockstore(),
Syscalls: sm.cs.VMSys(), Syscalls: sm.cs.VMSys(),

View File

@ -9,6 +9,8 @@ import (
"sync" "sync"
"time" "time"
"github.com/filecoin-project/specs-actors/v5/actors/migration/nv13"
"github.com/filecoin-project/go-state-types/rt" "github.com/filecoin-project/go-state-types/rt"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -143,7 +145,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Network: network.Version3, Network: network.Version3,
Migration: UpgradeRefuel, Migration: UpgradeRefuel,
}, { }, {
Height: build.UpgradeActorsV2Height, Height: build.UpgradeAssemblyHeight,
Network: network.Version4, Network: network.Version4,
Expensive: true, Expensive: true,
Migration: UpgradeActorsV2, Migration: UpgradeActorsV2,
@ -172,7 +174,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Network: network.Version9, Network: network.Version9,
Migration: nil, Migration: nil,
}, { }, {
Height: build.UpgradeActorsV3Height, Height: build.UpgradeTrustHeight,
Network: network.Version10, Network: network.Version10,
Migration: UpgradeActorsV3, Migration: UpgradeActorsV3,
PreMigrations: []PreMigration{{ PreMigrations: []PreMigration{{
@ -192,7 +194,7 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Network: network.Version11, Network: network.Version11,
Migration: nil, Migration: nil,
}, { }, {
Height: build.UpgradeActorsV4Height, Height: build.UpgradeTurboHeight,
Network: network.Version12, Network: network.Version12,
Migration: UpgradeActorsV4, Migration: UpgradeActorsV4,
PreMigrations: []PreMigration{{ PreMigrations: []PreMigration{{
@ -207,7 +209,22 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
StopWithin: 5, StopWithin: 5,
}}, }},
Expensive: true, Expensive: true,
}} }, {
Height: build.UpgradeHyperdriveHeight,
Network: network.Version13,
Migration: UpgradeActorsV5,
PreMigrations: []PreMigration{{
PreMigration: PreUpgradeActorsV5,
StartWithin: 120,
DontStartWithin: 60,
StopWithin: 35,
}, {
PreMigration: PreUpgradeActorsV5,
StartWithin: 30,
DontStartWithin: 15,
StopWithin: 5,
}},
Expensive: true}}
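The pre-migration windows are expressed in epochs before UpgradeHyperdriveHeight. Assuming the standard 30-second mainnet epoch (build.BlockDelaySecs), the first PreUpgradeActorsV5 pass may start up to 120 epochs (about an hour) before the upgrade, is not started once we are within 60 epochs (about 30 minutes) of it, and is stopped 35 epochs (about 17.5 minutes) before; the second pass re-runs it in the 30-to-5 epoch window so the migration cache is fresh when the real migration fires at the upgrade epoch. The conversion is simply epochs multiplied by build.BlockDelaySecs, e.g.:

package main

import (
	"fmt"
	"time"

	"github.com/filecoin-project/lotus/build"
)

func main() {
	// Illustration only: translate the PreMigration epoch windows above
	// into wall-clock durations (30s epochs on mainnet).
	for _, epochs := range []int64{120, 60, 35, 30, 15, 5} {
		d := time.Duration(epochs*int64(build.BlockDelaySecs)) * time.Second
		fmt.Printf("%d epochs = %s\n", epochs, d)
	}
}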
for _, u := range updates { for _, u := range updates {
if u.Height < 0 { if u.Height < 0 {
@ -1053,7 +1070,7 @@ func upgradeActorsV3Common(
// Perform the migration // Perform the migration
newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err) return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err)
} }
// Persist the result. // Persist the result.
@ -1139,7 +1156,7 @@ func upgradeActorsV4Common(
// Perform the migration // Perform the migration
newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) newHamtRoot, err := nv12.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v2: %w", err) return cid.Undef, xerrors.Errorf("upgrading to actors v4: %w", err)
} }
// Persist the result. // Persist the result.
@ -1166,6 +1183,92 @@ func upgradeActorsV4Common(
return newRoot, nil return newRoot, nil
} }
func UpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 3.
workerCount := runtime.NumCPU() - 3
if workerCount <= 0 {
workerCount = 1
}
config := nv13.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v5 state: %w", err)
}
return newRoot, nil
}
func PreUpgradeActorsV5(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := runtime.NumCPU()
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
config := nv13.Config{MaxWorkers: uint(workerCount)}
_, err := upgradeActorsV5Common(ctx, sm, cache, root, epoch, ts, config)
return err
}
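For a concrete sense of the worker-count rules above (an illustrative calculation, not something stated in the commit): on a 16-core host UpgradeActorsV5 runs the migration with 16 - 3 = 13 workers while PreUpgradeActorsV5 uses 16 / 2 = 8; on a 4-core host both fall back to a single worker, since the migration clamps non-positive counts to 1 and the pre-migration only halves the CPU count when more than 4 cores are available.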
func upgradeActorsV5Common(
ctx context.Context, sm *StateManager, cache MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config nv13.Config,
) (cid.Cid, error) {
buf := blockstore.NewTieredBstore(sm.cs.StateBlockstore(), blockstore.NewMemorySync())
store := store.ActorStore(ctx, buf)
// Load the state root.
var stateRoot types.StateRoot
if err := store.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion3 {
return cid.Undef, xerrors.Errorf(
"expected state root version 3 for actors v5 upgrade, got %d",
stateRoot.Version,
)
}
// Perform the migration
newHamtRoot, err := nv13.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err)
}
// Persist the result.
newRoot, err := store.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion4,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persist the new tree.
{
from := buf
to := buf.Read()
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
}
}
return newRoot, nil
}
func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error { func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
ia, err := tree.GetActor(builtin0.InitActorAddr) ia, err := tree.GetActor(builtin0.InitActorAddr)
if err != nil { if err != nil {

View File

@ -7,6 +7,8 @@ import (
"sync" "sync"
"sync/atomic" "sync/atomic"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor" cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
@ -90,6 +92,7 @@ type StateManager struct {
expensiveUpgrades map[abi.ChainEpoch]struct{} expensiveUpgrades map[abi.ChainEpoch]struct{}
stCache map[string][]cid.Cid stCache map[string][]cid.Cid
tCache treeCache
compWait map[string]chan struct{} compWait map[string]chan struct{}
stlk sync.Mutex stlk sync.Mutex
genesisMsigLk sync.Mutex genesisMsigLk sync.Mutex
@ -102,6 +105,12 @@ type StateManager struct {
genesisMarketFunds abi.TokenAmount genesisMarketFunds abi.TokenAmount
} }
// Caches a single state tree
type treeCache struct {
root cid.Cid
tree *state.StateTree
}
func NewStateManager(cs *store.ChainStore) *StateManager { func NewStateManager(cs *store.ChainStore) *StateManager {
sm, err := NewStateManagerWithUpgradeSchedule(cs, DefaultUpgradeSchedule()) sm, err := NewStateManagerWithUpgradeSchedule(cs, DefaultUpgradeSchedule())
if err != nil { if err != nil {
@ -154,7 +163,11 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
newVM: vm.NewVM, newVM: vm.NewVM,
cs: cs, cs: cs,
stCache: make(map[string][]cid.Cid), stCache: make(map[string][]cid.Cid),
compWait: make(map[string]chan struct{}), tCache: treeCache{
root: cid.Undef,
tree: nil,
},
compWait: make(map[string]chan struct{}),
}, nil }, nil
} }
@@ -563,6 +576,52 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad
 	return vm.ResolveToKeyAddr(tree, cst, addr)
 }
 
+// ResolveToKeyAddressAtFinality is similar to stmgr.ResolveToKeyAddress but fails if the ID address being resolved isn't reorg-stable yet.
+// It should not be used for consensus-critical subsystems.
+func (sm *StateManager) ResolveToKeyAddressAtFinality(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
+	switch addr.Protocol() {
+	case address.BLS, address.SECP256K1:
+		return addr, nil
+	case address.Actor:
+		return address.Undef, xerrors.New("cannot resolve actor address to key address")
+	default:
+	}
+
+	if ts == nil {
+		ts = sm.cs.GetHeaviestTipSet()
+	}
+
+	var err error
+	if ts.Height() > policy.ChainFinality {
+		ts, err = sm.ChainStore().GetTipsetByHeight(ctx, ts.Height()-policy.ChainFinality, ts, true)
+		if err != nil {
+			return address.Undef, xerrors.Errorf("failed to load lookback tipset: %w", err)
+		}
+	}
+
+	cst := cbor.NewCborStore(sm.cs.StateBlockstore())
+	tree := sm.tCache.tree
+
+	if tree == nil || sm.tCache.root != ts.ParentState() {
+		tree, err = state.LoadStateTree(cst, ts.ParentState())
+		if err != nil {
+			return address.Undef, xerrors.Errorf("failed to load parent state tree: %w", err)
+		}
+
+		sm.tCache = treeCache{
+			root: ts.ParentState(),
+			tree: tree,
+		}
+	}
+
+	resolved, err := vm.ResolveToKeyAddr(tree, cst, addr)
+	if err == nil {
+		return resolved, nil
+	}
+
+	return address.Undef, xerrors.New("ID address not found in lookback state")
+}
+
 func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Address, ts *types.TipSet) (pubk []byte, err error) {
 	kaddr, err := sm.ResolveToKeyAddress(ctx, addr, ts)
 	if err != nil {
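The new ResolveToKeyAddressAtFinality resolves against the state at least ChainFinality epochs behind the supplied tipset and caches the loaded tree in sm.tCache. A hedged usage sketch follows; the helper name, the calling package, and the wiring around it are assumptions for illustration, not code from this PR.

package example

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/stmgr"
)

// stableSenderKey is a hypothetical caller: it resolves a sender to a
// reorg-stable key address for a non-consensus subsystem such as an index or
// API layer. sm is assumed to be an initialized *stmgr.StateManager.
func stableSenderKey(ctx context.Context, sm *stmgr.StateManager, sender address.Address) (address.Address, error) {
	// A nil tipset means "start from the current head"; the helper then walks
	// back ChainFinality epochs before resolving against that older state.
	key, err := sm.ResolveToKeyAddressAtFinality(ctx, sender, nil)
	if err != nil {
		// The ID address may simply not be reorg-stable yet; retry later rather
		// than trusting a younger resolution.
		return address.Undef, err
	}
	return key, nil
}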
@@ -1141,8 +1200,8 @@ func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch,
 		}
 	}
 
-	// After UpgradeActorsV2Height these funds are accounted for in GetFilReserveDisbursed
-	if height <= build.UpgradeActorsV2Height {
+	// After UpgradeAssemblyHeight these funds are accounted for in GetFilReserveDisbursed
+	if height <= build.UpgradeAssemblyHeight {
 		// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
 		vf = big.Add(vf, sm.genesisPledge)
 		// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch
@@ -1265,7 +1324,7 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig
 	}
 
 	filReserveDisbursed := big.Zero()
-	if height > build.UpgradeActorsV2Height {
+	if height > build.UpgradeAssemblyHeight {
 		filReserveDisbursed, err = GetFilReserveDisbursed(ctx, st)
 		if err != nil {
 			return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filReserveDisbursed: %w", err)


@@ -9,6 +9,8 @@ import (
 	"runtime"
 	"strings"
 
+	exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported"
+
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/network"
@@ -549,6 +551,7 @@ func init() {
 	actors = append(actors, exported2.BuiltinActors()...)
 	actors = append(actors, exported3.BuiltinActors()...)
 	actors = append(actors, exported4.BuiltinActors()...)
+	actors = append(actors, exported5.BuiltinActors()...)
 
 	for _, actor := range actors {
 		exports := actor.Exports()


@@ -18,7 +18,7 @@ func TestChainCheckpoint(t *testing.T) {
 	// Let the first miner mine some blocks.
 	last := cg.CurTipset.TipSet()
 	for i := 0; i < 4; i++ {
-		ts, err := cg.NextTipSetFromMiners(last, cg.Miners[:1])
+		ts, err := cg.NextTipSetFromMiners(last, cg.Miners[:1], 0)
 		require.NoError(t, err)
 
 		last = ts.TipSet.TipSet()
@@ -57,7 +57,7 @@ func TestChainCheckpoint(t *testing.T) {
 	// Let the second miner miner mine a fork
 	last = checkpointParents
 	for i := 0; i < 4; i++ {
-		ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:])
+		ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:], 0)
 		require.NoError(t, err)
 
 		last = ts.TipSet.TipSet()


@@ -107,6 +107,9 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) {
 	}
 
 	rheight -= ci.skipLength
+	if rheight < 0 {
+		rheight = 0
+	}
 
 	var skipTarget *types.TipSet
 	if parent.Height() < rheight {
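A quick worked example of why the clamp added above matters (the numbers are illustrative, not taken from the chain): subtracting the skip length from a low round height can produce a negative epoch, which is not a valid lookup target, so the walk is pinned to genesis instead.

// Standalone illustration with hypothetical values.
package main

import "fmt"

func main() {
	rheight := int64(15)    // assumed current target height
	skipLength := int64(20) // assumed ChainIndex skip length
	rheight -= skipLength
	if rheight < 0 {
		rheight = 0 // without this clamp the next lookup would target epoch -5
	}
	fmt.Println(rheight) // 0
}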


@@ -12,6 +12,8 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/filecoin-project/lotus/chain/state"
+
 	"golang.org/x/sync/errgroup"
 
 	"github.com/filecoin-project/go-state-types/crypto"
@@ -1129,17 +1131,33 @@ type BlockMessages struct {
 func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) {
 	applied := make(map[address.Address]uint64)
 
+	cst := cbor.NewCborStore(cs.stateBlockstore)
+	st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to load state tree")
+	}
+
 	selectMsg := func(m *types.Message) (bool, error) {
-		// The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise
-		if _, ok := applied[m.From]; !ok {
-			applied[m.From] = m.Nonce
+		var sender address.Address
+		if ts.Height() >= build.UpgradeHyperdriveHeight {
+			sender, err = st.LookupID(m.From)
+			if err != nil {
+				return false, err
+			}
+		} else {
+			sender = m.From
 		}
 
-		if applied[m.From] != m.Nonce {
+		// The first match for a sender is guaranteed to have correct nonce -- the block isn't valid otherwise
+		if _, ok := applied[sender]; !ok {
+			applied[sender] = m.Nonce
+		}
+
+		if applied[sender] != m.Nonce {
 			return false, nil
 		}
 
-		applied[m.From]++
+		applied[sender]++
 
 		return true, nil
 	}
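From UpgradeHyperdriveHeight onwards, the duplicate-nonce check above keys its counters by the sender's ID address rather than by whatever address form appears in the message, so a sender referenced once by its key address (f1/f3) and once by its ID address (f0) shares a single nonce sequence. A toy illustration of that idea; the addresses and the resolve map are hypothetical stand-ins for st.LookupID against the parent state.

// Standalone illustration only, not code from this PR.
package main

import "fmt"

func main() {
	resolve := map[string]string{
		"f1senderkeyaddr": "f0101", // key-address form of the sender
		"f0101":           "f0101", // ID-address form of the same sender
	}

	applied := map[string]uint64{} // next expected nonce per ID address
	msgs := []struct {
		from  string
		nonce uint64
	}{
		{"f1senderkeyaddr", 5},
		{"f0101", 6}, // same actor, different address form
	}

	for _, m := range msgs {
		sender := resolve[m.from]
		if _, ok := applied[sender]; !ok {
			applied[sender] = m.nonce
		}
		if applied[sender] != m.nonce {
			fmt.Println("skip mismatched nonce from", m.from)
			continue
		}
		applied[sender]++
		fmt.Println("include message with nonce", m.nonce, "from", m.from)
	}
}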
@@ -1404,7 +1422,15 @@ func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.Cha
 	return h.Sum(nil), nil
 }
 
-func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (cs *ChainStore) GetBeaconRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, true)
+}
+
+func (cs *ChainStore) GetBeaconRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, false)
+}
+
+func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
 	_, span := trace.StartSpan(ctx, "store.GetBeaconRandomness")
 	defer span.End()
 	span.AddAttributes(trace.Int64Attribute("round", int64(round)))
@@ -1423,7 +1449,7 @@ func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, p
 		searchHeight = 0
 	}
 
-	randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
+	randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
 	if err != nil {
 		return nil, err
 	}
@@ -1438,7 +1464,15 @@ func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, p
 	return DrawRandomness(be.Data, pers, round, entropy)
 }
 
-func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (cs *ChainStore) GetChainRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return cs.GetChainRandomness(ctx, blks, pers, round, entropy, true)
+}
+
+func (cs *ChainStore) GetChainRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return cs.GetChainRandomness(ctx, blks, pers, round, entropy, false)
+}
+
+func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
 	_, span := trace.StartSpan(ctx, "store.GetChainRandomness")
 	defer span.End()
 	span.AddAttributes(trace.Int64Attribute("round", int64(round)))
@@ -1457,7 +1491,7 @@ func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pe
 		searchHeight = 0
 	}
 
-	randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
+	randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
 	if err != nil {
 		return nil, err
 	}
@@ -1732,12 +1766,20 @@ func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand {
 	}
 }
 
-func (cr *chainRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
-	return cr.cs.GetChainRandomness(ctx, cr.blks, pers, round, entropy)
+func (cr *chainRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return cr.cs.GetChainRandomnessLookingBack(ctx, cr.blks, pers, round, entropy)
 }
 
-func (cr *chainRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
-	return cr.cs.GetBeaconRandomness(ctx, cr.blks, pers, round, entropy)
+func (cr *chainRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return cr.cs.GetChainRandomnessLookingForward(ctx, cr.blks, pers, round, entropy)
+}
+
+func (cr *chainRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return cr.cs.GetBeaconRandomnessLookingBack(ctx, cr.blks, pers, round, entropy)
+}
+
+func (cr *chainRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return cr.cs.GetBeaconRandomnessLookingForward(ctx, cr.blks, pers, round, entropy)
 }
 
 func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) {
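The LookingBack/LookingForward split above only changes how the tipset at the randomness epoch is chosen when that epoch is a null round (the boolean forwarded to GetTipsetByHeight). Below is a hedged sketch of how a caller might select between the two variants; it assumes the vm.Rand interface now exposes both methods and that the forward-looking rule takes effect at network version 13, both of which are assumptions rather than statements about this PR's call sites.

package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/vm"
)

// pickBeaconRandomness is a hypothetical helper, not code from this PR.
func pickBeaconRandomness(ctx context.Context, r vm.Rand, nv network.Version,
	pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	if nv >= network.Version13 {
		// assumed rule: from nv13 onwards, a null round resolves to the following tipset
		return r.GetBeaconRandomnessLookingForward(ctx, pers, round, entropy)
	}
	return r.GetBeaconRandomnessLookingBack(ctx, pers, round, entropy)
}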


@@ -76,7 +76,7 @@ func BenchmarkGetRandomness(b *testing.B) {
 	b.ResetTimer()
 
 	for i := 0; i < b.N; i++ {
-		_, err := cs.GetChainRandomness(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil)
+		_, err := cs.GetChainRandomnessLookingBack(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil)
 		if err != nil {
 			b.Fatal(err)
 		}


@@ -516,7 +516,7 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
 		return pubsub.ValidationReject
 	}
 
-	if err := mv.mpool.Add(m); err != nil {
+	if err := mv.mpool.Add(ctx, m); err != nil {
 		log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err)
 		ctx, _ = tag.New(
 			ctx,

Some files were not shown because too many files have changed in this diff.