Merge remote-tracking branch 'origin/master' into feat/deal-batch-publish
commit 332ea8a126
@ -201,6 +201,8 @@ jobs:
|
||||
<<: *test
|
||||
test-window-post:
|
||||
<<: *test
|
||||
test-window-post-dispute:
|
||||
<<: *test
|
||||
test-terminate:
|
||||
<<: *test
|
||||
test-conformance:
|
||||
@ -265,6 +267,16 @@ jobs:
|
||||
path: /tmp/test-reports
|
||||
- store_artifacts:
|
||||
path: /tmp/test-artifacts/conformance-coverage.html
|
||||
build-ntwk-calibration:
|
||||
description: |
|
||||
Compile lotus binaries for the calibration network
|
||||
parameters:
|
||||
<<: *test-params
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: make calibnet
|
||||
build-lotus-soup:
|
||||
description: |
|
||||
Compile `lotus-soup` Testground test plan
|
||||
@ -591,6 +603,11 @@ workflows:
|
||||
go-test-flags: "-run=TestWindowedPost"
|
||||
winpost-test: "1"
|
||||
test-suite-name: window-post
|
||||
- test-window-post-dispute:
|
||||
codecov-upload: true
|
||||
go-test-flags: "-run=TestWindowPostDispute"
|
||||
winpost-test: "1"
|
||||
test-suite-name: window-post-dispute
|
||||
- test-terminate:
|
||||
codecov-upload: true
|
||||
go-test-flags: "-run=TestTerminate"
|
||||
@ -611,6 +628,7 @@ workflows:
|
||||
test-suite-name: conformance-bleeding-edge
|
||||
packages: "./conformance"
|
||||
vectors-branch: master
|
||||
- build-ntwk-calibration
|
||||
- build-lotus-soup
|
||||
- trigger-testplans:
|
||||
filters:
|
||||
|
3
.gitmodules
vendored
@ -7,6 +7,3 @@
|
||||
[submodule "extern/test-vectors"]
|
||||
path = extern/test-vectors
|
||||
url = https://github.com/filecoin-project/test-vectors.git
|
||||
[submodule "extern/blst"]
|
||||
path = extern/blst
|
||||
url = https://github.com/supranational/blst.git
|
||||
|
@ -949,7 +949,8 @@ const (
|
||||
)
|
||||
|
||||
type Deadline struct {
|
||||
PostSubmissions bitfield.BitField
+DisputableProofCount uint64
|
||||
}
|
||||
|
||||
type Partition struct {
|
||||
|
@ -17,9 +17,9 @@ import (
|
||||
|
||||
func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
for _, height := range []abi.ChainEpoch{
|
||||
-1, // before
+2, // before
162, // while sealing
-520, // after upgrade deal
+530, // after upgrade deal
|
||||
5000, // after
|
||||
} {
|
||||
height := height // make linters happy by copying
|
||||
@ -31,7 +31,7 @@ func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
|
||||
func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
|
||||
ctx := context.Background()
|
||||
-n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)
+n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(upgradeHeight)}, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
|
@ -254,6 +254,21 @@ func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration
|
||||
}
|
||||
}
|
||||
|
||||
func TestZeroPricePerByteRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
|
||||
s := setupOneClientOneMiner(t, b, blocktime)
|
||||
defer s.blockMiner.Stop()
|
||||
|
||||
// Set price-per-byte to zero
|
||||
ask, err := s.miner.MarketGetRetrievalAsk(s.ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
ask.PricePerByte = abi.NewTokenAmount(0)
|
||||
err = s.miner.MarketSetRetrievalAsk(s.ctx, ask)
|
||||
require.NoError(t, err)
|
||||
|
||||
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
|
||||
}
|
||||
|
||||
func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
if err != nil {
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -48,6 +49,7 @@ type TestStorageNode struct {
|
||||
ListenAddr multiaddr.Multiaddr
|
||||
|
||||
MineOne func(context.Context, miner.MineReq) error
|
||||
Stop func(context.Context) error
|
||||
}
|
||||
|
||||
var PresealGenesis = -1
|
||||
@ -110,14 +112,19 @@ var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
|
||||
var OneFull = DefaultFullOpts(1)
|
||||
var TwoFull = DefaultFullOpts(2)
|
||||
|
||||
-var FullNodeWithActorsV2At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
+var FullNodeWithActorsV3At = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
return FullNodeOpts{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
-// Skip directly to tape height so precommits work.
-Network: network.Version5,
-Height: upgradeHeight,
+// prepare for upgrade.
+Network: network.Version9,
+Height: 1,
Migration: stmgr.UpgradeActorsV2,
+}, {
+// Skip directly to tape height so precommits work.
+Network: network.Version10,
+Height: upgradeHeight,
+Migration: stmgr.UpgradeActorsV3,
}})
|
||||
},
|
||||
}
|
||||
@ -158,7 +165,11 @@ func (ts *testSuite) testVersion(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
-require.Equal(t, v.Version, build.BuildVersion)
+versions := strings.Split(v.Version, "+")
+if len(versions) <= 0 {
+t.Fatal("empty version")
+}
+require.Equal(t, versions[0], build.BuildVersion)
|
||||
}
|
||||
|
||||
func (ts *testSuite) testSearchMsg(t *testing.T) {
|
||||
|
@ -16,13 +16,18 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
proof3 "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
minerActor "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
bminer "github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
@ -201,7 +206,7 @@ func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n,
|
||||
|
||||
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
|
||||
for _, height := range []abi.ChainEpoch{
|
||||
-1, // before
+2, // before
|
||||
162, // while sealing
|
||||
5000, // while proving
|
||||
} {
|
||||
@ -218,7 +223,7 @@ func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration,
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
-n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(upgradeHeight)}, OneMiner)
+n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(upgradeHeight)}, OneMiner)
|
||||
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
@ -437,7 +442,7 @@ func TestTerminate(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
|
||||
nSectors := uint64(2)
|
||||
|
||||
-n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV2At(1)}, []StorageMiner{{Full: 0, Preseal: int(nSectors)}})
+n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(2)}, []StorageMiner{{Full: 0, Preseal: int(nSectors)}})
|
||||
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
@ -602,3 +607,420 @@ loop:
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*(nSectors-1)))
|
||||
}
|
||||
|
||||
func TestWindowPostDispute(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// First, we configure two miners. After sealing, we're going to turn off the first miner so
|
||||
// it doesn't submit proofs.
|
||||
///
|
||||
// Then we're going to manually submit bad proofs.
|
||||
n, sn := b(t, []FullNodeOpts{
|
||||
FullNodeWithActorsV3At(2),
|
||||
}, []StorageMiner{
|
||||
{Full: 0, Preseal: PresealGenesis},
|
||||
{Full: 0},
|
||||
})
|
||||
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
chainMiner := sn[0]
|
||||
evilMiner := sn[1]
|
||||
|
||||
{
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := chainMiner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := evilMiner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
defaultFrom, err := client.WalletDefaultAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
build.Clock.Sleep(time.Second)
|
||||
|
||||
// Mine with the _second_ node (the good one).
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for ctx.Err() == nil {
|
||||
build.Clock.Sleep(blocktime)
|
||||
if err := chainMiner.MineOne(ctx, MineNext); err != nil {
|
||||
if ctx.Err() != nil {
|
||||
// context was canceled, ignore the error.
|
||||
return
|
||||
}
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
cancel()
|
||||
<-done
|
||||
}()
|
||||
|
||||
// Give the chain miner enough sectors to win every block.
|
||||
pledgeSectors(t, ctx, chainMiner, 10, 0, nil)
|
||||
// And the evil one 1 sector. No cookie for you.
|
||||
pledgeSectors(t, ctx, evilMiner, 1, 0, nil)
|
||||
|
||||
// Let the evil miner's sectors gain power.
|
||||
evilMinerAddr, err := evilMiner.ActorAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Printf("Running one proving period\n")
|
||||
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
|
||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
ssz, err := evilMiner.ActorSectorSize(ctx, evilMinerAddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure it has gained power.
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
|
||||
|
||||
evilSectors, err := evilMiner.SectorsList(ctx)
|
||||
require.NoError(t, err)
|
||||
evilSectorNo := evilSectors[0] // only one.
|
||||
evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Println("evil miner stopping")
|
||||
|
||||
// Now stop the evil miner, and start manually submitting bad proofs.
|
||||
require.NoError(t, evilMiner.Stop(ctx))
|
||||
|
||||
fmt.Println("evil miner stopped")
|
||||
|
||||
// Wait until we need to prove our sector.
|
||||
for {
|
||||
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
if di.Index == evilSectorLoc.Deadline {
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
|
||||
require.NoError(t, err, "evil proof not accepted")
|
||||
|
||||
// Wait until after the proving period.
|
||||
for {
|
||||
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
if di.Index != evilSectorLoc.Deadline {
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
fmt.Println("accepted evil proof")
|
||||
|
||||
// Make sure the evil node didn't lose any power.
|
||||
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))
|
||||
|
||||
// OBJECTION! The good miner files a DISPUTE!!!!
|
||||
{
|
||||
params := &minerActor.DisputeWindowedPoStParams{
|
||||
Deadline: evilSectorLoc.Deadline,
|
||||
PoStIndex: 0,
|
||||
}
|
||||
|
||||
enc, aerr := actors.SerializeParams(params)
|
||||
require.NoError(t, aerr)
|
||||
|
||||
msg := &types.Message{
|
||||
To: evilMinerAddr,
|
||||
Method: minerActor.Methods.DisputeWindowedPoSt,
|
||||
Params: enc,
|
||||
Value: types.NewInt(0),
|
||||
From: defaultFrom,
|
||||
}
|
||||
sm, err := client.MpoolPushMessage(ctx, msg, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Println("waiting dispute")
|
||||
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error())
|
||||
}
|
||||
|
||||
// Objection SUSTAINED!
|
||||
// Make sure the evil node lost power.
|
||||
p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.True(t, p.MinerPower.RawBytePower.IsZero())
|
||||
|
||||
// Now we begin the redemption arc.
|
||||
require.True(t, p.MinerPower.RawBytePower.IsZero())
|
||||
|
||||
// First, recover the sector.
|
||||
|
||||
{
|
||||
minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
params := &minerActor.DeclareFaultsRecoveredParams{
|
||||
Recoveries: []minerActor.RecoveryDeclaration{{
|
||||
Deadline: evilSectorLoc.Deadline,
|
||||
Partition: evilSectorLoc.Partition,
|
||||
Sectors: bitfield.NewFromSet([]uint64{uint64(evilSectorNo)}),
|
||||
}},
|
||||
}
|
||||
|
||||
enc, aerr := actors.SerializeParams(params)
|
||||
require.NoError(t, aerr)
|
||||
|
||||
msg := &types.Message{
|
||||
To: evilMinerAddr,
|
||||
Method: minerActor.Methods.DeclareFaultsRecovered,
|
||||
Params: enc,
|
||||
Value: types.FromFil(30), // repay debt.
|
||||
From: minerInfo.Owner,
|
||||
}
|
||||
sm, err := client.MpoolPushMessage(ctx, msg, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence)
|
||||
require.NoError(t, err)
|
||||
require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error())
|
||||
}
|
||||
|
||||
// Then wait for the deadline.
|
||||
for {
|
||||
di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
if di.Index == evilSectorLoc.Deadline {
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
// Now try to be evil again
|
||||
err = submitBadProof(ctx, client, evilMinerAddr, di, evilSectorLoc.Deadline, evilSectorLoc.Partition)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "message execution failed: exit 16, reason: window post failed: invalid PoSt")
|
||||
|
||||
// It didn't work because we're recovering.
|
||||
}
|
||||
|
||||
func submitBadProof(
|
||||
ctx context.Context,
|
||||
client api.FullNode, maddr address.Address,
|
||||
di *dline.Info, dlIdx, partIdx uint64,
|
||||
) error {
|
||||
head, err := client.ChainHead(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
from, err := client.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
commEpoch := di.Open
|
||||
commRand, err := client.ChainGetRandomnessFromTickets(
|
||||
ctx, head.Key(), crypto.DomainSeparationTag_PoStChainCommit,
|
||||
commEpoch, nil,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
params := &minerActor.SubmitWindowedPoStParams{
|
||||
ChainCommitEpoch: commEpoch,
|
||||
ChainCommitRand: commRand,
|
||||
Deadline: dlIdx,
|
||||
Partitions: []minerActor.PoStPartition{{Index: partIdx}},
|
||||
Proofs: []proof3.PoStProof{{
|
||||
PoStProof: minerInfo.WindowPoStProofType,
|
||||
ProofBytes: []byte("I'm soooo very evil."),
|
||||
}},
|
||||
}
|
||||
|
||||
enc, aerr := actors.SerializeParams(params)
|
||||
if aerr != nil {
|
||||
return aerr
|
||||
}
|
||||
|
||||
msg := &types.Message{
|
||||
To: maddr,
|
||||
Method: minerActor.Methods.SubmitWindowedPoSt,
|
||||
Params: enc,
|
||||
Value: types.NewInt(0),
|
||||
From: from,
|
||||
}
|
||||
sm, err := client.MpoolPushMessage(ctx, msg, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rec.Receipt.ExitCode.IsError() {
|
||||
return rec.Receipt.ExitCode
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestWindowPostDisputeFails(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
n, sn := b(t, []FullNodeOpts{FullNodeWithActorsV3At(2)}, OneMiner)
|
||||
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
{
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
defaultFrom, err := client.WalletDefaultAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
build.Clock.Sleep(time.Second)
|
||||
|
||||
// Mine with the _second_ node (the good one).
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for ctx.Err() == nil {
|
||||
build.Clock.Sleep(blocktime)
|
||||
if err := miner.MineOne(ctx, MineNext); err != nil {
|
||||
if ctx.Err() != nil {
|
||||
// context was canceled, ignore the error.
|
||||
return
|
||||
}
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
cancel()
|
||||
<-done
|
||||
}()
|
||||
|
||||
pledgeSectors(t, ctx, miner, 10, 0, nil)
|
||||
|
||||
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Printf("Running one proving period\n")
|
||||
fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod*2)
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod*2 {
|
||||
fmt.Printf("Now head.Height = %d\n", head.Height())
|
||||
break
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
ssz, err := miner.ActorSectorSize(ctx, maddr)
|
||||
require.NoError(t, err)
|
||||
expectedPower := types.NewInt(uint64(ssz) * (GenesisPreseals + 10))
|
||||
|
||||
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
// make sure it has gained power.
|
||||
require.Equal(t, p.MinerPower.RawBytePower, expectedPower)
|
||||
|
||||
// Wait until a proof has been submitted.
|
||||
var targetDeadline uint64
|
||||
waitForProof:
|
||||
for {
|
||||
deadlines, err := client.StateMinerDeadlines(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
for dlIdx, dl := range deadlines {
|
||||
nonEmpty, err := dl.PostSubmissions.IsEmpty()
|
||||
require.NoError(t, err)
|
||||
if nonEmpty {
|
||||
targetDeadline = uint64(dlIdx)
|
||||
break waitForProof
|
||||
}
|
||||
}
|
||||
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
for {
|
||||
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
// wait until the deadline finishes.
|
||||
if di.Index == ((targetDeadline + 1) % di.WPoStPeriodDeadlines) {
|
||||
break
|
||||
}
|
||||
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
// Try to object to the proof. This should fail.
|
||||
{
|
||||
params := &minerActor.DisputeWindowedPoStParams{
|
||||
Deadline: targetDeadline,
|
||||
PoStIndex: 0,
|
||||
}
|
||||
|
||||
enc, aerr := actors.SerializeParams(params)
|
||||
require.NoError(t, aerr)
|
||||
|
||||
msg := &types.Message{
|
||||
To: maddr,
|
||||
Method: minerActor.Methods.DisputeWindowedPoSt,
|
||||
Params: enc,
|
||||
Value: types.NewInt(0),
|
||||
From: defaultFrom,
|
||||
}
|
||||
_, err := client.MpoolPushMessage(ctx, msg, nil)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)")
|
||||
}
|
||||
}
|
||||
|
@ -27,6 +27,8 @@ const UpgradePersianHeight = 25
|
||||
const UpgradeOrangeHeight = 27
|
||||
const UpgradeClausHeight = 30
|
||||
|
||||
const UpgradeActorsV3Height = 35
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
@ -44,6 +44,9 @@ const UpgradeClausHeight = 161386
|
||||
// 2021-01-17T19:00:00Z
|
||||
const UpgradeOrangeHeight = 250666
|
||||
|
||||
// 2021-01-28T21:00:00Z
|
||||
const UpgradeActorsV3Height = 282586
|
||||
|
||||
func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 30))
|
||||
policy.SetSupportedProofTypes(
|
||||
@ -55,6 +58,8 @@ func init() {
|
||||
SetAddressNetwork(address.Testnet)
|
||||
|
||||
Devnet = true
|
||||
|
||||
BuildType = BuildCalibnet
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
@ -50,6 +50,9 @@ const UpgradeOrangeHeight = 336458
|
||||
// 2020-12-22T02:00:00Z
|
||||
const UpgradeClausHeight = 343200
|
||||
|
||||
// TODO
|
||||
const UpgradeActorsV3Height = 999999999
|
||||
|
||||
func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
|
||||
|
||||
@ -58,6 +61,8 @@ func init() {
|
||||
}
|
||||
|
||||
Devnet = false
|
||||
|
||||
BuildType = BuildMainnet
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
@ -90,8 +90,9 @@ var (
|
||||
UpgradeKumquatHeight abi.ChainEpoch = -6
|
||||
UpgradeCalicoHeight abi.ChainEpoch = -7
|
||||
UpgradePersianHeight abi.ChainEpoch = -8
|
||||
-UpgradeClausHeight abi.ChainEpoch = -9
-UpgradeOrangeHeight abi.ChainEpoch = -10
+UpgradeOrangeHeight abi.ChainEpoch = -9
+UpgradeClausHeight abi.ChainEpoch = -10
+UpgradeActorsV3Height abi.ChainEpoch = -11
|
||||
|
||||
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
|
@ -10,19 +10,25 @@ var CurrentCommit string
|
||||
var BuildType int
|
||||
|
||||
const (
|
||||
-BuildDefault = 0
-Build2k = 0x1
-BuildDebug = 0x3
+BuildDefault = 0
+BuildMainnet = 0x1
+Build2k = 0x2
+BuildDebug = 0x3
+BuildCalibnet = 0x4
|
||||
)
|
||||
|
||||
func buildType() string {
|
||||
switch BuildType {
|
||||
case BuildDefault:
|
||||
return ""
|
||||
-case BuildDebug:
-return "+debug"
+case BuildMainnet:
+return "+mainnet"
case Build2k:
return "+2k"
+case BuildDebug:
+return "+debug"
+case BuildCalibnet:
+return "+calibnet"
|
||||
default:
|
||||
return "+huh?"
|
||||
}
|
||||
|
@ -2,16 +2,9 @@ package adt
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
|
||||
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
|
||||
)
|
||||
|
||||
type Map interface {
|
||||
@ -24,26 +17,6 @@ type Map interface {
|
||||
ForEach(v cbor.Unmarshaler, fn func(key string) error) error
|
||||
}
|
||||
|
||||
-func AsMap(store Store, root cid.Cid, version actors.Version) (Map, error) {
-switch version {
-case actors.Version0:
-return adt0.AsMap(store, root)
-case actors.Version2:
-return adt2.AsMap(store, root)
-}
-return nil, xerrors.Errorf("unknown network version: %d", version)
-}
-
-func NewMap(store Store, version actors.Version) (Map, error) {
-switch version {
-case actors.Version0:
-return adt0.MakeEmptyMap(store), nil
-case actors.Version2:
-return adt2.MakeEmptyMap(store), nil
-}
-return nil, xerrors.Errorf("unknown network version: %d", version)
-}
|
||||
|
||||
type Array interface {
|
||||
Root() (cid.Cid, error)
|
||||
|
||||
@ -54,23 +27,3 @@ type Array interface {
|
||||
|
||||
ForEach(v cbor.Unmarshaler, fn func(idx int64) error) error
|
||||
}
|
||||
|
||||
-func AsArray(store Store, root cid.Cid, version network.Version) (Array, error) {
-switch actors.VersionForNetwork(version) {
-case actors.Version0:
-return adt0.AsArray(store, root)
-case actors.Version2:
-return adt2.AsArray(store, root)
-}
-return nil, xerrors.Errorf("unknown network version: %d", version)
-}
-
-func NewArray(store Store, version actors.Version) (Array, error) {
-switch version {
-case actors.Version0:
-return adt0.MakeEmptyArray(store), nil
-case actors.Version2:
-return adt2.MakeEmptyArray(store), nil
-}
-return nil, xerrors.Errorf("unknown network version: %d", version)
-}
|
||||
|
@ -13,6 +13,7 @@ import (
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -22,9 +23,12 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
-var Methods = builtin2.MethodsAccount
+var Methods = builtin3.MethodsAccount
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
switch act.Code {
|
||||
@ -32,6 +36,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.AccountActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.AccountActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
30
chain/actors/builtin/account/v3.go
Normal file
@ -0,0 +1,30 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
account3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/account"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
account3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) PubkeyAddress() (address.Address, error) {
|
||||
return s.Address, nil
|
||||
}
|
@ -2,12 +2,12 @@ package builtin
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
@ -15,9 +15,12 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing"
|
||||
smoothing2 "github.com/filecoin-project/specs-actors/v2/actors/util/smoothing"
|
||||
smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing"
|
||||
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"
|
||||
smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing"
|
||||
)
|
||||
|
||||
var SystemActorAddr = builtin0.SystemActorAddr
|
||||
@ -38,11 +41,12 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
-MethodSend = builtin2.MethodSend
-MethodConstructor = builtin2.MethodConstructor
+MethodSend = builtin3.MethodSend
+MethodConstructor = builtin3.MethodConstructor
|
||||
)
|
||||
|
||||
-// TODO: Why does actors have 2 different versions of this?
+// These are all just type aliases across actor versions 0, 2, & 3. In the future, that might change
+// and we might need to do something fancier.
|
||||
type SectorInfo = proof0.SectorInfo
|
||||
type PoStProof = proof0.PoStProof
|
||||
type FilterEstimate = smoothing0.FilterEstimate
|
||||
@ -51,13 +55,17 @@ func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
|
||||
return (FilterEstimate)(v0)
|
||||
}
|
||||
|
||||
-// Doesn't change between actors v0 and v1
+// Doesn't change between actors v0, v2, and v3.
|
||||
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
|
||||
return miner0.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
|
||||
}
|
||||
|
||||
-func FromV2FilterEstimate(v1 smoothing2.FilterEstimate) FilterEstimate {
-return (FilterEstimate)(v1)
+func FromV2FilterEstimate(v2 smoothing2.FilterEstimate) FilterEstimate {
+return (FilterEstimate)(v2)
|
||||
}
|
||||
|
||||
func FromV3FilterEstimate(v3 smoothing3.FilterEstimate) FilterEstimate {
|
||||
return (FilterEstimate)(v3)
|
||||
}
|
||||
|
||||
type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)
|
||||
@ -82,30 +90,42 @@ func ActorNameByCode(c cid.Cid) string {
|
||||
return builtin0.ActorNameByCode(c)
|
||||
case builtin2.IsBuiltinActor(c):
|
||||
return builtin2.ActorNameByCode(c)
|
||||
case builtin3.IsBuiltinActor(c):
|
||||
return builtin3.ActorNameByCode(c)
|
||||
default:
|
||||
return "<unknown>"
|
||||
}
|
||||
}
|
||||
|
||||
func IsBuiltinActor(c cid.Cid) bool {
-return builtin0.IsBuiltinActor(c) || builtin2.IsBuiltinActor(c)
+return builtin0.IsBuiltinActor(c) ||
+builtin2.IsBuiltinActor(c) ||
+builtin3.IsBuiltinActor(c)
}

func IsAccountActor(c cid.Cid) bool {
-return c == builtin0.AccountActorCodeID || c == builtin2.AccountActorCodeID
+return c == builtin0.AccountActorCodeID ||
+c == builtin2.AccountActorCodeID ||
+c == builtin3.AccountActorCodeID
}

func IsStorageMinerActor(c cid.Cid) bool {
-return c == builtin0.StorageMinerActorCodeID || c == builtin2.StorageMinerActorCodeID
+return c == builtin0.StorageMinerActorCodeID ||
+c == builtin2.StorageMinerActorCodeID ||
+c == builtin3.StorageMinerActorCodeID
}

func IsMultisigActor(c cid.Cid) bool {
-return c == builtin0.MultisigActorCodeID || c == builtin2.MultisigActorCodeID
+return c == builtin0.MultisigActorCodeID ||
+c == builtin2.MultisigActorCodeID ||
+c == builtin3.MultisigActorCodeID

}

func IsPaymentChannelActor(c cid.Cid) bool {
-return c == builtin0.PaymentChannelActorCodeID || c == builtin2.PaymentChannelActorCodeID
+return c == builtin0.PaymentChannelActorCodeID ||
+c == builtin2.PaymentChannelActorCodeID ||
+c == builtin3.PaymentChannelActorCodeID
}
|
||||
|
||||
func makeAddress(addr string) address.Address {
|
||||
|
@ -1,10 +1,10 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
var (
|
||||
-Address = builtin2.CronActorAddr
-Methods = builtin2.MethodsCron
+Address = builtin3.CronActorAddr
+Methods = builtin3.MethodsCron
|
||||
)
|
||||
|
@ -15,6 +15,7 @@ import (
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -24,11 +25,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
-Address = builtin2.InitActorAddr
-Methods = builtin2.MethodsInit
+Address = builtin3.InitActorAddr
+Methods = builtin3.MethodsInit
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -37,6 +41,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.InitActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.InitActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
86
chain/actors/builtin/init/v3.go
Normal file
@ -0,0 +1,86 @@
|
||||
package init
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
|
||||
init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
init3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) ResolveAddress(address address.Address) (address.Address, bool, error) {
|
||||
return s.State.ResolveAddress(s.store, address)
|
||||
}
|
||||
|
||||
func (s *state3) MapAddressToNewID(address address.Address) (address.Address, error) {
|
||||
return s.State.MapAddressToNewID(s.store, address)
|
||||
}
|
||||
|
||||
func (s *state3) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
|
||||
addrs, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var actorID cbg.CborInt
|
||||
return addrs.ForEach(&actorID, func(key string) error {
|
||||
addr, err := address.NewFromBytes([]byte(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(abi.ActorID(actorID), addr)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state3) NetworkName() (dtypes.NetworkName, error) {
|
||||
return dtypes.NetworkName(s.State.NetworkName), nil
|
||||
}
|
||||
|
||||
func (s *state3) SetNetworkName(name string) error {
|
||||
s.State.NetworkName = name
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state3) Remove(addrs ...address.Address) (err error) {
|
||||
m, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
if err = m.Delete(abi.AddrKey(addr)); err != nil {
|
||||
return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
|
||||
}
|
||||
}
|
||||
amr, err := m.Root()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get address map root: %w", err)
|
||||
}
|
||||
s.State.AddressMap = amr
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state3) addressMap() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.AddressMap, builtin3.DefaultHamtBitwidth)
|
||||
}
|
@ -12,6 +12,7 @@ import (
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -25,11 +26,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
-Address = builtin2.StorageMarketActorAddr
-Methods = builtin2.MethodsMarket
+Address = builtin3.StorageMarketActorAddr
+Methods = builtin3.MethodsMarket
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
@ -38,6 +42,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.StorageMarketActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.StorageMarketActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
205
chain/actors/builtin/market/v3.go
Normal file
@ -0,0 +1,205 @@
|
||||
package market
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
market3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) TotalLocked() (abi.TokenAmount, error) {
|
||||
fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
|
||||
fml = types.BigAdd(fml, s.TotalClientStorageFee)
|
||||
return fml, nil
|
||||
}
|
||||
|
||||
func (s *state3) BalancesChanged(otherState State) (bool, error) {
|
||||
otherState2, ok := otherState.(*state3)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the state of balances has changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.EscrowTable.Equals(otherState2.State.EscrowTable) || !s.State.LockedTable.Equals(otherState2.State.LockedTable), nil
|
||||
}
|
||||
|
||||
func (s *state3) StatesChanged(otherState State) (bool, error) {
|
||||
otherState2, ok := otherState.(*state3)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the state of balances has changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.States.Equals(otherState2.State.States), nil
|
||||
}
|
||||
|
||||
func (s *state3) States() (DealStates, error) {
|
||||
stateArray, err := adt3.AsArray(s.store, s.State.States, market3.StatesAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dealStates3{stateArray}, nil
|
||||
}
|
||||
|
||||
func (s *state3) ProposalsChanged(otherState State) (bool, error) {
|
||||
otherState2, ok := otherState.(*state3)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the state of balances has changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Proposals.Equals(otherState2.State.Proposals), nil
|
||||
}
|
||||
|
||||
func (s *state3) Proposals() (DealProposals, error) {
|
||||
proposalArray, err := adt3.AsArray(s.store, s.State.Proposals, market3.ProposalsAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dealProposals3{proposalArray}, nil
|
||||
}
|
||||
|
||||
func (s *state3) EscrowTable() (BalanceTable, error) {
|
||||
bt, err := adt3.AsBalanceTable(s.store, s.State.EscrowTable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &balanceTable3{bt}, nil
|
||||
}
|
||||
|
||||
func (s *state3) LockedTable() (BalanceTable, error) {
|
||||
bt, err := adt3.AsBalanceTable(s.store, s.State.LockedTable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &balanceTable3{bt}, nil
|
||||
}
|
||||
|
||||
func (s *state3) VerifyDealsForActivation(
|
||||
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
|
||||
) (weight, verifiedWeight abi.DealWeight, err error) {
|
||||
w, vw, _, err := market3.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
|
||||
return w, vw, err
|
||||
}
|
||||
|
||||
type balanceTable3 struct {
|
||||
*adt3.BalanceTable
|
||||
}
|
||||
|
||||
func (bt *balanceTable3) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
|
||||
asMap := (*adt3.Map)(bt.BalanceTable)
|
||||
var ta abi.TokenAmount
|
||||
return asMap.ForEach(&ta, func(key string) error {
|
||||
a, err := address.NewFromBytes([]byte(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(a, ta)
|
||||
})
|
||||
}
|
||||
|
||||
type dealStates3 struct {
|
||||
adt.Array
|
||||
}
|
||||
|
||||
func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) {
|
||||
var deal2 market3.DealState
|
||||
found, err := s.Array.Get(uint64(dealID), &deal2)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !found {
|
||||
return nil, false, nil
|
||||
}
|
||||
deal := fromV3DealState(deal2)
|
||||
return &deal, true, nil
|
||||
}
|
||||
|
||||
func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
|
||||
var ds1 market3.DealState
|
||||
return s.Array.ForEach(&ds1, func(idx int64) error {
|
||||
return cb(abi.DealID(idx), fromV3DealState(ds1))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) {
|
||||
var ds1 market3.DealState
|
||||
if err := ds1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ds := fromV3DealState(ds1)
|
||||
return &ds, nil
|
||||
}
|
||||
|
||||
func (s *dealStates3) array() adt.Array {
|
||||
return s.Array
|
||||
}
|
||||
|
||||
func fromV3DealState(v1 market3.DealState) DealState {
|
||||
return (DealState)(v1)
|
||||
}
|
||||
|
||||
type dealProposals3 struct {
|
||||
adt.Array
|
||||
}
|
||||
|
||||
func (s *dealProposals3) Get(dealID abi.DealID) (*DealProposal, bool, error) {
|
||||
var proposal2 market3.DealProposal
|
||||
found, err := s.Array.Get(uint64(dealID), &proposal2)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !found {
|
||||
return nil, false, nil
|
||||
}
|
||||
proposal := fromV3DealProposal(proposal2)
|
||||
return &proposal, true, nil
|
||||
}
|
||||
|
||||
func (s *dealProposals3) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
|
||||
var dp1 market3.DealProposal
|
||||
return s.Array.ForEach(&dp1, func(idx int64) error {
|
||||
return cb(abi.DealID(idx), fromV3DealProposal(dp1))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *dealProposals3) decode(val *cbg.Deferred) (*DealProposal, error) {
|
||||
var dp1 market3.DealProposal
|
||||
if err := dp1.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dp := fromV3DealProposal(dp1)
|
||||
return &dp, nil
|
||||
}
|
||||
|
||||
func (s *dealProposals3) array() adt.Array {
|
||||
return s.Array
|
||||
}
|
||||
|
||||
func fromV3DealProposal(v1 market3.DealProposal) DealProposal {
|
||||
return (DealProposal)(v1)
|
||||
}
|
@ -2,6 +2,7 @@ package miner
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
@ -21,6 +22,8 @@ import (
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -30,11 +33,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
-var Methods = builtin2.MethodsMiner
+var Methods = builtin3.MethodsMiner
|
||||
|
||||
-// Unchanged between v0 and v2 actors
+// Unchanged between v0, v2, and v3 actors
|
||||
var WPoStProvingPeriod = miner0.WPoStProvingPeriod
|
||||
var WPoStPeriodDeadlines = miner0.WPoStPeriodDeadlines
|
||||
var WPoStChallengeWindow = miner0.WPoStChallengeWindow
|
||||
@ -53,6 +59,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.StorageMinerActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.StorageMinerActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
@ -96,9 +104,10 @@ type State interface {
|
||||
type Deadline interface {
|
||||
LoadPartition(idx uint64) (Partition, error)
|
||||
ForEachPartition(cb func(idx uint64, part Partition) error) error
|
||||
-PostSubmissions() (bitfield.BitField, error)
+PartitionsPoSted() (bitfield.BitField, error)

PartitionsChanged(Deadline) (bool, error)
+DisputableProofCount() (uint64, error)
|
||||
}
|
||||
|
||||
type Partition interface {
|
||||
@ -142,6 +151,60 @@ type DeclareFaultsParams = miner0.DeclareFaultsParams
|
||||
type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
|
||||
type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
|
||||
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
|
||||
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
|
||||
|
||||
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
|
||||
// We added support for the new proofs in network version 7, and removed support for the old
|
||||
// ones in network version 8.
|
||||
if nver < network.Version7 {
|
||||
switch proof {
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg2KiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg8MiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg512MiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg32GiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg64GiBV1, nil
|
||||
default:
|
||||
return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
|
||||
}
|
||||
}
|
||||
|
||||
switch proof {
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
|
||||
return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
|
||||
default:
|
||||
return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
|
||||
}
|
||||
}
|
||||
|
||||
func WinningPoStProofTypeFromWindowPoStProofType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredPoStProof, error) {
|
||||
switch proof {
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning8MiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning512MiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning32GiBV1, nil
|
||||
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1:
|
||||
return abi.RegisteredPoStProof_StackedDrgWinning64GiBV1, nil
|
||||
default:
|
||||
return -1, xerrors.Errorf("unknown proof type %d", proof)
|
||||
}
|
||||
}
|
||||
|
||||
type MinerInfo struct {
|
||||
Owner address.Address // Must be an ID-address.
|
||||
@ -151,7 +214,7 @@ type MinerInfo struct {
|
||||
WorkerChangeEpoch abi.ChainEpoch
|
||||
PeerId *peer.ID
|
||||
Multiaddrs []abi.Multiaddrs
|
||||
-SealProofType abi.RegisteredSealProof
+WindowPoStProofType abi.RegisteredPoStProof
|
||||
SectorSize abi.SectorSize
|
||||
WindowPoStPartitionSectors uint64
|
||||
ConsensusFaultElapsed abi.ChainEpoch
|
||||
|
@ -297,6 +297,11 @@ func (s *state0) Info() (MinerInfo, error) {
|
||||
pid = &peerID
|
||||
}
|
||||
|
||||
wpp, err := info.SealProofType.RegisteredWindowPoStProof()
|
||||
if err != nil {
|
||||
return MinerInfo{}, err
|
||||
}
|
||||
|
||||
mi := MinerInfo{
|
||||
Owner: info.Owner,
|
||||
Worker: info.Worker,
|
||||
@ -307,7 +312,7 @@ func (s *state0) Info() (MinerInfo, error) {
|
||||
|
||||
PeerId: pid,
|
||||
Multiaddrs: info.Multiaddrs,
|
||||
-SealProofType: info.SealProofType,
+WindowPoStProofType: wpp,
|
||||
SectorSize: info.SectorSize,
|
||||
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
|
||||
ConsensusFaultElapsed: -1,
|
||||
@ -382,10 +387,15 @@ func (d *deadline0) PartitionsChanged(other Deadline) (bool, error) {
|
||||
return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil
|
||||
}
|
||||
|
||||
-func (d *deadline0) PostSubmissions() (bitfield.BitField, error) {
+func (d *deadline0) PartitionsPoSted() (bitfield.BitField, error) {
|
||||
return d.Deadline.PostSubmissions, nil
|
||||
}
|
||||
|
||||
func (d *deadline0) DisputableProofCount() (uint64, error) {
|
||||
// field doesn't exist until v3
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (p *partition0) AllSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Sectors, nil
|
||||
}
|
||||
|
@ -296,6 +296,11 @@ func (s *state2) Info() (MinerInfo, error) {
|
||||
pid = &peerID
|
||||
}
|
||||
|
||||
wpp, err := info.SealProofType.RegisteredWindowPoStProof()
|
||||
if err != nil {
|
||||
return MinerInfo{}, err
|
||||
}
|
||||
|
||||
mi := MinerInfo{
|
||||
Owner: info.Owner,
|
||||
Worker: info.Worker,
|
||||
@ -306,7 +311,7 @@ func (s *state2) Info() (MinerInfo, error) {
|
||||
|
||||
PeerId: pid,
|
||||
Multiaddrs: info.Multiaddrs,
|
||||
-SealProofType: info.SealProofType,
+WindowPoStProofType: wpp,
|
||||
SectorSize: info.SectorSize,
|
||||
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
|
||||
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
|
||||
@ -381,10 +386,15 @@ func (d *deadline2) PartitionsChanged(other Deadline) (bool, error) {
|
||||
return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil
|
||||
}
|
||||
|
||||
-func (d *deadline2) PostSubmissions() (bitfield.BitField, error) {
+func (d *deadline2) PartitionsPoSted() (bitfield.BitField, error) {
|
||||
return d.Deadline.PostSubmissions, nil
|
||||
}
|
||||
|
||||
func (d *deadline2) DisputableProofCount() (uint64, error) {
|
||||
// field doesn't exist until v3
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (p *partition2) AllSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Sectors, nil
|
||||
}
|
||||
|
434
chain/actors/builtin/miner/v3.go
Normal file
@ -0,0 +1,434 @@
|
||||
package miner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
miner3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
type deadline3 struct {
|
||||
miner3.Deadline
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
type partition3 struct {
|
||||
miner3.Partition
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = xerrors.Errorf("failed to get available balance: %w", r)
|
||||
available = abi.NewTokenAmount(0)
|
||||
}
|
||||
}()
|
||||
// this panics if the miner doesnt have enough funds to cover their locked pledge
|
||||
available, err = s.GetAvailableBalance(bal)
|
||||
return available, err
|
||||
}
|
||||
|
||||
func (s *state3) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
|
||||
return s.CheckVestedFunds(s.store, epoch)
|
||||
}
|
||||
|
||||
func (s *state3) LockedFunds() (LockedFunds, error) {
|
||||
return LockedFunds{
|
||||
VestingFunds: s.State.LockedFunds,
|
||||
InitialPledgeRequirement: s.State.InitialPledge,
|
||||
PreCommitDeposits: s.State.PreCommitDeposits,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state3) FeeDebt() (abi.TokenAmount, error) {
|
||||
return s.State.FeeDebt, nil
|
||||
}
|
||||
|
||||
func (s *state3) InitialPledge() (abi.TokenAmount, error) {
|
||||
return s.State.InitialPledge, nil
|
||||
}
|
||||
|
||||
func (s *state3) PreCommitDeposits() (abi.TokenAmount, error) {
|
||||
return s.State.PreCommitDeposits, nil
|
||||
}
|
||||
|
||||
func (s *state3) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
|
||||
info, ok, err := s.State.GetSector(s.store, num)
|
||||
if !ok || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := fromV3SectorOnChainInfo(*info)
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (s *state3) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
|
||||
dlIdx, partIdx, err := s.State.FindSector(s.store, num)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SectorLocation{
|
||||
Deadline: dlIdx,
|
||||
Partition: partIdx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state3) NumLiveSectors() (uint64, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var total uint64
|
||||
if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error {
|
||||
total += dl.LiveSectors
|
||||
return nil
|
||||
}); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// GetSectorExpiration returns the effective expiration of the given sector.
|
||||
//
|
||||
// If the sector does not expire early, the Early expiration field is 0.
|
||||
func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// NOTE: this can be optimized significantly.
|
||||
// 1. If the sector is non-faulty, it will either expire on-time (can be
|
||||
// learned from the sector info), or in the next quantized expiration
|
||||
//    epoch (i.e., the first element in the partition's expiration queue).
|
||||
// 2. If it's faulty, it will expire early within the first 14 entries
|
||||
// of the expiration queue.
|
||||
stopErr := errors.New("stop")
|
||||
out := SectorExpiration{}
|
||||
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error {
|
||||
partitions, err := dl.PartitionsArray(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
quant := s.State.QuantSpecForDeadline(dlIdx)
|
||||
var part miner3.Partition
|
||||
return partitions.ForEach(&part, func(partIdx int64) error {
|
||||
if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if !found {
|
||||
return nil
|
||||
}
|
||||
if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if found {
|
||||
// already terminated
|
||||
return stopErr
|
||||
}
|
||||
|
||||
q, err := miner3.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner3.PartitionExpirationAmtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var exp miner3.ExpirationSet
|
||||
return q.ForEach(&exp, func(epoch int64) error {
|
||||
if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if early {
|
||||
out.Early = abi.ChainEpoch(epoch)
|
||||
return nil
|
||||
}
|
||||
if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if onTime {
|
||||
out.OnTime = abi.ChainEpoch(epoch)
|
||||
return stopErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
})
|
||||
if err == stopErr {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if out.Early == 0 && out.OnTime == 0 {
|
||||
return nil, xerrors.Errorf("failed to find sector %d", num)
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *state3) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
|
||||
info, ok, err := s.State.GetPrecommittedSector(s.store, num)
|
||||
if !ok || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := fromV3SectorPreCommitOnChainInfo(*info)
|
||||
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
|
||||
sectors, err := miner3.LoadSectors(s.store, s.State.Sectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If no sector numbers are specified, load all.
|
||||
if snos == nil {
|
||||
infos := make([]*SectorOnChainInfo, 0, sectors.Length())
|
||||
var info2 miner3.SectorOnChainInfo
|
||||
if err := sectors.ForEach(&info2, func(_ int64) error {
|
||||
info := fromV3SectorOnChainInfo(info2)
|
||||
infos = append(infos, &info)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
// Otherwise, load selected.
|
||||
infos2, err := sectors.Load(*snos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
infos := make([]*SectorOnChainInfo, len(infos2))
|
||||
for i, info2 := range infos2 {
|
||||
info := fromV3SectorOnChainInfo(*info2)
|
||||
infos[i] = &info
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) {
|
||||
var allocatedSectors bitfield.BitField
|
||||
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return allocatedSectors.IsSet(uint64(num))
|
||||
}
|
||||
|
||||
func (s *state3) LoadDeadline(idx uint64) (Deadline, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dl, err := dls.LoadDeadline(s.store, idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &deadline3{*dl, s.store}, nil
|
||||
}
|
||||
|
||||
func (s *state3) ForEachDeadline(cb func(uint64, Deadline) error) error {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dls.ForEach(s.store, func(i uint64, dl *miner3.Deadline) error {
|
||||
return cb(i, &deadline3{*dl, s.store})
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state3) NumDeadlines() (uint64, error) {
|
||||
return miner3.WPoStPeriodDeadlines, nil
|
||||
}
|
||||
|
||||
func (s *state3) DeadlinesChanged(other State) (bool, error) {
|
||||
other2, ok := other.(*state3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return !s.State.Deadlines.Equals(other2.Deadlines), nil
|
||||
}
|
||||
|
||||
func (s *state3) MinerInfoChanged(other State) (bool, error) {
|
||||
other0, ok := other.(*state3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Info.Equals(other0.State.Info), nil
|
||||
}
|
||||
|
||||
func (s *state3) Info() (MinerInfo, error) {
|
||||
info, err := s.State.GetInfo(s.store)
|
||||
if err != nil {
|
||||
return MinerInfo{}, err
|
||||
}
|
||||
|
||||
var pid *peer.ID
|
||||
if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
|
||||
pid = &peerID
|
||||
}
|
||||
|
||||
mi := MinerInfo{
|
||||
Owner: info.Owner,
|
||||
Worker: info.Worker,
|
||||
ControlAddresses: info.ControlAddresses,
|
||||
|
||||
NewWorker: address.Undef,
|
||||
WorkerChangeEpoch: -1,
|
||||
|
||||
PeerId: pid,
|
||||
Multiaddrs: info.Multiaddrs,
|
||||
WindowPoStProofType: info.WindowPoStProofType,
|
||||
SectorSize: info.SectorSize,
|
||||
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
|
||||
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
|
||||
}
|
||||
|
||||
if info.PendingWorkerKey != nil {
|
||||
mi.NewWorker = info.PendingWorkerKey.NewWorker
|
||||
mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
|
||||
}
|
||||
|
||||
return mi, nil
|
||||
}
|
||||
|
||||
func (s *state3) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
|
||||
return s.State.DeadlineInfo(epoch), nil
|
||||
}
|
||||
|
||||
func (s *state3) sectors() (adt.Array, error) {
|
||||
return adt3.AsArray(s.store, s.Sectors, miner3.SectorsAmtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
|
||||
var si miner3.SectorOnChainInfo
|
||||
err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
|
||||
if err != nil {
|
||||
return SectorOnChainInfo{}, err
|
||||
}
|
||||
|
||||
return fromV3SectorOnChainInfo(si), nil
|
||||
}
|
||||
|
||||
func (s *state3) precommits() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.PreCommittedSectors, builtin3.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
|
||||
var sp miner3.SectorPreCommitOnChainInfo
|
||||
err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
|
||||
if err != nil {
|
||||
return SectorPreCommitOnChainInfo{}, err
|
||||
}
|
||||
|
||||
return fromV3SectorPreCommitOnChainInfo(sp), nil
|
||||
}
|
||||
|
||||
func (d *deadline3) LoadPartition(idx uint64) (Partition, error) {
|
||||
p, err := d.Deadline.LoadPartition(d.store, idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &partition3{*p, d.store}, nil
|
||||
}
|
||||
|
||||
func (d *deadline3) ForEachPartition(cb func(uint64, Partition) error) error {
|
||||
ps, err := d.Deadline.PartitionsArray(d.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var part miner3.Partition
|
||||
return ps.ForEach(&part, func(i int64) error {
|
||||
return cb(uint64(i), &partition3{part, d.store})
|
||||
})
|
||||
}
|
||||
|
||||
func (d *deadline3) PartitionsChanged(other Deadline) (bool, error) {
|
||||
other2, ok := other.(*deadline3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return !d.Deadline.Partitions.Equals(other2.Deadline.Partitions), nil
|
||||
}
|
||||
|
||||
func (d *deadline3) PartitionsPoSted() (bitfield.BitField, error) {
|
||||
return d.Deadline.PartitionsPoSted, nil
|
||||
}
|
||||
|
||||
func (d *deadline3) DisputableProofCount() (uint64, error) {
|
||||
ops, err := d.OptimisticProofsSnapshotArray(d.store)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return ops.Length(), nil
|
||||
}
|
||||
|
||||
func (p *partition3) AllSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Sectors, nil
|
||||
}
|
||||
|
||||
func (p *partition3) FaultySectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Faults, nil
|
||||
}
|
||||
|
||||
func (p *partition3) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo {
|
||||
return SectorOnChainInfo{
|
||||
SectorNumber: v3.SectorNumber,
|
||||
SealProof: v3.SealProof,
|
||||
SealedCID: v3.SealedCID,
|
||||
DealIDs: v3.DealIDs,
|
||||
Activation: v3.Activation,
|
||||
Expiration: v3.Expiration,
|
||||
DealWeight: v3.DealWeight,
|
||||
VerifiedDealWeight: v3.VerifiedDealWeight,
|
||||
InitialPledge: v3.InitialPledge,
|
||||
ExpectedDayReward: v3.ExpectedDayReward,
|
||||
ExpectedStoragePledge: v3.ExpectedStoragePledge,
|
||||
}
|
||||
}
|
||||
|
||||
func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
|
||||
return SectorPreCommitOnChainInfo{
|
||||
Info: (SectorPreCommitInfo)(v3.Info),
|
||||
PreCommitDeposit: v3.PreCommitDeposit,
|
||||
PreCommitEpoch: v3.PreCommitEpoch,
|
||||
DealWeight: v3.DealWeight,
|
||||
VerifiedDealWeight: v3.VerifiedDealWeight,
|
||||
}
|
||||
}
|
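GetSectorExpiration above leaves Early at 0 when no early termination is scheduled. A minimal caller-side sketch of interpreting that contract, assuming SectorExpiration exposes OnTime and Early as abi.ChainEpoch fields; effectiveExpiration is a hypothetical helper:

package example

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// effectiveExpiration returns the epoch at which the sector actually stops
// counting: the early-termination epoch if one is scheduled, otherwise the
// on-time expiration.
func effectiveExpiration(exp miner.SectorExpiration) abi.ChainEpoch {
	if exp.Early != 0 && exp.Early < exp.OnTime {
		return exp.Early
	}
	return exp.OnTime
}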
@ -9,14 +9,14 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
multisig2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
multisig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
var Methods = builtin2.MethodsMultisig
|
||||
var Methods = builtin3.MethodsMultisig
|
||||
|
||||
func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
switch version {
|
||||
@ -24,6 +24,8 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
return message0{from}
|
||||
case actors.Version2:
|
||||
return message2{message0{from}}
|
||||
case actors.Version3:
|
||||
return message3{message0{from}}
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported actors version: %d", version))
|
||||
}
|
||||
@ -47,11 +49,11 @@ type MessageBuilder interface {
|
||||
}
|
||||
|
||||
// these types are the same between v0, v2, and v3
|
||||
type ProposalHashData = multisig2.ProposalHashData
|
||||
type ProposeReturn = multisig2.ProposeReturn
|
||||
type ProposalHashData = multisig3.ProposalHashData
|
||||
type ProposeReturn = multisig3.ProposeReturn
|
||||
|
||||
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
|
||||
params := multisig2.TxnIDParams{ID: multisig2.TxnID(id)}
|
||||
params := multisig3.TxnIDParams{ID: multisig3.TxnID(id)}
|
||||
if data != nil {
|
||||
if data.Requester.Protocol() != address.ID {
|
||||
return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
|
||||
|
71
chain/actors/builtin/multisig/message3.go
Normal file
@ -0,0 +1,71 @@
|
||||
package multisig
|
||||
|
||||
import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init"
|
||||
multisig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type message3 struct{ message0 }
|
||||
|
||||
func (m message3) Create(
|
||||
signers []address.Address, threshold uint64,
|
||||
unlockStart, unlockDuration abi.ChainEpoch,
|
||||
initialAmount abi.TokenAmount,
|
||||
) (*types.Message, error) {
|
||||
|
||||
lenAddrs := uint64(len(signers))
|
||||
|
||||
if lenAddrs < threshold {
|
||||
return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
|
||||
}
|
||||
|
||||
if threshold == 0 {
|
||||
threshold = lenAddrs
|
||||
}
|
||||
|
||||
if m.from == address.Undef {
|
||||
return nil, xerrors.Errorf("must provide source address")
|
||||
}
|
||||
|
||||
// Set up constructor parameters for multisig
|
||||
msigParams := &multisig3.ConstructorParams{
|
||||
Signers: signers,
|
||||
NumApprovalsThreshold: threshold,
|
||||
UnlockDuration: unlockDuration,
|
||||
StartEpoch: unlockStart,
|
||||
}
|
||||
|
||||
enc, actErr := actors.SerializeParams(msigParams)
|
||||
if actErr != nil {
|
||||
return nil, actErr
|
||||
}
|
||||
|
||||
// new actors are created by invoking 'exec' on the init actor with the constructor params
|
||||
execParams := &init3.ExecParams{
|
||||
CodeCID: builtin3.MultisigActorCodeID,
|
||||
ConstructorParams: enc,
|
||||
}
|
||||
|
||||
enc, actErr = actors.SerializeParams(execParams)
|
||||
if actErr != nil {
|
||||
return nil, actErr
|
||||
}
|
||||
|
||||
return &types.Message{
|
||||
To: init_.Address,
|
||||
From: m.from,
|
||||
Method: builtin3.MethodsInit.Exec,
|
||||
Params: enc,
|
||||
Value: initialAmount,
|
||||
}, nil
|
||||
}
|
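The message3 builder above keeps the create-via-init-actor pattern of v0/v2. A minimal sketch of driving it through the version switch earlier in this file, assuming the MessageBuilder interface exposes Create as implemented above; newMsigCreateMsg and its arguments are placeholders:

package example

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
	"github.com/filecoin-project/lotus/chain/types"
)

// newMsigCreateMsg builds an init-actor Exec message that creates a 2-of-N
// multisig with no vesting, using the v3 builder.
func newMsigCreateMsg(creator address.Address, signers []address.Address) (*types.Message, error) {
	return multisig.Message(actors.Version3, creator).Create(
		signers,
		2,                     // approvals threshold
		0, 0,                  // unlock start epoch / duration (no vesting)
		abi.NewTokenAmount(0), // initial balance
	)
}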
@ -12,6 +12,7 @@ import (
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -25,6 +26,9 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -33,6 +37,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.MultisigActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.MultisigActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
@ -13,8 +13,8 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
|
||||
multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
|
||||
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
multisig0 "github.com/filecoin-project/specs-actors/v2/actors/builtin/multisig"
|
||||
)
|
||||
|
||||
var _ State = (*state0)(nil)
|
||||
|
95
chain/actors/builtin/multisig/state3.go
Normal file
@ -0,0 +1,95 @@
|
||||
package multisig
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
msig3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/multisig"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
msig3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
|
||||
return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
|
||||
}
|
||||
|
||||
func (s *state3) StartEpoch() (abi.ChainEpoch, error) {
|
||||
return s.State.StartEpoch, nil
|
||||
}
|
||||
|
||||
func (s *state3) UnlockDuration() (abi.ChainEpoch, error) {
|
||||
return s.State.UnlockDuration, nil
|
||||
}
|
||||
|
||||
func (s *state3) InitialBalance() (abi.TokenAmount, error) {
|
||||
return s.State.InitialBalance, nil
|
||||
}
|
||||
|
||||
func (s *state3) Threshold() (uint64, error) {
|
||||
return s.State.NumApprovalsThreshold, nil
|
||||
}
|
||||
|
||||
func (s *state3) Signers() ([]address.Address, error) {
|
||||
return s.State.Signers, nil
|
||||
}
|
||||
|
||||
func (s *state3) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
|
||||
arr, err := adt3.AsMap(s.store, s.State.PendingTxns, builtin3.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var out msig3.Transaction
|
||||
return arr.ForEach(&out, func(key string) error {
|
||||
txid, n := binary.Varint([]byte(key))
|
||||
if n <= 0 {
|
||||
return xerrors.Errorf("invalid pending transaction key: %v", key)
|
||||
}
|
||||
return cb(txid, (Transaction)(out))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state3) PendingTxnChanged(other State) (bool, error) {
|
||||
other2, ok := other.(*state3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.PendingTxns.Equals(other2.PendingTxns), nil
|
||||
}
|
||||
|
||||
func (s *state3) transactions() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.PendingTxns, builtin3.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
|
||||
var tx msig3.Transaction
|
||||
if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return Transaction{}, err
|
||||
}
|
||||
return tx, nil
|
||||
}
|
@ -8,10 +8,10 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
var Methods = builtin2.MethodsPaych
|
||||
var Methods = builtin3.MethodsPaych
|
||||
|
||||
func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
switch version {
|
||||
@ -19,6 +19,8 @@ func Message(version actors.Version, from address.Address) MessageBuilder {
|
||||
return message0{from}
|
||||
case actors.Version2:
|
||||
return message2{from}
|
||||
case actors.Version3:
|
||||
return message3{from}
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported actors version: %d", version))
|
||||
}
|
||||
|
74
chain/actors/builtin/paych/message3.go
Normal file
@ -0,0 +1,74 @@
|
||||
package paych
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
init3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/init"
|
||||
paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type message3 struct{ from address.Address }
|
||||
|
||||
func (m message3) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
|
||||
params, aerr := actors.SerializeParams(&paych3.ConstructorParams{From: m.from, To: to})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
enc, aerr := actors.SerializeParams(&init3.ExecParams{
|
||||
CodeCID: builtin3.PaymentChannelActorCodeID,
|
||||
ConstructorParams: params,
|
||||
})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
return &types.Message{
|
||||
To: init_.Address,
|
||||
From: m.from,
|
||||
Value: initialAmount,
|
||||
Method: builtin3.MethodsInit.Exec,
|
||||
Params: enc,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m message3) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) {
|
||||
params, aerr := actors.SerializeParams(&paych3.UpdateChannelStateParams{
|
||||
Sv: *sv,
|
||||
Secret: secret,
|
||||
})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
return &types.Message{
|
||||
To: paych,
|
||||
From: m.from,
|
||||
Value: abi.NewTokenAmount(0),
|
||||
Method: builtin3.MethodsPaych.UpdateChannelState,
|
||||
Params: params,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m message3) Settle(paych address.Address) (*types.Message, error) {
|
||||
return &types.Message{
|
||||
To: paych,
|
||||
From: m.from,
|
||||
Value: abi.NewTokenAmount(0),
|
||||
Method: builtin3.MethodsPaych.Settle,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m message3) Collect(paych address.Address) (*types.Message, error) {
|
||||
return &types.Message{
|
||||
To: paych,
|
||||
From: m.from,
|
||||
Value: abi.NewTokenAmount(0),
|
||||
Method: builtin3.MethodsPaych.Collect,
|
||||
}, nil
|
||||
}
|
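As with multisig, the payment-channel builder is selected by actors version. A minimal usage sketch, assuming the hunk above is reached via paych.Message(actors.Version3, ...); newPaychCreateMsg and the funding amount are placeholders:

package example

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
	"github.com/filecoin-project/lotus/chain/types"
)

// newPaychCreateMsg builds the init-actor Exec message that creates a v3
// payment channel funded with one attoFIL.
func newPaychCreateMsg(from, to address.Address) (*types.Message, error) {
	return paych.Message(actors.Version3, from).Create(to, abi.NewTokenAmount(1))
}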
@ -15,6 +15,7 @@ import (
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -28,6 +29,9 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
// Load returns an abstract copy of payment channel state, regardless of actor version
|
||||
@ -37,6 +41,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.PaymentChannelActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.PaymentChannelActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
104
chain/actors/builtin/paych/state3.go
Normal file
@ -0,0 +1,104 @@
|
||||
package paych
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
paych3.State
|
||||
store adt.Store
|
||||
lsAmt *adt3.Array
|
||||
}
|
||||
|
||||
// Channel owner, who has funded the actor
|
||||
func (s *state3) From() (address.Address, error) {
|
||||
return s.State.From, nil
|
||||
}
|
||||
|
||||
// Recipient of payouts from channel
|
||||
func (s *state3) To() (address.Address, error) {
|
||||
return s.State.To, nil
|
||||
}
|
||||
|
||||
// Height at which the channel can be `Collected`
|
||||
func (s *state3) SettlingAt() (abi.ChainEpoch, error) {
|
||||
return s.State.SettlingAt, nil
|
||||
}
|
||||
|
||||
// Amount successfully redeemed through the payment channel, paid out on `Collect()`
|
||||
func (s *state3) ToSend() (abi.TokenAmount, error) {
|
||||
return s.State.ToSend, nil
|
||||
}
|
||||
|
||||
func (s *state3) getOrLoadLsAmt() (*adt3.Array, error) {
|
||||
if s.lsAmt != nil {
|
||||
return s.lsAmt, nil
|
||||
}
|
||||
|
||||
// Get the lane state from the chain
|
||||
lsamt, err := adt3.AsArray(s.store, s.State.LaneStates, paych3.LaneStatesAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.lsAmt = lsamt
|
||||
return lsamt, nil
|
||||
}
|
||||
|
||||
// Get total number of lanes
|
||||
func (s *state3) LaneCount() (uint64, error) {
|
||||
lsamt, err := s.getOrLoadLsAmt()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return lsamt.Length(), nil
|
||||
}
|
||||
|
||||
// Iterate lane states
|
||||
func (s *state3) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
|
||||
// Get the lane state from the chain
|
||||
lsamt, err := s.getOrLoadLsAmt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Note: we use a map instead of an array to store laneStates because the
|
||||
// client sets the lane ID (the index) and potentially they could use a
|
||||
// very large index.
|
||||
var ls paych3.LaneState
|
||||
return lsamt.ForEach(&ls, func(i int64) error {
|
||||
return cb(uint64(i), &laneState3{ls})
|
||||
})
|
||||
}
|
||||
|
||||
type laneState3 struct {
|
||||
paych3.LaneState
|
||||
}
|
||||
|
||||
func (ls *laneState3) Redeemed() (big.Int, error) {
|
||||
return ls.LaneState.Redeemed, nil
|
||||
}
|
||||
|
||||
func (ls *laneState3) Nonce() (uint64, error) {
|
||||
return ls.LaneState.Nonce, nil
|
||||
}
|
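The lane accessors above are enough to recompute channel totals without touching version-specific types. A minimal sketch, assuming the State and LaneState interfaces exported by this package; totalRedeemed is a hypothetical helper:

package example

import (
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
)

// totalRedeemed sums the redeemed amount across all lanes of a channel.
func totalRedeemed(st paych.State) (big.Int, error) {
	total := big.Zero()
	err := st.ForEachLaneState(func(_ uint64, ls paych.LaneState) error {
		r, err := ls.Redeemed()
		if err != nil {
			return err
		}
		total = big.Add(total, r)
		return nil
	})
	return total, err
}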
@ -16,6 +16,7 @@ import (
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -25,11 +26,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin2.StoragePowerActorAddr
|
||||
Methods = builtin2.MethodsPower
|
||||
Address = builtin3.StoragePowerActorAddr
|
||||
Methods = builtin3.MethodsPower
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
@ -38,6 +42,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.StoragePowerActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.StoragePowerActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
149
chain/actors/builtin/power/v3.go
Normal file
@ -0,0 +1,149 @@
|
||||
package power
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
power3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/power"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
power3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) TotalLocked() (abi.TokenAmount, error) {
|
||||
return s.TotalPledgeCollateral, nil
|
||||
}
|
||||
|
||||
func (s *state3) TotalPower() (Claim, error) {
|
||||
return Claim{
|
||||
RawBytePower: s.TotalRawBytePower,
|
||||
QualityAdjPower: s.TotalQualityAdjPower,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Committed power to the network. Includes miners below the minimum threshold.
|
||||
func (s *state3) TotalCommitted() (Claim, error) {
|
||||
return Claim{
|
||||
RawBytePower: s.TotalBytesCommitted,
|
||||
QualityAdjPower: s.TotalQABytesCommitted,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state3) MinerPower(addr address.Address) (Claim, bool, error) {
|
||||
claims, err := s.claims()
|
||||
if err != nil {
|
||||
return Claim{}, false, err
|
||||
}
|
||||
var claim power3.Claim
|
||||
ok, err := claims.Get(abi.AddrKey(addr), &claim)
|
||||
if err != nil {
|
||||
return Claim{}, false, err
|
||||
}
|
||||
return Claim{
|
||||
RawBytePower: claim.RawBytePower,
|
||||
QualityAdjPower: claim.QualityAdjPower,
|
||||
}, ok, nil
|
||||
}
|
||||
|
||||
func (s *state3) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
|
||||
return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
|
||||
}
|
||||
|
||||
func (s *state3) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
|
||||
return builtin.FromV3FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
|
||||
}
|
||||
|
||||
func (s *state3) MinerCounts() (uint64, uint64, error) {
|
||||
return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
|
||||
}
|
||||
|
||||
func (s *state3) ListAllMiners() ([]address.Address, error) {
|
||||
claims, err := s.claims()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var miners []address.Address
|
||||
err = claims.ForEach(nil, func(k string) error {
|
||||
a, err := address.NewFromBytes([]byte(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
miners = append(miners, a)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return miners, nil
|
||||
}
|
||||
|
||||
func (s *state3) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
|
||||
claims, err := s.claims()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var claim power3.Claim
|
||||
return claims.ForEach(&claim, func(k string) error {
|
||||
a, err := address.NewFromBytes([]byte(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(a, Claim{
|
||||
RawBytePower: claim.RawBytePower,
|
||||
QualityAdjPower: claim.QualityAdjPower,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state3) ClaimsChanged(other State) (bool, error) {
|
||||
other2, ok := other.(*state3)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Claims.Equals(other2.State.Claims), nil
|
||||
}
|
||||
|
||||
func (s *state3) claims() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.Claims, builtin3.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) decodeClaim(val *cbg.Deferred) (Claim, error) {
|
||||
var ci power3.Claim
|
||||
if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return Claim{}, err
|
||||
}
|
||||
return fromV3Claim(ci), nil
|
||||
}
|
||||
|
||||
func fromV3Claim(v3 power3.Claim) Claim {
|
||||
return Claim{
|
||||
RawBytePower: v3.RawBytePower,
|
||||
QualityAdjPower: v3.QualityAdjPower,
|
||||
}
|
||||
}
|
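ForEachClaim and MinerNominalPowerMeetsConsensusMinimum above combine naturally for consensus-eligibility scans. A minimal sketch against the abstract State interface; minersAboveMin is a hypothetical helper:

package example

import (
	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
)

// minersAboveMin lists the miners whose nominal power currently meets the
// consensus minimum.
func minersAboveMin(st power.State) ([]address.Address, error) {
	var out []address.Address
	err := st.ForEachClaim(func(miner address.Address, _ power.Claim) error {
		ok, err := st.MinerNominalPowerMeetsConsensusMinimum(miner)
		if err != nil {
			return err
		}
		if ok {
			out = append(out, miner)
		}
		return nil
	})
	return out, err
}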
@ -9,6 +9,7 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -22,11 +23,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin2.RewardActorAddr
|
||||
Methods = builtin2.MethodsReward
|
||||
Address = builtin3.RewardActorAddr
|
||||
Methods = builtin3.MethodsReward
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
@ -35,6 +39,8 @@ func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.RewardActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.RewardActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
86
chain/actors/builtin/reward/v3.go
Normal file
@ -0,0 +1,86 @@
|
||||
package reward
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
reward3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/reward"
|
||||
smoothing3 "github.com/filecoin-project/specs-actors/v3/actors/util/smoothing"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
reward3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) ThisEpochReward() (abi.TokenAmount, error) {
|
||||
return s.State.ThisEpochReward, nil
|
||||
}
|
||||
|
||||
func (s *state3) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
|
||||
return builtin.FilterEstimate{
|
||||
PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
|
||||
VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state3) ThisEpochBaselinePower() (abi.StoragePower, error) {
|
||||
return s.State.ThisEpochBaselinePower, nil
|
||||
}
|
||||
|
||||
func (s *state3) TotalStoragePowerReward() (abi.TokenAmount, error) {
|
||||
return s.State.TotalStoragePowerReward, nil
|
||||
}
|
||||
|
||||
func (s *state3) EffectiveBaselinePower() (abi.StoragePower, error) {
|
||||
return s.State.EffectiveBaselinePower, nil
|
||||
}
|
||||
|
||||
func (s *state3) EffectiveNetworkTime() (abi.ChainEpoch, error) {
|
||||
return s.State.EffectiveNetworkTime, nil
|
||||
}
|
||||
|
||||
func (s *state3) CumsumBaseline() (reward3.Spacetime, error) {
|
||||
return s.State.CumsumBaseline, nil
|
||||
}
|
||||
|
||||
func (s *state3) CumsumRealized() (reward3.Spacetime, error) {
|
||||
return s.State.CumsumRealized, nil
|
||||
}
|
||||
|
||||
func (s *state3) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
|
||||
return miner3.InitialPledgeForPower(
|
||||
qaPower,
|
||||
s.State.ThisEpochBaselinePower,
|
||||
s.State.ThisEpochRewardSmoothed,
|
||||
smoothing3.FilterEstimate{
|
||||
PositionEstimate: networkQAPower.PositionEstimate,
|
||||
VelocityEstimate: networkQAPower.VelocityEstimate,
|
||||
},
|
||||
circSupply,
|
||||
), nil
|
||||
}
|
||||
|
||||
func (s *state3) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
|
||||
return miner3.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
|
||||
smoothing3.FilterEstimate{
|
||||
PositionEstimate: networkQAPower.PositionEstimate,
|
||||
VelocityEstimate: networkQAPower.VelocityEstimate,
|
||||
},
|
||||
sectorWeight), nil
|
||||
}
|
@ -6,16 +6,21 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address.Address) (bool, abi.StoragePower, error) {
|
||||
// taking this as a function instead of asking the caller to call it helps reduce some of the error
|
||||
// checking boilerplate.
|
||||
//
|
||||
// "go made me do it"
|
||||
type rootFunc func() (adt.Map, error)
|
||||
|
||||
// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth
|
||||
func getDataCap(store adt.Store, ver actors.Version, root rootFunc, addr address.Address) (bool, abi.StoragePower, error) {
|
||||
if addr.Protocol() != address.ID {
|
||||
return false, big.Zero(), xerrors.Errorf("can only look up ID addresses")
|
||||
}
|
||||
|
||||
vh, err := adt.AsMap(store, root, ver)
|
||||
vh, err := root()
|
||||
if err != nil {
|
||||
return false, big.Zero(), xerrors.Errorf("loading verifreg: %w", err)
|
||||
}
|
||||
@ -30,8 +35,9 @@ func getDataCap(store adt.Store, ver actors.Version, root cid.Cid, addr address.
|
||||
return true, dcap, nil
|
||||
}
|
||||
|
||||
func forEachCap(store adt.Store, ver actors.Version, root cid.Cid, cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
vh, err := adt.AsMap(store, root, ver)
|
||||
// Assumes that the bitwidth for v3 HAMTs is the DefaultHamtBitwidth
|
||||
func forEachCap(store adt.Store, ver actors.Version, root rootFunc, cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
vh, err := root()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("loading verified clients: %w", err)
|
||||
}
|
||||
|
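The rootFunc indirection above lets each state version hand getDataCap/forEachCap a closure that already knows how to load its HAMT, so the shared helpers stay free of adt-version and bitwidth details. A minimal sketch of the pattern in isolation, mirroring the v3 usage elsewhere in this commit; the names here are illustrative, not the real verifreg helpers:

package example

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/adt"

	builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
	adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
)

// rootFunc defers HAMT loading to the state version that owns the root.
type rootFunc func() (adt.Map, error)

// verifiersMapV3 builds a loader for a v3 verifiers HAMT, which needs an
// explicit bitwidth (assumed to be builtin3.DefaultHamtBitwidth).
func verifiersMapV3(store adt.Store, root cid.Cid) rootFunc {
	return func() (adt.Map, error) {
		return adt3.AsMap(store, root, builtin3.DefaultHamtBitwidth)
	}
}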
@ -9,6 +9,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
||||
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state0)(nil)
|
||||
@ -32,17 +33,25 @@ func (s *state0) RootKey() (address.Address, error) {
|
||||
}
|
||||
|
||||
func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version0, s.State.VerifiedClients, addr)
|
||||
return getDataCap(s.store, actors.Version0, s.verifiedClients, addr)
|
||||
}
|
||||
|
||||
func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version0, s.State.Verifiers, addr)
|
||||
return getDataCap(s.store, actors.Version0, s.verifiers, addr)
|
||||
}
|
||||
|
||||
func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version0, s.State.Verifiers, cb)
|
||||
return forEachCap(s.store, actors.Version0, s.verifiers, cb)
|
||||
}
|
||||
|
||||
func (s *state0) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version0, s.State.VerifiedClients, cb)
|
||||
return forEachCap(s.store, actors.Version0, s.verifiedClients, cb)
|
||||
}
|
||||
|
||||
func (s *state0) verifiedClients() (adt.Map, error) {
|
||||
return adt0.AsMap(s.store, s.VerifiedClients)
|
||||
}
|
||||
|
||||
func (s *state0) verifiers() (adt.Map, error) {
|
||||
return adt0.AsMap(s.store, s.Verifiers)
|
||||
}
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
|
||||
adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state2)(nil)
|
||||
@ -32,17 +33,25 @@ func (s *state2) RootKey() (address.Address, error) {
|
||||
}
|
||||
|
||||
func (s *state2) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version2, s.State.VerifiedClients, addr)
|
||||
return getDataCap(s.store, actors.Version2, s.verifiedClients, addr)
|
||||
}
|
||||
|
||||
func (s *state2) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version2, s.State.Verifiers, addr)
|
||||
return getDataCap(s.store, actors.Version2, s.verifiers, addr)
|
||||
}
|
||||
|
||||
func (s *state2) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version2, s.State.Verifiers, cb)
|
||||
return forEachCap(s.store, actors.Version2, s.verifiers, cb)
|
||||
}
|
||||
|
||||
func (s *state2) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version2, s.State.VerifiedClients, cb)
|
||||
return forEachCap(s.store, actors.Version2, s.verifiedClients, cb)
|
||||
}
|
||||
|
||||
func (s *state2) verifiedClients() (adt.Map, error) {
|
||||
return adt2.AsMap(s.store, s.VerifiedClients)
|
||||
}
|
||||
|
||||
func (s *state2) verifiers() (adt.Map, error) {
|
||||
return adt2.AsMap(s.store, s.Verifiers)
|
||||
}
|
||||
|
58
chain/actors/builtin/verifreg/v3.go
Normal file
@ -0,0 +1,58 @@
|
||||
package verifreg
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg"
|
||||
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state3)(nil)
|
||||
|
||||
func load3(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state3{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state3 struct {
|
||||
verifreg3.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state3) RootKey() (address.Address, error) {
|
||||
return s.State.RootKey, nil
|
||||
}
|
||||
|
||||
func (s *state3) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version3, s.verifiedClients, addr)
|
||||
}
|
||||
|
||||
func (s *state3) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version3, s.verifiers, addr)
|
||||
}
|
||||
|
||||
func (s *state3) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version3, s.verifiers, cb)
|
||||
}
|
||||
|
||||
func (s *state3) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachCap(s.store, actors.Version3, s.verifiedClients, cb)
|
||||
}
|
||||
|
||||
func (s *state3) verifiedClients() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.VerifiedClients, builtin3.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state3) verifiers() (adt.Map, error) {
|
||||
return adt3.AsMap(s.store, s.Verifiers, builtin3.DefaultHamtBitwidth)
|
||||
}
|
@ -3,6 +3,7 @@ package verifreg
|
||||
import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@ -22,11 +23,14 @@ func init() {
|
||||
builtin.RegisterActorState(builtin2.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load2(store, root)
|
||||
})
|
||||
builtin.RegisterActorState(builtin3.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) {
|
||||
return load3(store, root)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin2.VerifiedRegistryActorAddr
|
||||
Methods = builtin2.MethodsVerifiedRegistry
|
||||
Address = builtin3.VerifiedRegistryActorAddr
|
||||
Methods = builtin3.MethodsVerifiedRegistry
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -35,6 +39,8 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return load0(store, act.Head)
|
||||
case builtin2.VerifiedRegistryActorCodeID:
|
||||
return load2(store, act.Head)
|
||||
case builtin3.VerifiedRegistryActorCodeID:
|
||||
return load3(store, act.Head)
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
@ -6,21 +6,28 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
|
||||
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||
verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
|
||||
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
|
||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||
paych2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/paych"
|
||||
verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
|
||||
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
paych3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/paych"
|
||||
verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg"
|
||||
)
|
||||
|
||||
const (
|
||||
ChainFinality = miner0.ChainFinality
|
||||
ChainFinality = miner3.ChainFinality
|
||||
SealRandomnessLookback = ChainFinality
|
||||
PaychSettleDelay = paych2.SettleDelay
|
||||
PaychSettleDelay = paych3.SettleDelay
|
||||
)
|
||||
|
||||
// SetSupportedProofTypes sets supported proof types, across all actor versions.
|
||||
@ -31,6 +38,10 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||
miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
|
||||
miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
|
||||
|
||||
miner3.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
|
||||
miner3.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
|
||||
miner3.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
|
||||
|
||||
AddSupportedProofTypes(types...)
|
||||
}
|
||||
|
||||
@ -49,6 +60,13 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
|
||||
miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
|
||||
miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
|
||||
miner3.PreCommitSealProofTypesV0[t] = struct{}{}
|
||||
|
||||
miner3.PreCommitSealProofTypesV7[t] = struct{}{}
|
||||
miner3.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
|
||||
miner3.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
@ -58,6 +76,7 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
|
||||
// Set for all miner versions.
|
||||
miner0.PreCommitChallengeDelay = delay
|
||||
miner2.PreCommitChallengeDelay = delay
|
||||
miner3.PreCommitChallengeDelay = delay
|
||||
}
|
||||
|
||||
// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
|
||||
@ -73,6 +92,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
|
||||
for _, policy := range builtin2.SealProofPolicies {
|
||||
policy.ConsensusMinerMinPower = p
|
||||
}
|
||||
|
||||
for _, policy := range builtin3.PoStProofPolicies {
|
||||
policy.ConsensusMinerMinPower = p
|
||||
}
|
||||
}
|
||||
|
||||
// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
|
||||
@ -80,6 +103,7 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
|
||||
func SetMinVerifiedDealSize(size abi.StoragePower) {
|
||||
verifreg0.MinVerifiedDealSize = size
|
||||
verifreg2.MinVerifiedDealSize = size
|
||||
verifreg3.MinVerifiedDealSize = size
|
||||
}
|
||||
|
||||
func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch {
|
||||
@ -88,6 +112,8 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) ab
|
||||
return miner0.MaxSealDuration[t]
|
||||
case actors.Version2:
|
||||
return miner2.MaxProveCommitDuration[t]
|
||||
case actors.Version3:
|
||||
return miner3.MaxProveCommitDuration[t]
|
||||
default:
|
||||
panic("unsupported actors version")
|
||||
}
|
||||
@ -103,6 +129,8 @@ func DealProviderCollateralBounds(
|
||||
return market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer)
|
||||
case actors.Version2:
|
||||
return market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
|
||||
case actors.Version3:
|
||||
return market3.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
|
||||
default:
|
||||
panic("unsupported network version")
|
||||
}
|
||||
@ -116,6 +144,12 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) {
|
||||
|
||||
miner2.WPoStChallengeWindow = period
|
||||
miner2.WPoStProvingPeriod = period * abi.ChainEpoch(miner2.WPoStPeriodDeadlines)
|
||||
|
||||
miner3.WPoStChallengeWindow = period
|
||||
miner3.WPoStProvingPeriod = period * abi.ChainEpoch(miner3.WPoStPeriodDeadlines)
|
||||
// by default, this is 2x finality which is 30 periods.
|
||||
// scale it if we're scaling the challenge period.
|
||||
miner3.WPoStDisputeWindow = period * 30
|
||||
}
|
||||
|
||||
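The new WPoStDisputeWindow line keeps the dispute window at 30 challenge periods (2x finality) even when tests shrink the challenge window. A minimal sketch of the resulting arithmetic, assuming this hunk lives in lotus's chain/actors/policy package:

package example

import (
	miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"

	"github.com/filecoin-project/lotus/chain/actors/policy"
)

// shrinkChallengeWindow shows the scaling: a 10-epoch challenge window yields
// a 300-epoch dispute window (10 * 30).
func shrinkChallengeWindow() {
	policy.SetWPoStChallengeWindow(10)
	_ = miner3.WPoStDisputeWindow // now 300
}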
func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
|
||||
@ -132,17 +166,17 @@ func GetMaxSectorExpirationExtension() abi.ChainEpoch {
|
||||
|
||||
// TODO: we'll probably need to abstract over this better in the future.
|
||||
func GetMaxPoStPartitions(p abi.RegisteredPoStProof) (int, error) {
|
||||
sectorsPerPart, err := builtin2.PoStProofWindowPoStPartitionSectors(p)
|
||||
sectorsPerPart, err := builtin3.PoStProofWindowPoStPartitionSectors(p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int(miner2.AddressedSectorsMax / sectorsPerPart), nil
|
||||
return int(miner3.AddressedSectorsMax / sectorsPerPart), nil
|
||||
}
|
||||
|
||||
func GetDefaultSectorSize() abi.SectorSize {
|
||||
// supported sector sizes are the same across versions.
|
||||
szs := make([]abi.SectorSize, 0, len(miner2.PreCommitSealProofTypesV8))
|
||||
for spt := range miner2.PreCommitSealProofTypesV8 {
|
||||
szs := make([]abi.SectorSize, 0, len(miner3.PreCommitSealProofTypesV8))
|
||||
for spt := range miner3.PreCommitSealProofTypesV8 {
|
||||
ss, err := spt.SectorSize()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -11,6 +11,7 @@ type Version int
|
||||
const (
|
||||
Version0 Version = 0
|
||||
Version2 Version = 2
|
||||
Version3 Version = 3
|
||||
)
|
||||
|
||||
// Converts a network version into an actors adt version.
|
||||
@ -20,6 +21,8 @@ func VersionForNetwork(version network.Version) Version {
|
||||
return Version0
|
||||
case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9:
|
||||
return Version2
|
||||
case network.Version10:
|
||||
return Version3
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported network version %d", version))
|
||||
}
|
||||
|
@ -14,7 +14,6 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/google/uuid"
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-cid"
|
||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
||||
@ -85,19 +84,6 @@ type ChainGen struct {
|
||||
lr repo.LockedRepo
|
||||
}
|
||||
|
||||
type mybs struct {
|
||||
blockstore.Blockstore
|
||||
}
|
||||
|
||||
func (m mybs) Get(c cid.Cid) (block.Block, error) {
|
||||
b, err := m.Blockstore.Get(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
var rootkeyMultisig = genesis.MultisigMeta{
|
||||
Signers: []address.Address{remAccTestKey},
|
||||
Threshold: 1,
|
||||
@ -134,12 +120,12 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
|
||||
return nil, xerrors.Errorf("taking mem-repo lock failed: %w", err)
|
||||
}
|
||||
|
||||
ds, err := lr.Datastore("/metadata")
|
||||
ds, err := lr.Datastore(context.TODO(), "/metadata")
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to get metadata datastore: %w", err)
|
||||
}
|
||||
|
||||
bs, err := lr.Blockstore(repo.BlockstoreChain)
|
||||
bs, err := lr.Blockstore(context.TODO(), repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -152,8 +138,6 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
bs = mybs{bs}
|
||||
|
||||
ks, err := lr.KeyStore()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting repo keystore failed: %w", err)
|
||||
@ -465,7 +449,12 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners
|
||||
}
|
||||
}
|
||||
|
||||
return store.NewFullTipSet(blks), nil
|
||||
fts := store.NewFullTipSet(blks)
|
||||
if err := cg.cs.PutTipSet(context.TODO(), fts.TipSet()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fts, nil
|
||||
}
|
||||
|
||||
func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket,
|
||||
|
@ -9,10 +9,10 @@ import (
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
ffi "github.com/filecoin-project/filecoin-ffi"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/lib/sigs/bls"
|
||||
)
|
||||
|
||||
func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletAPI, bt *api.BlockTemplate) (*types.FullBlock, error) {
|
||||
@ -140,35 +140,29 @@ func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w api.WalletA
|
||||
}
|
||||
|
||||
func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) {
|
||||
sigsS := make([][]byte, len(sigs))
|
||||
sigsS := make([]ffi.Signature, len(sigs))
|
||||
for i := 0; i < len(sigs); i++ {
|
||||
sigsS[i] = sigs[i].Data
|
||||
copy(sigsS[i][:], sigs[i].Data[:ffi.SignatureBytes])
|
||||
}
|
||||
|
||||
aggregator := new(bls.AggregateSignature).AggregateCompressed(sigsS)
|
||||
if aggregator == nil {
|
||||
aggSig := ffi.Aggregate(sigsS)
|
||||
if aggSig == nil {
|
||||
if len(sigs) > 0 {
|
||||
return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs))
|
||||
}
|
||||
|
||||
zeroSig := ffi.CreateZeroSignature()
|
||||
|
||||
// Note: for blst this condition should not happen - nil should not
|
||||
// be returned
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: new(bls.Signature).Compress(),
|
||||
Data: zeroSig[:],
|
||||
}, nil
|
||||
}
|
||||
aggSigAff := aggregator.ToAffine()
|
||||
if aggSigAff == nil {
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: new(bls.Signature).Compress(),
|
||||
}, nil
|
||||
}
|
||||
aggSig := aggSigAff.Compress()
|
||||
return &crypto.Signature{
|
||||
Type: crypto.SigTypeBLS,
|
||||
Data: aggSig,
|
||||
Data: aggSig[:],
|
||||
}, nil
|
||||
}
|
||||
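The new filecoin-ffi path can be exercised end to end with a round trip that mirrors both aggregateSignatures here and verifyBlsAggregate later in this diff. Only Aggregate, HashVerify, CreateZeroSignature and the *Bytes constants appear in this change; the key helpers (PrivateKeyGenerate, PrivateKeySign, PrivateKeyPublicKey) are assumed from filecoin-ffi's BLS bindings, so treat this as a sketch rather than lotus code.

package main

import (
	"fmt"

	ffi "github.com/filecoin-project/filecoin-ffi"
)

func main() {
	msgs := []ffi.Message{[]byte("message one"), []byte("message two")}

	sigs := make([]ffi.Signature, len(msgs))
	pubks := make([]ffi.PublicKey, len(msgs))
	for i, m := range msgs {
		// Assumed helpers from filecoin-ffi's BLS bindings.
		priv := ffi.PrivateKeyGenerate()
		sigs[i] = *ffi.PrivateKeySign(priv, m)
		pubks[i] = ffi.PrivateKeyPublicKey(priv)
	}

	// Aggregate, as aggregateSignatures does above.
	agg := ffi.Aggregate(sigs)
	if agg == nil {
		zero := ffi.CreateZeroSignature()
		agg = &zero
	}

	// Verify the aggregate against the per-message public keys,
	// as verifyBlsAggregate does during block validation.
	fmt.Println("aggregate verifies:", ffi.HashVerify(agg, msgs, pubks))
}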
|
||||
|
@ -48,9 +48,13 @@ func saveConfig(cfg *types.MpoolConfig, ds dtypes.MetadataDS) error {
|
||||
}
|
||||
|
||||
func (mp *MessagePool) GetConfig() *types.MpoolConfig {
|
||||
mp.cfgLk.Lock()
|
||||
defer mp.cfgLk.Unlock()
|
||||
return mp.cfg.Clone()
|
||||
return mp.getConfig().Clone()
|
||||
}
|
||||
|
||||
func (mp *MessagePool) getConfig() *types.MpoolConfig {
|
||||
mp.cfgLk.RLock()
|
||||
defer mp.cfgLk.RUnlock()
|
||||
return mp.cfg
|
||||
}
|
||||
|
||||
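Because GetConfig returns a Clone guarded by the read lock, callers can mutate the copy freely and hand it back. A brief sketch of that pattern; SetConfig is not part of this hunk and is assumed to be the pool's usual exported setter:

package example

import (
	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/messagepool"
)

// prioritize adds addr to the pool's priority list using the clone-modify-set pattern.
func prioritize(mp *messagepool.MessagePool, addr address.Address) error {
	cfg := mp.GetConfig() // private copy, safe to mutate
	cfg.PriorityAddrs = append(cfg.PriorityAddrs, addr)
	return mp.SetConfig(cfg) // assumed setter that validates and stores the config
}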
func validateConfg(cfg *types.MpoolConfig) error {
|
||||
|
@ -133,7 +133,7 @@ type MessagePool struct {
|
||||
curTsLk sync.Mutex // DO NOT LOCK INSIDE lk
|
||||
curTs *types.TipSet
|
||||
|
||||
cfgLk sync.Mutex
|
||||
cfgLk sync.RWMutex
|
||||
cfg *types.MpoolConfig
|
||||
|
||||
api Provider
|
||||
@ -781,7 +781,7 @@ func (mp *MessagePool) addLocked(m *types.SignedMessage, strict, untrusted bool)
|
||||
|
||||
if incr {
|
||||
mp.currentSize++
|
||||
if mp.currentSize > mp.cfg.SizeLimitHigh {
|
||||
if mp.currentSize > mp.getConfig().SizeLimitHigh {
|
||||
// send signal to prune messages if it hasnt already been sent
|
||||
select {
|
||||
case mp.pruneTrigger <- struct{}{}:
|
||||
|
@ -19,7 +19,8 @@ func (mp *MessagePool) pruneExcessMessages() error {
|
||||
mp.lk.Lock()
|
||||
defer mp.lk.Unlock()
|
||||
|
||||
if mp.currentSize < mp.cfg.SizeLimitHigh {
|
||||
mpCfg := mp.getConfig()
|
||||
if mp.currentSize < mpCfg.SizeLimitHigh {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -27,7 +28,7 @@ func (mp *MessagePool) pruneExcessMessages() error {
|
||||
case <-mp.pruneCooldown:
|
||||
err := mp.pruneMessages(context.TODO(), ts)
|
||||
go func() {
|
||||
time.Sleep(mp.cfg.PruneCooldown)
|
||||
time.Sleep(mpCfg.PruneCooldown)
|
||||
mp.pruneCooldown <- struct{}{}
|
||||
}()
|
||||
return err
|
||||
@ -53,8 +54,9 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
|
||||
// protected actors -- not pruned
|
||||
protected := make(map[address.Address]struct{})
|
||||
|
||||
mpCfg := mp.getConfig()
|
||||
// we never prune priority addresses
|
||||
for _, actor := range mp.cfg.PriorityAddrs {
|
||||
for _, actor := range mpCfg.PriorityAddrs {
|
||||
protected[actor] = struct{}{}
|
||||
}
|
||||
|
||||
@ -90,7 +92,7 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro
|
||||
})
|
||||
|
||||
// Keep messages (remove them from pruneMsgs) from chains while we are under the low water mark
|
||||
loWaterMark := mp.cfg.SizeLimitLow
|
||||
loWaterMark := mpCfg.SizeLimitLow
|
||||
keepLoop:
|
||||
for _, chain := range chains {
|
||||
for _, m := range chain.msgs {
|
||||
|
@ -532,14 +532,14 @@ func (mp *MessagePool) selectPriorityMessages(pending map[address.Address]map[ui
|
||||
log.Infow("select priority messages done", "took", dt)
|
||||
}
|
||||
}()
|
||||
|
||||
result := make([]*types.SignedMessage, 0, mp.cfg.SizeLimitLow)
|
||||
mpCfg := mp.getConfig()
|
||||
result := make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow)
|
||||
gasLimit := int64(build.BlockGasLimit)
|
||||
minGas := int64(gasguess.MinGas)
|
||||
|
||||
// 1. Get priority actor chains
|
||||
var chains []*msgChain
|
||||
priority := mp.cfg.PriorityAddrs
|
||||
priority := mpCfg.PriorityAddrs
|
||||
for _, actor := range priority {
|
||||
mset, ok := pending[actor]
|
||||
if ok {
|
||||
|
@ -20,6 +20,10 @@ import (
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
states0 "github.com/filecoin-project/specs-actors/actors/states"
|
||||
states2 "github.com/filecoin-project/specs-actors/v2/actors/states"
|
||||
states3 "github.com/filecoin-project/specs-actors/v3/actors/states"
|
||||
)
|
||||
|
||||
var log = logging.Logger("statetree")
|
||||
@ -144,23 +148,12 @@ func VersionForNetwork(ver network.Version) types.StateTreeVersion {
|
||||
return types.StateTreeVersion1
|
||||
}
|
||||
|
||||
func adtForSTVersion(ver types.StateTreeVersion) actors.Version {
|
||||
switch ver {
|
||||
case types.StateTreeVersion0:
|
||||
return actors.Version0
|
||||
case types.StateTreeVersion1:
|
||||
return actors.Version2
|
||||
default:
|
||||
panic("unhandled state tree version")
|
||||
}
|
||||
}
|
||||
|
||||
func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, error) {
|
||||
var info cid.Cid
|
||||
switch ver {
|
||||
case types.StateTreeVersion0:
|
||||
// info is undefined
|
||||
case types.StateTreeVersion1:
|
||||
case types.StateTreeVersion1, types.StateTreeVersion2:
|
||||
var err error
|
||||
info, err = cst.Put(context.TODO(), new(types.StateInfo0))
|
||||
if err != nil {
|
||||
@ -169,13 +162,34 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e
|
||||
default:
|
||||
return nil, xerrors.Errorf("unsupported state tree version: %d", ver)
|
||||
}
|
||||
root, err := adt.NewMap(adt.WrapStore(context.TODO(), cst), adtForSTVersion(ver))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
store := adt.WrapStore(context.TODO(), cst)
|
||||
var hamt adt.Map
|
||||
switch ver {
|
||||
case types.StateTreeVersion0:
|
||||
tree, err := states0.NewTree(store)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to create state tree: %w", err)
|
||||
}
|
||||
hamt = tree.Map
|
||||
case types.StateTreeVersion1:
|
||||
tree, err := states2.NewTree(store)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to create state tree: %w", err)
|
||||
}
|
||||
hamt = tree.Map
|
||||
case types.StateTreeVersion2:
|
||||
tree, err := states3.NewTree(store)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to create state tree: %w", err)
|
||||
}
|
||||
hamt = tree.Map
|
||||
default:
|
||||
return nil, xerrors.Errorf("unsupported state tree version: %d", ver)
|
||||
}
|
||||
|
||||
s := &StateTree{
|
||||
root: root,
|
||||
root: hamt,
|
||||
info: info,
|
||||
version: ver,
|
||||
Store: cst,
|
||||
@ -194,30 +208,49 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) {
|
||||
root.Version = types.StateTreeVersion0
|
||||
}
|
||||
|
||||
switch root.Version {
|
||||
case types.StateTreeVersion0, types.StateTreeVersion1:
|
||||
// Load the actual state-tree HAMT.
|
||||
nd, err := adt.AsMap(
|
||||
adt.WrapStore(context.TODO(), cst), root.Actors,
|
||||
adtForSTVersion(root.Version),
|
||||
)
|
||||
if err != nil {
|
||||
log.Errorf("loading hamt node %s failed: %s", c, err)
|
||||
return nil, err
|
||||
}
|
||||
store := adt.WrapStore(context.TODO(), cst)
|
||||
|
||||
s := &StateTree{
|
||||
root: nd,
|
||||
info: root.Info,
|
||||
version: root.Version,
|
||||
Store: cst,
|
||||
snaps: newStateSnaps(),
|
||||
var (
|
||||
hamt adt.Map
|
||||
err error
|
||||
)
|
||||
switch root.Version {
|
||||
case types.StateTreeVersion0:
|
||||
var tree *states0.Tree
|
||||
tree, err = states0.LoadTree(store, root.Actors)
|
||||
if tree != nil {
|
||||
hamt = tree.Map
|
||||
}
|
||||
case types.StateTreeVersion1:
|
||||
var tree *states2.Tree
|
||||
tree, err = states2.LoadTree(store, root.Actors)
|
||||
if tree != nil {
|
||||
hamt = tree.Map
|
||||
}
|
||||
case types.StateTreeVersion2:
|
||||
var tree *states3.Tree
|
||||
tree, err = states3.LoadTree(store, root.Actors)
|
||||
if tree != nil {
|
||||
hamt = tree.Map
|
||||
}
|
||||
s.lookupIDFun = s.lookupIDinternal
|
||||
return s, nil
|
||||
default:
|
||||
return nil, xerrors.Errorf("unsupported state tree version: %d", root.Version)
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("failed to load state tree: %s", err)
|
||||
return nil, xerrors.Errorf("failed to load state tree: %w", err)
|
||||
}
|
||||
|
||||
s := &StateTree{
|
||||
root: hamt,
|
||||
info: root.Info,
|
||||
version: root.Version,
|
||||
Store: cst,
|
||||
snaps: newStateSnaps(),
|
||||
}
|
||||
s.lookupIDFun = s.lookupIDinternal
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
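The version split above can be sanity-checked by round-tripping an actor through a version 2 tree backed by an in-memory blockstore. This is only a sketch; it leans on lotus helpers that appear elsewhere in this diff (blockstore.NewTemporarySync, state.NewStateTree/LoadStateTree, builtin3.AccountActorCodeID) and uses a code CID as a placeholder actor head.

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/big"
	cbor "github.com/ipfs/go-ipld-cbor"

	"github.com/filecoin-project/lotus/chain/state"
	"github.com/filecoin-project/lotus/chain/types"
	bstore "github.com/filecoin-project/lotus/lib/blockstore"
	builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
)

func main() {
	ctx := context.Background()
	cst := cbor.NewCborStore(bstore.NewTemporarySync())

	// StateTreeVersion2 selects the specs-actors v3 states.Tree, per NewStateTree above.
	st, err := state.NewStateTree(cst, types.StateTreeVersion2)
	if err != nil {
		panic(err)
	}

	addr, _ := address.NewIDAddress(1000)
	err = st.SetActor(addr, &types.Actor{
		Code:    builtin3.AccountActorCodeID,
		Head:    builtin3.AccountActorCodeID, // placeholder CID, illustration only
		Balance: big.Zero(),
	})
	if err != nil {
		panic(err)
	}

	root, err := st.Flush(ctx)
	if err != nil {
		panic(err)
	}

	// LoadStateTree re-detects the tree version from the persisted StateRoot.
	loaded, err := state.LoadStateTree(cst, root)
	if err != nil {
		panic(err)
	}
	act, err := loaded.GetActor(addr)
	if err != nil {
		panic(err)
	}
	fmt.Println("round-tripped actor code:", act.Code)
}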
func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
|
||||
|
@ -4,7 +4,12 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/rt"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
@ -29,29 +34,95 @@ import (
|
||||
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv4"
|
||||
"github.com/filecoin-project/specs-actors/v2/actors/migration/nv7"
|
||||
"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// UpgradeFunc is a migration function run at every upgrade.
|
||||
// MigrationCache can be used to cache information used by a migration. This is primarily useful to
|
||||
// "pre-compute" some migration state ahead of time, and make it accessible in the migration itself.
|
||||
type MigrationCache interface {
|
||||
Write(key string, value cid.Cid) error
|
||||
Read(key string) (bool, cid.Cid, error)
|
||||
Load(key string, loadFunc func() (cid.Cid, error)) (cid.Cid, error)
|
||||
}
|
||||
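For orientation, a map-backed cache satisfying this interface could look like the sketch below; it is illustrative only, the cache lotus actually wires in is nv10.MemMigrationCache (see the stmgr changes later in this diff).

package example

import (
	"sync"

	"github.com/ipfs/go-cid"
)

// mapMigrationCache is an illustrative, map-backed MigrationCache.
type mapMigrationCache struct {
	mu sync.Mutex
	m  map[string]cid.Cid
}

func newMapMigrationCache() *mapMigrationCache {
	return &mapMigrationCache{m: make(map[string]cid.Cid)}
}

func (c *mapMigrationCache) Write(key string, value cid.Cid) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[key] = value
	return nil
}

func (c *mapMigrationCache) Read(key string) (bool, cid.Cid, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.m[key]
	return ok, v, nil
}

// Load returns the cached value for key, computing and storing it on a miss.
func (c *mapMigrationCache) Load(key string, loadFunc func() (cid.Cid, error)) (cid.Cid, error) {
	if found, v, err := c.Read(key); err != nil {
		return cid.Undef, err
	} else if found {
		return v, nil
	}
	v, err := loadFunc()
	if err != nil {
		return cid.Undef, err
	}
	return v, c.Write(key, v)
}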
|
||||
// MigrationFunc is a migration function run at every upgrade.
|
||||
//
|
||||
// - The cache is a per-upgrade cache, pre-populated by pre-migrations.
|
||||
// - The oldState is the state produced by the upgrade epoch.
|
||||
// - The returned newState is the new state that will be used by the next epoch.
|
||||
// - The height is the upgrade epoch height (already executed).
|
||||
// - The tipset is the tipset for the last non-null block before the upgrade. Do
|
||||
// not assume that ts.Height() is the upgrade height.
|
||||
type UpgradeFunc func(ctx context.Context, sm *StateManager, cb ExecCallback, oldState cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (newState cid.Cid, err error)
|
||||
type MigrationFunc func(
|
||||
ctx context.Context,
|
||||
sm *StateManager, cache MigrationCache,
|
||||
cb ExecCallback, oldState cid.Cid,
|
||||
height abi.ChainEpoch, ts *types.TipSet,
|
||||
) (newState cid.Cid, err error)
|
||||
|
||||
// PreMigrationFunc is a function run _before_ a network upgrade to pre-compute part of the network
|
||||
// upgrade and speed it up.
|
||||
type PreMigrationFunc func(
|
||||
ctx context.Context,
|
||||
sm *StateManager, cache MigrationCache,
|
||||
oldState cid.Cid,
|
||||
height abi.ChainEpoch, ts *types.TipSet,
|
||||
) error
|
||||
|
||||
// PreMigration describes a pre-migration step to prepare for a network state upgrade. Pre-migrations
|
||||
// are optimizations, are not guaranteed to run, and may be canceled and/or run multiple times.
|
||||
type PreMigration struct {
|
||||
// PreMigration is the pre-migration function to run at the specified time. This function is
|
||||
// run asynchronously and must abort promptly when canceled.
|
||||
PreMigration PreMigrationFunc
|
||||
|
||||
// StartWithin specifies that this pre-migration should be started at most StartWithin
|
||||
// epochs before the upgrade.
|
||||
StartWithin abi.ChainEpoch
|
||||
|
||||
// DontStartWithin specifies that this pre-migration should not be started DontStartWithin
|
||||
// epochs before the final upgrade epoch.
|
||||
//
|
||||
// This should be set such that the pre-migration is likely to complete before StopWithin.
|
||||
DontStartWithin abi.ChainEpoch
|
||||
|
||||
// StopWithin specifies that this pre-migration should be stopped StopWithin epochs of the
|
||||
// final upgrade epoch.
|
||||
StopWithin abi.ChainEpoch
|
||||
}
|
||||
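Concretely, for an upgrade at epoch E the first v3 entry below (StartWithin 120, DontStartWithin 60, StopWithin 35) may be started anywhere in [E-120, E-60) and is cancelled at E-35. The helper below only mirrors the epoch arithmetic that preMigrationWorker performs later in this diff:

package example

import "github.com/filecoin-project/go-state-types/abi"

// preMigrationWindow returns the first epoch at which a pre-migration may start,
// the epoch at or after which it must no longer be started, and the epoch at
// which a still-running pre-migration is cancelled, mirroring preMigrationWorker.
func preMigrationWindow(upgrade, startWithin, dontStartWithin, stopWithin abi.ChainEpoch) (start, notAfter, stop abi.ChainEpoch) {
	start = upgrade - startWithin
	notAfter = upgrade - dontStartWithin
	stop = upgrade - stopWithin
	if notAfter > stop { // we can't start after we stop
		notAfter = stop - 1
	}
	return start, notAfter, stop
}

// For upgrade=1000 and the values 120/60/35: start=880, notAfter=940, stop=965.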
|
||||
type Upgrade struct {
|
||||
Height abi.ChainEpoch
|
||||
Network network.Version
|
||||
Expensive bool
|
||||
Migration UpgradeFunc
|
||||
Migration MigrationFunc
|
||||
|
||||
// PreMigrations specifies a set of pre-migration functions to run at the indicated epochs.
|
||||
// These functions should fill the given cache with information that can speed up the
|
||||
// eventual full migration at the upgrade epoch.
|
||||
PreMigrations []PreMigration
|
||||
}
|
||||
|
||||
type UpgradeSchedule []Upgrade
|
||||
|
||||
type migrationLogger struct{}
|
||||
|
||||
func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) {
|
||||
switch level {
|
||||
case rt.DEBUG:
|
||||
log.Debugf(msg, args...)
|
||||
case rt.INFO:
|
||||
log.Infof(msg, args...)
|
||||
case rt.WARN:
|
||||
log.Warnf(msg, args...)
|
||||
case rt.ERROR:
|
||||
log.Errorf(msg, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
var us UpgradeSchedule
|
||||
|
||||
@ -100,32 +171,24 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
Height: build.UpgradeOrangeHeight,
|
||||
Network: network.Version9,
|
||||
Migration: nil,
|
||||
}, {
|
||||
Height: build.UpgradeActorsV3Height,
|
||||
Network: network.Version10,
|
||||
Migration: UpgradeActorsV3,
|
||||
PreMigrations: []PreMigration{{
|
||||
PreMigration: PreUpgradeActorsV3,
|
||||
StartWithin: 120,
|
||||
DontStartWithin: 60,
|
||||
StopWithin: 35,
|
||||
}, {
|
||||
PreMigration: PreUpgradeActorsV3,
|
||||
StartWithin: 30,
|
||||
DontStartWithin: 15,
|
||||
StopWithin: 5,
|
||||
}},
|
||||
Expensive: true,
|
||||
}}
|
||||
|
||||
if build.UpgradeActorsV2Height == math.MaxInt64 { // disable actors upgrade
|
||||
updates = []Upgrade{{
|
||||
Height: build.UpgradeBreezeHeight,
|
||||
Network: network.Version1,
|
||||
Migration: UpgradeFaucetBurnRecovery,
|
||||
}, {
|
||||
Height: build.UpgradeSmokeHeight,
|
||||
Network: network.Version2,
|
||||
Migration: nil,
|
||||
}, {
|
||||
Height: build.UpgradeIgnitionHeight,
|
||||
Network: network.Version3,
|
||||
Migration: UpgradeIgnition,
|
||||
}, {
|
||||
Height: build.UpgradeRefuelHeight,
|
||||
Network: network.Version3,
|
||||
Migration: UpgradeRefuel,
|
||||
}, {
|
||||
Height: build.UpgradeLiftoffHeight,
|
||||
Network: network.Version3,
|
||||
Migration: UpgradeLiftoff,
|
||||
}}
|
||||
}
|
||||
|
||||
for _, u := range updates {
|
||||
if u.Height < 0 {
|
||||
// upgrade disabled
|
||||
@ -137,14 +200,43 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
|
||||
}
|
||||
|
||||
func (us UpgradeSchedule) Validate() error {
|
||||
// Make sure we're not trying to upgrade to version 0.
|
||||
// Make sure each upgrade is valid.
|
||||
for _, u := range us {
|
||||
if u.Network <= 0 {
|
||||
return xerrors.Errorf("cannot upgrade to version <= 0: %d", u.Network)
|
||||
}
|
||||
|
||||
for _, m := range u.PreMigrations {
|
||||
if m.StartWithin <= 0 {
|
||||
return xerrors.Errorf("pre-migration must specify a positive start-within epoch")
|
||||
}
|
||||
|
||||
if m.DontStartWithin < 0 || m.StopWithin < 0 {
|
||||
return xerrors.Errorf("pre-migration must specify non-negative epochs")
|
||||
}
|
||||
|
||||
if m.StartWithin <= m.StopWithin {
|
||||
return xerrors.Errorf("pre-migration start-within must come before stop-within")
|
||||
}
|
||||
|
||||
// If we have a dont-start-within.
|
||||
if m.DontStartWithin != 0 {
|
||||
if m.DontStartWithin < m.StopWithin {
|
||||
return xerrors.Errorf("pre-migration dont-start-within must come before stop-within")
|
||||
}
|
||||
if m.StartWithin <= m.DontStartWithin {
|
||||
return xerrors.Errorf("pre-migration start-within must come after dont-start-within")
|
||||
}
|
||||
}
|
||||
}
|
||||
if !sort.SliceIsSorted(u.PreMigrations, func(i, j int) bool {
|
||||
return u.PreMigrations[i].StartWithin > u.PreMigrations[j].StartWithin //nolint:scopelint,gosec
|
||||
}) {
|
||||
return xerrors.Errorf("pre-migrations must be sorted by start epoch")
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure all the upgrades make sense.
|
||||
// Make sure the upgrade order makes sense.
|
||||
for i := 1; i < len(us); i++ {
|
||||
prev := &us[i-1]
|
||||
curr := &us[i]
|
||||
@ -166,12 +258,28 @@ func (us UpgradeSchedule) Validate() error {
|
||||
func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecCallback, ts *types.TipSet) (cid.Cid, error) {
|
||||
retCid := root
|
||||
var err error
|
||||
f, ok := sm.stateMigrations[height]
|
||||
if ok {
|
||||
retCid, err = f(ctx, sm, cb, root, height, ts)
|
||||
u := sm.stateMigrations[height]
|
||||
if u != nil && u.upgrade != nil {
|
||||
startTime := time.Now()
|
||||
log.Warnw("STARTING migration", "height", height, "from", root)
|
||||
// Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may
|
||||
// have to migrate multiple times.
|
||||
tmpCache := u.cache.Clone()
|
||||
retCid, err = u.upgrade(ctx, sm, tmpCache, cb, root, height, ts)
|
||||
if err != nil {
|
||||
log.Errorw("FAILED migration", "height", height, "from", root, "error", err)
|
||||
return cid.Undef, err
|
||||
}
|
||||
// Yes, we update the cache, even for the final upgrade epoch. Why? Reverts. This
|
||||
// can save us a _lot_ of time because very few actors will have changed if we
|
||||
// do a small revert then need to re-run the migration.
|
||||
u.cache.Update(tmpCache)
|
||||
log.Warnw("COMPLETED migration",
|
||||
"height", height,
|
||||
"from", root,
|
||||
"to", retCid,
|
||||
"duration", time.Since(startTime),
|
||||
)
|
||||
}
|
||||
|
||||
return retCid, nil
|
||||
@ -182,6 +290,109 @@ func (sm *StateManager) hasExpensiveFork(ctx context.Context, height abi.ChainEp
|
||||
return ok
|
||||
}
|
||||
|
||||
func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv10.MemMigrationCache, ts *types.TipSet) {
|
||||
height := ts.Height()
|
||||
parent := ts.ParentState()
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
log.Warn("STARTING pre-migration")
|
||||
// Clone the cache so we don't actually _update_ it
|
||||
// till we're done. Otherwise, if we fail, the next
|
||||
// migration to use the cache may assume that
|
||||
// certain blocks exist, even if they don't.
|
||||
tmpCache := cache.Clone()
|
||||
err := fn(ctx, sm, tmpCache, parent, height, ts)
|
||||
if err != nil {
|
||||
log.Errorw("FAILED pre-migration", "error", err)
|
||||
return
|
||||
}
|
||||
// Finally, if everything worked, update the cache.
|
||||
cache.Update(tmpCache)
|
||||
log.Warnw("COMPLETED pre-migration", "duration", time.Since(startTime))
|
||||
}
|
||||
|
||||
func (sm *StateManager) preMigrationWorker(ctx context.Context) {
|
||||
defer close(sm.shutdown)
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
type op struct {
|
||||
after abi.ChainEpoch
|
||||
notAfter abi.ChainEpoch
|
||||
run func(ts *types.TipSet)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait()
|
||||
|
||||
// Turn each pre-migration into an operation in a schedule.
|
||||
var schedule []op
|
||||
for upgradeEpoch, migration := range sm.stateMigrations {
|
||||
cache := migration.cache
|
||||
for _, prem := range migration.preMigrations {
|
||||
preCtx, preCancel := context.WithCancel(ctx)
|
||||
migrationFunc := prem.PreMigration
|
||||
|
||||
afterEpoch := upgradeEpoch - prem.StartWithin
|
||||
notAfterEpoch := upgradeEpoch - prem.DontStartWithin
|
||||
stopEpoch := upgradeEpoch - prem.StopWithin
|
||||
// We can't start after we stop.
|
||||
if notAfterEpoch > stopEpoch {
|
||||
notAfterEpoch = stopEpoch - 1
|
||||
}
|
||||
|
||||
// Add an op to start a pre-migration.
|
||||
schedule = append(schedule, op{
|
||||
after: afterEpoch,
|
||||
notAfter: notAfterEpoch,
|
||||
|
||||
// TODO: are these values correct?
|
||||
run: func(ts *types.TipSet) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
runPreMigration(preCtx, sm, migrationFunc, cache, ts)
|
||||
}()
|
||||
},
|
||||
})
|
||||
|
||||
// Add an op to cancel the pre-migration if it's still running.
|
||||
schedule = append(schedule, op{
|
||||
after: stopEpoch,
|
||||
notAfter: -1,
|
||||
run: func(ts *types.TipSet) { preCancel() },
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Then sort by epoch.
|
||||
sort.Slice(schedule, func(i, j int) bool {
|
||||
return schedule[i].after < schedule[j].after
|
||||
})
|
||||
|
||||
// Finally, when the head changes, see if there's anything we need to do.
|
||||
//
|
||||
// We're intentionally ignoring reorgs as they don't matter for our purposes.
|
||||
for change := range sm.cs.SubHeadChanges(ctx) {
|
||||
for _, head := range change {
|
||||
for len(schedule) > 0 {
|
||||
op := &schedule[0]
|
||||
if head.Val.Height() < op.after {
|
||||
break
|
||||
}
|
||||
|
||||
// If we haven't passed the pre-migration height...
|
||||
if op.notAfter < 0 || head.Val.Height() < op.notAfter {
|
||||
op.run(head.Val)
|
||||
}
|
||||
schedule = schedule[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmount, cb func(trace types.ExecutionTrace)) error {
|
||||
fromAct, err := tree.GetActor(from)
|
||||
if err != nil {
|
||||
@ -235,7 +446,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
|
||||
return nil
|
||||
}
|
||||
|
||||
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
// Some initial parameters
|
||||
FundsForMiners := types.FromFil(1_000_000)
|
||||
LookbackEpoch := abi.ChainEpoch(32000)
|
||||
@ -521,7 +732,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, cb ExecCal
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeIgnition(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
store := sm.cs.Store(ctx)
|
||||
|
||||
if build.UpgradeLiftoffHeight <= epoch {
|
||||
@ -576,7 +787,7 @@ func UpgradeIgnition(ctx context.Context, sm *StateManager, cb ExecCallback, roo
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeRefuel(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
|
||||
store := sm.cs.Store(ctx)
|
||||
tree, err := sm.StateTree(root)
|
||||
@ -602,7 +813,7 @@ func UpgradeRefuel(ctx context.Context, sm *StateManager, cb ExecCallback, root
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeActorsV2(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync())
|
||||
store := store.ActorStore(ctx, buf)
|
||||
|
||||
@ -648,7 +859,7 @@ func UpgradeActorsV2(ctx context.Context, sm *StateManager, cb ExecCallback, roo
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeLiftoff(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
tree, err := sm.StateTree(root)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("getting state tree: %w", err)
|
||||
@ -662,7 +873,7 @@ func UpgradeLiftoff(ctx context.Context, sm *StateManager, cb ExecCallback, root
|
||||
return tree.Flush(ctx)
|
||||
}
|
||||
|
||||
func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
func UpgradeCalico(ctx context.Context, sm *StateManager, _ MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
store := sm.cs.Store(ctx)
|
||||
var stateRoot types.StateRoot
|
||||
if err := store.Get(ctx, root, &stateRoot); err != nil {
|
||||
@ -704,6 +915,103 @@ func UpgradeCalico(ctx context.Context, sm *StateManager, cb ExecCallback, root
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func UpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
// Use all the CPUs except 3.
|
||||
workerCount := runtime.NumCPU() - 3
|
||||
if workerCount <= 0 {
|
||||
workerCount = 1
|
||||
}
|
||||
|
||||
config := nv10.Config{
|
||||
MaxWorkers: uint(workerCount),
|
||||
JobQueueSize: 1000,
|
||||
ResultQueueSize: 100,
|
||||
ProgressLogPeriod: 10 * time.Second,
|
||||
}
|
||||
newRoot, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("migrating actors v3 state: %w", err)
|
||||
}
|
||||
|
||||
// perform some basic sanity checks to make sure everything still works.
|
||||
store := store.ActorStore(ctx, sm.ChainStore().Blockstore())
|
||||
if newSm, err := state.LoadStateTree(store, newRoot); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("state tree sanity load failed: %w", err)
|
||||
} else if newRoot2, err := newSm.Flush(ctx); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("state tree sanity flush failed: %w", err)
|
||||
} else if newRoot2 != newRoot {
|
||||
return cid.Undef, xerrors.Errorf("state-root mismatch: %s != %s", newRoot, newRoot2)
|
||||
} else if _, err := newSm.GetActor(init_.Address); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to load init actor after upgrade: %w", err)
|
||||
}
|
||||
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func PreUpgradeActorsV3(ctx context.Context, sm *StateManager, cache MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
|
||||
// Use half the CPUs for pre-migration, but leave at least 3.
|
||||
workerCount := runtime.NumCPU()
|
||||
if workerCount <= 4 {
|
||||
workerCount = 1
|
||||
} else {
|
||||
workerCount /= 2
|
||||
}
|
||||
config := nv10.Config{MaxWorkers: uint(workerCount)}
|
||||
_, err := upgradeActorsV3Common(ctx, sm, cache, root, epoch, ts, config)
|
||||
return err
|
||||
}
|
||||
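The two worker-count heuristics above differ deliberately: the migration itself takes nearly every core, while the pre-migration runs alongside a live node and uses at most half of them, dropping to a single worker on small machines. A sketch that just restates the arithmetic:

package example

import "runtime"

// migrationWorkers mirrors UpgradeActorsV3: all cores but three, minimum one.
func migrationWorkers() int {
	n := runtime.NumCPU() - 3
	if n <= 0 {
		return 1
	}
	return n
}

// preMigrationWorkers mirrors PreUpgradeActorsV3: half the cores, minimum one.
func preMigrationWorkers() int {
	n := runtime.NumCPU()
	if n <= 4 {
		return 1
	}
	return n / 2
}

// On an 8-core machine: migrationWorkers() == 5, preMigrationWorkers() == 4.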
|
||||
func upgradeActorsV3Common(
|
||||
ctx context.Context, sm *StateManager, cache MigrationCache,
|
||||
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
|
||||
config nv10.Config,
|
||||
) (cid.Cid, error) {
|
||||
buf := bufbstore.NewTieredBstore(sm.cs.Blockstore(), bstore.NewTemporarySync())
|
||||
store := store.ActorStore(ctx, buf)
|
||||
|
||||
// Load the state root.
|
||||
var stateRoot types.StateRoot
|
||||
if err := store.Get(ctx, root, &stateRoot); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
|
||||
}
|
||||
|
||||
if stateRoot.Version != types.StateTreeVersion1 {
|
||||
return cid.Undef, xerrors.Errorf(
|
||||
"expected state root version 1 for actors v3 upgrade, got %d",
|
||||
stateRoot.Version,
|
||||
)
|
||||
}
|
||||
|
||||
// Perform the migration
|
||||
newHamtRoot, err := nv10.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("upgrading to actors v3: %w", err)
|
||||
}
|
||||
|
||||
// Persist the result.
|
||||
newRoot, err := store.Put(ctx, &types.StateRoot{
|
||||
Version: types.StateTreeVersion2,
|
||||
Actors: newHamtRoot,
|
||||
Info: stateRoot.Info,
|
||||
})
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
|
||||
}
|
||||
|
||||
// Persist the new tree.
|
||||
|
||||
{
|
||||
from := buf
|
||||
to := buf.Read()
|
||||
|
||||
if err := vm.Copy(ctx, from, to, newRoot); err != nil {
|
||||
return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return newRoot, nil
|
||||
}
|
||||
|
||||
func setNetworkName(ctx context.Context, store adt.Store, tree *state.StateTree, name string) error {
|
||||
ia, err := tree.GetActor(builtin0.InitActorAddr)
|
||||
if err != nil {
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -122,7 +123,7 @@ func TestForkHeightTriggers(t *testing.T) {
|
||||
cg.ChainStore(), UpgradeSchedule{{
|
||||
Network: 1,
|
||||
Height: testForkHeight,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
|
||||
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
cst := ipldcbor.NewCborStore(sm.ChainStore().Blockstore())
|
||||
|
||||
@ -252,7 +253,7 @@ func TestForkRefuseCall(t *testing.T) {
|
||||
Network: 1,
|
||||
Expensive: true,
|
||||
Height: testForkHeight,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cb ExecCallback,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
|
||||
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
return root, nil
|
||||
}}})
|
||||
@ -317,3 +318,166 @@ func TestForkRefuseCall(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestForkPreMigration(t *testing.T) {
|
||||
logging.SetAllLoggers(logging.LevelInfo)
|
||||
|
||||
cg, err := gen.NewGenerator()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fooCid, err := abi.CidBuilder.Sum([]byte("foo"))
|
||||
require.NoError(t, err)
|
||||
|
||||
barCid, err := abi.CidBuilder.Sum([]byte("bar"))
|
||||
require.NoError(t, err)
|
||||
|
||||
failCid, err := abi.CidBuilder.Sum([]byte("fail"))
|
||||
require.NoError(t, err)
|
||||
|
||||
var wait20 sync.WaitGroup
|
||||
wait20.Add(3)
|
||||
|
||||
wasCanceled := make(chan struct{})
|
||||
|
||||
checkCache := func(t *testing.T, cache MigrationCache) {
|
||||
found, value, err := cache.Read("foo")
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, fooCid, value)
|
||||
|
||||
found, value, err = cache.Read("bar")
|
||||
require.NoError(t, err)
|
||||
require.True(t, found)
|
||||
require.Equal(t, barCid, value)
|
||||
|
||||
found, _, err = cache.Read("fail")
|
||||
require.NoError(t, err)
|
||||
require.False(t, found)
|
||||
}
|
||||
|
||||
counter := make(chan struct{}, 10)
|
||||
|
||||
sm, err := NewStateManagerWithUpgradeSchedule(
|
||||
cg.ChainStore(), UpgradeSchedule{{
|
||||
Network: 1,
|
||||
Height: testForkHeight,
|
||||
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecCallback,
|
||||
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
|
||||
|
||||
// Make sure the pre-migration that should be canceled was indeed canceled.
|
||||
select {
|
||||
case <-wasCanceled:
|
||||
case <-ctx.Done():
|
||||
return cid.Undef, ctx.Err()
|
||||
}
|
||||
|
||||
// the cache should be set up correctly.
|
||||
checkCache(t, cache)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return root, nil
|
||||
},
|
||||
PreMigrations: []PreMigration{{
|
||||
StartWithin: 20,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
wait20.Done()
|
||||
wait20.Wait()
|
||||
|
||||
err := cache.Write("foo", fooCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return nil
|
||||
},
|
||||
}, {
|
||||
StartWithin: 20,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
wait20.Done()
|
||||
wait20.Wait()
|
||||
|
||||
err := cache.Write("bar", barCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return nil
|
||||
},
|
||||
}, {
|
||||
StartWithin: 20,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
wait20.Done()
|
||||
wait20.Wait()
|
||||
|
||||
err := cache.Write("fail", failCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
// Fail this pre-migration. The cached entry should not be persisted.
|
||||
return fmt.Errorf("failed")
|
||||
},
|
||||
}, {
|
||||
StartWithin: 15,
|
||||
StopWithin: 5,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
|
||||
<-ctx.Done()
|
||||
close(wasCanceled)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return nil
|
||||
},
|
||||
}, {
|
||||
StartWithin: 10,
|
||||
PreMigration: func(ctx context.Context, _ *StateManager, cache MigrationCache,
|
||||
_ cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) error {
|
||||
|
||||
checkCache(t, cache)
|
||||
|
||||
counter <- struct{}{}
|
||||
|
||||
return nil
|
||||
},
|
||||
}}},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, sm.Start(context.Background()))
|
||||
defer func() {
|
||||
require.NoError(t, sm.Stop(context.Background()))
|
||||
}()
|
||||
|
||||
inv := vm.NewActorRegistry()
|
||||
inv.Register(nil, testActor{})
|
||||
|
||||
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
|
||||
nvm, err := vm.NewVM(ctx, vmopt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nvm.SetInvoker(inv)
|
||||
return nvm, nil
|
||||
})
|
||||
|
||||
cg.SetStateManager(sm)
|
||||
|
||||
for i := 0; i < 50; i++ {
|
||||
_, err := cg.NextTipSet()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
// We have 5 pre-migration steps, and the migration. They should all have written something
|
||||
// to this channel.
|
||||
require.Equal(t, 6, len(counter))
|
||||
}
|
||||
|
@ -20,6 +20,10 @@ import (
|
||||
|
||||
// Used for genesis.
|
||||
msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
|
||||
"github.com/filecoin-project/specs-actors/v3/actors/migration/nv10"
|
||||
|
||||
// we use the same adt for all receipts
|
||||
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
@ -42,6 +46,7 @@ import (
|
||||
)
|
||||
|
||||
const LookbackNoLimit = abi.ChainEpoch(-1)
|
||||
const ReceiptAmtBitwidth = 3
|
||||
|
||||
var log = logging.Logger("statemgr")
|
||||
|
||||
@ -58,15 +63,24 @@ type versionSpec struct {
|
||||
atOrBelow abi.ChainEpoch
|
||||
}
|
||||
|
||||
type migration struct {
|
||||
upgrade MigrationFunc
|
||||
preMigrations []PreMigration
|
||||
cache *nv10.MemMigrationCache
|
||||
}
|
||||
|
||||
type StateManager struct {
|
||||
cs *store.ChainStore
|
||||
|
||||
cancel context.CancelFunc
|
||||
shutdown chan struct{}
|
||||
|
||||
// Determines the network version at any given epoch.
|
||||
networkVersions []versionSpec
|
||||
latestVersion network.Version
|
||||
|
||||
// Maps chain epochs to upgrade functions.
|
||||
stateMigrations map[abi.ChainEpoch]UpgradeFunc
|
||||
// Maps chain epochs to migrations.
|
||||
stateMigrations map[abi.ChainEpoch]*migration
|
||||
// A set of potentially expensive/time consuming upgrades. Explicit
|
||||
// calls for, e.g., gas estimation fail against this epoch with
|
||||
// ErrExpensiveFork.
|
||||
@ -99,7 +113,7 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stateMigrations := make(map[abi.ChainEpoch]UpgradeFunc, len(us))
|
||||
stateMigrations := make(map[abi.ChainEpoch]*migration, len(us))
|
||||
expensiveUpgrades := make(map[abi.ChainEpoch]struct{}, len(us))
|
||||
var networkVersions []versionSpec
|
||||
lastVersion := network.Version0
|
||||
@ -107,8 +121,13 @@ func NewStateManagerWithUpgradeSchedule(cs *store.ChainStore, us UpgradeSchedule
|
||||
// If we have any upgrades, process them and create a version
|
||||
// schedule.
|
||||
for _, upgrade := range us {
|
||||
if upgrade.Migration != nil {
|
||||
stateMigrations[upgrade.Height] = upgrade.Migration
|
||||
if upgrade.Migration != nil || upgrade.PreMigrations != nil {
|
||||
migration := &migration{
|
||||
upgrade: upgrade.Migration,
|
||||
preMigrations: upgrade.PreMigrations,
|
||||
cache: nv10.NewMemMigrationCache(),
|
||||
}
|
||||
stateMigrations[upgrade.Height] = migration
|
||||
}
|
||||
if upgrade.Expensive {
|
||||
expensiveUpgrades[upgrade.Height] = struct{}{}
|
||||
@ -144,6 +163,33 @@ func cidsToKey(cids []cid.Cid) string {
|
||||
return out
|
||||
}
|
||||
|
||||
// Start starts the state manager's optional background processes. At the moment, this schedules
|
||||
// pre-migration functions to run ahead of network upgrades.
|
||||
//
|
||||
// This method is not safe to invoke from multiple threads or concurrently with Stop.
|
||||
func (sm *StateManager) Start(context.Context) error {
|
||||
var ctx context.Context
|
||||
ctx, sm.cancel = context.WithCancel(context.Background())
|
||||
sm.shutdown = make(chan struct{})
|
||||
go sm.preMigrationWorker(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the state manager's background processes.
|
||||
//
|
||||
// This method is not safe to invoke concurrently with Start.
|
||||
func (sm *StateManager) Stop(ctx context.Context) error {
|
||||
if sm.cancel != nil {
|
||||
sm.cancel()
|
||||
select {
|
||||
case <-sm.shutdown:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
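A minimal usage sketch, assuming a *stmgr.StateManager built with NewStateManagerWithUpgradeSchedule as in the tests elsewhere in this diff: Start launches the pre-migration worker and Stop waits for it to wind down.

package example

import (
	"context"

	"github.com/filecoin-project/lotus/chain/stmgr"
)

// withStateManager runs fn with the pre-migration worker active for its duration.
func withStateManager(ctx context.Context, sm *stmgr.StateManager, fn func() error) error {
	if err := sm.Start(ctx); err != nil {
		return err
	}
	defer sm.Stop(context.Background()) //nolint:errcheck
	return fn()
}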
|
||||
func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
|
||||
ctx, span := trace.StartSpan(ctx, "tipSetState")
|
||||
defer span.End()
|
||||
@ -384,11 +430,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
|
||||
return cid.Cid{}, cid.Cid{}, err
|
||||
}
|
||||
|
||||
// XXX: Is the height correct? Or should it be epoch-1?
|
||||
rectarr, err := adt.NewArray(sm.cs.Store(ctx), actors.VersionForNetwork(sm.GetNtwkVersion(ctx, epoch)))
|
||||
if err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to create receipts amt: %w", err)
|
||||
}
|
||||
rectarr := blockadt.MakeEmptyArray(sm.cs.Store(ctx))
|
||||
for i, receipt := range receipts {
|
||||
if err := rectarr.Set(uint64(i), receipt); err != nil {
|
||||
return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err)
|
||||
@ -473,13 +515,26 @@ func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Ad
|
||||
ts = sm.cs.GetHeaviestTipSet()
|
||||
}
|
||||
|
||||
cst := cbor.NewCborStore(sm.cs.Blockstore())
|
||||
|
||||
// First try to resolve the actor in the parent state, so we don't have to compute anything.
|
||||
tree, err := state.LoadStateTree(cst, ts.ParentState())
|
||||
if err != nil {
|
||||
return address.Undef, xerrors.Errorf("failed to load parent state tree: %w", err)
|
||||
}
|
||||
|
||||
resolved, err := vm.ResolveToKeyAddr(tree, cst, addr)
|
||||
if err == nil {
|
||||
return resolved, nil
|
||||
}
|
||||
|
||||
// If that fails, compute the tip-set and try again.
|
||||
st, _, err := sm.TipSetState(ctx, ts)
|
||||
if err != nil {
|
||||
return address.Undef, xerrors.Errorf("resolve address failed to get tipset state: %w", err)
|
||||
}
|
||||
|
||||
cst := cbor.NewCborStore(sm.cs.Blockstore())
|
||||
tree, err := state.LoadStateTree(cst, st)
|
||||
tree, err = state.LoadStateTree(cst, st)
|
||||
if err != nil {
|
||||
return address.Undef, xerrors.Errorf("failed to load state tree")
|
||||
}
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
|
||||
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
|
||||
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
|
||||
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -207,17 +208,17 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra
|
||||
return nil, xerrors.Errorf("getting miner info: %w", err)
|
||||
}
|
||||
|
||||
wpt, err := info.SealProofType.RegisteredWinningPoStProof()
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting window proof type: %w", err)
|
||||
}
|
||||
|
||||
mid, err := address.IDFromAddress(maddr)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting miner ID: %w", err)
|
||||
}
|
||||
|
||||
ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, numProvSect)
|
||||
proofType, err := miner.WinningPoStProofTypeFromWindowPoStProofType(nv, info.WindowPoStProofType)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("determining winning post proof type: %w", err)
|
||||
}
|
||||
|
||||
ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, proofType, abi.ActorID(mid), rand, numProvSect)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("generating winning post challenges: %w", err)
|
||||
}
|
||||
@ -560,6 +561,7 @@ func init() {
|
||||
var actors []rt.VMActor
|
||||
actors = append(actors, exported0.BuiltinActors()...)
|
||||
actors = append(actors, exported2.BuiltinActors()...)
|
||||
actors = append(actors, exported3.BuiltinActors()...)
|
||||
|
||||
for _, actor := range actors {
|
||||
exports := actor.Exports()
|
||||
|
@ -52,7 +52,7 @@ func BenchmarkGetRandomness(b *testing.B) {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
bs, err := lr.Blockstore(repo.BlockstoreChain)
|
||||
bs, err := lr.Blockstore(context.TODO(), repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@ -65,7 +65,7 @@ func BenchmarkGetRandomness(b *testing.B) {
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lr.Datastore("/metadata")
|
||||
mds, err := lr.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
@ -34,7 +34,8 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
blst "github.com/supranational/blst/bindings/go"
|
||||
|
||||
ffi "github.com/filecoin-project/filecoin-ffi"
|
||||
|
||||
// named msgarray here to make it clear that these are the types used by
|
||||
// messages, regardless of specs-actors version.
|
||||
@ -55,7 +56,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
bstore "github.com/filecoin-project/lotus/lib/blockstore"
|
||||
"github.com/filecoin-project/lotus/lib/sigs"
|
||||
"github.com/filecoin-project/lotus/lib/sigs/bls"
|
||||
"github.com/filecoin-project/lotus/metrics"
|
||||
)
|
||||
|
||||
@ -676,6 +676,10 @@ func blockSanityChecks(h *types.BlockHeader) error {
|
||||
return xerrors.Errorf("block had nil bls aggregate signature")
|
||||
}
|
||||
|
||||
if h.Miner.Protocol() != address.ID {
|
||||
return xerrors.Errorf("block had non-ID miner address")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1178,17 +1182,21 @@ func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signat
|
||||
trace.Int64Attribute("msgCount", int64(len(msgs))),
|
||||
)
|
||||
|
||||
msgsS := make([]blst.Message, len(msgs))
|
||||
msgsS := make([]ffi.Message, len(msgs))
|
||||
pubksS := make([]ffi.PublicKey, len(msgs))
|
||||
for i := 0; i < len(msgs); i++ {
|
||||
msgsS[i] = msgs[i].Bytes()
|
||||
copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes])
|
||||
}
|
||||
|
||||
sigS := new(ffi.Signature)
|
||||
copy(sigS[:], sig.Data[:ffi.SignatureBytes])
|
||||
|
||||
if len(msgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
valid := new(bls.Signature).AggregateVerifyCompressed(sig.Data, pubks,
|
||||
msgsS, []byte(bls.DST))
|
||||
valid := ffi.HashVerify(sigS, msgsS, pubksS)
|
||||
if !valid {
|
||||
return xerrors.New("bls aggregate signature failed to verify")
|
||||
}
|
||||
|
@ -150,8 +150,11 @@ func TestSyncManagerEdgeCase(t *testing.T) {
|
||||
t.Fatalf("Expected tipset %s to sync, but got %s", e1, last)
|
||||
}
|
||||
|
||||
if len(sm.state) != 0 {
|
||||
t.Errorf("active syncs expected empty but got: %d", len(sm.state))
|
||||
sm.mx.Lock()
|
||||
activeSyncs := len(sm.state)
|
||||
sm.mx.Unlock()
|
||||
if activeSyncs != 0 {
|
||||
t.Errorf("active syncs expected empty but got: %d", activeSyncs)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -47,41 +47,24 @@ func NewBeaconEntry(round uint64, data []byte) BeaconEntry {
|
||||
}
|
||||
|
||||
type BlockHeader struct {
|
||||
Miner address.Address // 0
|
||||
Miner address.Address // 0 unique per block/miner
|
||||
Ticket *Ticket // 1 unique per block/miner: should be a valid VRF
|
||||
ElectionProof *ElectionProof // 2 unique per block/miner: should be a valid VRF
|
||||
BeaconEntries []BeaconEntry // 3 identical for all blocks in same tipset
|
||||
WinPoStProof []proof2.PoStProof // 4 unique per block/miner
|
||||
Parents []cid.Cid // 5 identical for all blocks in same tipset
|
||||
ParentWeight BigInt // 6 identical for all blocks in same tipset
|
||||
Height abi.ChainEpoch // 7 identical for all blocks in same tipset
|
||||
ParentStateRoot cid.Cid // 8 identical for all blocks in same tipset
|
||||
ParentMessageReceipts cid.Cid // 9 identical for all blocks in same tipset
|
||||
Messages cid.Cid // 10 unique per block
|
||||
BLSAggregate *crypto.Signature // 11 unique per block: aggregate of BLS messages from above
|
||||
Timestamp uint64 // 12 identical for all blocks in same tipset / hard-tied to the value of Height above
|
||||
BlockSig *crypto.Signature // 13 unique per block/miner: miner signature
|
||||
ForkSignaling uint64 // 14 currently unused/undefined
|
||||
ParentBaseFee abi.TokenAmount // 15 identical for all blocks in same tipset: the base fee after executing parent tipset
|
||||
|
||||
Ticket *Ticket // 1
|
||||
|
||||
ElectionProof *ElectionProof // 2
|
||||
|
||||
BeaconEntries []BeaconEntry // 3
|
||||
|
||||
WinPoStProof []proof2.PoStProof // 4
|
||||
|
||||
Parents []cid.Cid // 5
|
||||
|
||||
ParentWeight BigInt // 6
|
||||
|
||||
Height abi.ChainEpoch // 7
|
||||
|
||||
ParentStateRoot cid.Cid // 8
|
||||
|
||||
ParentMessageReceipts cid.Cid // 8
|
||||
|
||||
Messages cid.Cid // 10
|
||||
|
||||
BLSAggregate *crypto.Signature // 11
|
||||
|
||||
Timestamp uint64 // 12
|
||||
|
||||
BlockSig *crypto.Signature // 13
|
||||
|
||||
ForkSignaling uint64 // 14
|
||||
|
||||
// ParentBaseFee is the base fee after executing parent tipset
|
||||
ParentBaseFee abi.TokenAmount // 15
|
||||
|
||||
// internal
|
||||
validated bool // true if the signature has been validated
|
||||
validated bool // internal, true if the signature has been validated
|
||||
}
|
||||
|
||||
func (blk *BlockHeader) ToStorageBlock() (block.Block, error) {
|
||||
|
@ -9,8 +9,10 @@ type StateTreeVersion uint64
|
||||
const (
|
||||
// StateTreeVersion0 corresponds to actors < v2.
|
||||
StateTreeVersion0 StateTreeVersion = iota
|
||||
// StateTreeVersion1 corresponds to actors >= v2.
|
||||
// StateTreeVersion1 corresponds to actors v2
|
||||
StateTreeVersion1
|
||||
// StateTreeVersion2 corresponds to actors >= v3.
|
||||
StateTreeVersion2
|
||||
)
|
||||
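The comments above pin down a simple correspondence between actors versions and state-tree versions; a sketch that spells it out (the canonical mappings live in chain/state and chain/actors, this is only for reference):

package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/types"
)

// stateTreeVersionForActors restates the constant comments:
// actors v0 -> tree v0, actors v2 -> tree v1, actors v3 -> tree v2.
func stateTreeVersionForActors(av actors.Version) (types.StateTreeVersion, error) {
	switch av {
	case actors.Version0:
		return types.StateTreeVersion0, nil
	case actors.Version2:
		return types.StateTreeVersion1, nil
	case actors.Version3:
		return types.StateTreeVersion2, nil
	default:
		return 0, fmt.Errorf("unknown actors version %d", av)
	}
}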
|
||||
type StateRoot struct {
|
||||
|
@ -17,6 +17,7 @@ import (
|
||||
exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported"
|
||||
exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported"
|
||||
vmr "github.com/filecoin-project/specs-actors/v2/actors/runtime"
|
||||
exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
@ -62,6 +63,7 @@ func NewActorRegistry() *ActorRegistry {
|
||||
// add builtInCode using: register(cid, singleton)
|
||||
inv.Register(ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...)
|
||||
inv.Register(ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...)
|
||||
inv.Register(ActorsVersionPredicate(actors.Version3), exported3.BuiltinActors()...)
|
||||
|
||||
return inv
|
||||
}
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/chain/actors/aerrors"
|
||||
@ -91,6 +92,8 @@ func newAccountActor(ver actors.Version) *types.Actor {
|
||||
code = builtin0.AccountActorCodeID
|
||||
case actors.Version2:
|
||||
code = builtin2.AccountActorCodeID
|
||||
case actors.Version3:
|
||||
code = builtin3.AccountActorCodeID
|
||||
default:
|
||||
panic("unsupported actors version")
|
||||
}
|
||||
|
@ -46,7 +46,7 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma
|
||||
}
|
||||
defer lr.Close() // nolint:errcheck
|
||||
|
||||
mds, err := lr.Datastore("/metadata")
|
||||
mds, err := lr.Datastore(context.TODO(), "/metadata")
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting metadata datastore: %w", err)
|
||||
}
|
||||
|
@ -58,6 +58,7 @@ var chainCmd = &cli.Command{
|
||||
chainInspectUsage,
|
||||
chainDecodeCmd,
|
||||
chainEncodeCmd,
|
||||
chainDisputeSetCmd,
|
||||
},
|
||||
}
|
||||
|
||||
|
429 cli/disputer.go Normal file
@ -0,0 +1,429 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
|
||||
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
lapi "github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const Confidence = 10 // number of epochs the disputer waits after a deadline closes before inspecting it for disputable PoSts
|
||||
|
||||
type minerDeadline struct {
|
||||
miner address.Address
|
||||
index uint64
|
||||
}
|
||||
|
||||
var chainDisputeSetCmd = &cli.Command{
|
||||
Name: "disputer",
|
||||
Usage: "interact with the window post disputer",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "max-fee",
|
||||
Usage: "Spend up to X FIL per DisputeWindowedPoSt message",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "from",
|
||||
Usage: "optionally specify the account to send messages from",
|
||||
},
|
||||
},
|
||||
Subcommands: []*cli.Command{
|
||||
disputerStartCmd,
|
||||
disputerMsgCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var disputerMsgCmd = &cli.Command{
|
||||
Name: "dispute",
|
||||
Usage: "Send a specific DisputeWindowedPoSt message",
|
||||
ArgsUsage: "[minerAddress index postIndex]",
|
||||
Flags: []cli.Flag{},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 3 {
|
||||
fmt.Println("Usage: dispute [minerAddress index postIndex]")
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
toa, err := address.NewFromString(cctx.Args().First())
|
||||
if err != nil {
|
||||
return fmt.Errorf("given 'miner' address %q was invalid: %w", cctx.Args().First(), err)
|
||||
}
|
||||
|
||||
deadline, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
postIndex, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fromAddr, err := getSender(ctx, api, cctx.String("from"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{
|
||||
Deadline: deadline,
|
||||
PoStIndex: postIndex,
|
||||
})
|
||||
|
||||
if aerr != nil {
|
||||
return xerrors.Errorf("failed to serialize params: %w", aerr)
|
||||
}
|
||||
|
||||
dmsg := &types.Message{
|
||||
To: toa,
|
||||
From: fromAddr,
|
||||
Value: big.Zero(),
|
||||
Method: builtin3.MethodsMiner.DisputeWindowedPoSt,
|
||||
Params: dpp,
|
||||
}
|
||||
|
||||
rslt, err := api.StateCall(ctx, dmsg, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to simulate dispute: %w", err)
|
||||
}
|
||||
|
||||
if rslt.MsgRct.ExitCode == 0 {
|
||||
mss, err := getMaxFee(cctx.String("max-fee"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sm, err := api.MpoolPushMessage(ctx, dmsg, mss)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("dispute message ", sm.Cid())
|
||||
} else {
|
||||
fmt.Println("dispute is unsuccessful")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var disputerStartCmd = &cli.Command{
|
||||
Name: "start",
|
||||
Usage: "Start the window post disputer",
|
||||
ArgsUsage: "[minerAddress]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.Uint64Flag{
|
||||
Name: "start-epoch",
|
||||
Usage: "only start disputing PoSts after this epoch ",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
fromAddr, err := getSender(ctx, api, cctx.String("from"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mss, err := getMaxFee(cctx.String("max-fee"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
startEpoch := abi.ChainEpoch(0)
|
||||
if cctx.IsSet("height") {
|
||||
startEpoch = abi.ChainEpoch(cctx.Uint64("height"))
|
||||
}
|
||||
|
||||
fmt.Println("checking sync status")
|
||||
|
||||
if err := SyncWait(ctx, api, false); err != nil {
|
||||
return xerrors.Errorf("sync wait: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("setting up window post disputer")
|
||||
|
||||
// subscribe to head changes and validate the current value
|
||||
|
||||
headChanges, err := api.ChainNotify(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
head, ok := <-headChanges
|
||||
if !ok {
|
||||
return xerrors.Errorf("Notify stream was invalid")
|
||||
}
|
||||
|
||||
if len(head) != 1 {
|
||||
return xerrors.Errorf("Notify first entry should have been one item")
|
||||
}
|
||||
|
||||
if head[0].Type != store.HCCurrent {
|
||||
return xerrors.Errorf("expected current head on Notify stream (got %s)", head[0].Type)
|
||||
}
|
||||
|
||||
lastEpoch := head[0].Val.Height()
|
||||
lastStatusCheckEpoch := lastEpoch
|
||||
|
||||
// build initial deadlineMap
|
||||
|
||||
minerList, err := api.StateListMiners(ctx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
knownMiners := make(map[address.Address]struct{})
|
||||
deadlineMap := make(map[abi.ChainEpoch][]minerDeadline)
|
||||
for _, miner := range minerList {
|
||||
dClose, dl, err := makeMinerDeadline(ctx, api, miner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("making deadline: %w", err)
|
||||
}
|
||||
|
||||
deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl)
|
||||
|
||||
knownMiners[miner] = struct{}{}
|
||||
}
|
||||
|
||||
// when this fires, check for newly created miners, and purge any "missed" epochs from deadlineMap
|
||||
statusCheckTicker := time.NewTicker(time.Hour)
|
||||
defer statusCheckTicker.Stop()
|
||||
|
||||
fmt.Println("starting up window post disputer")
|
||||
|
||||
applyTsk := func(tsk types.TipSetKey) error {
|
||||
log.Infof("last checked height: %d", lastEpoch)
|
||||
dls, ok := deadlineMap[lastEpoch]
|
||||
delete(deadlineMap, lastEpoch)
|
||||
if !ok || startEpoch >= lastEpoch {
|
||||
// no deadlines closed at (this epoch - Confidence), or we haven't reached the start cutoff yet
|
||||
return nil
|
||||
}
|
||||
|
||||
dpmsgs := make([]*types.Message, 0)
|
||||
|
||||
// TODO: Parallelizable
|
||||
for _, dl := range dls {
|
||||
fullDeadlines, err := api.StateMinerDeadlines(ctx, dl.miner, tsk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to load deadlines: %w", err)
|
||||
}
|
||||
|
||||
if int(dl.index) >= len(fullDeadlines) {
|
||||
return xerrors.Errorf("deadline index %d not found in deadlines", dl.index)
|
||||
}
|
||||
|
||||
ms, err := makeDisputeWindowedPosts(ctx, api, dl, fullDeadlines[dl.index].DisputableProofCount, fromAddr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to check for disputes: %w", err)
|
||||
}
|
||||
|
||||
dpmsgs = append(dpmsgs, ms...)
|
||||
|
||||
dClose, dl, err := makeMinerDeadline(ctx, api, dl.miner)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("making deadline: %w", err)
|
||||
}
|
||||
|
||||
deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl)
|
||||
}
|
||||
|
||||
// TODO: Parallelizable / can be integrated into the previous deadline-iterating for loop
|
||||
for _, dpmsg := range dpmsgs {
|
||||
log.Infof("disputing a PoSt from miner %s", dpmsg.To)
|
||||
m, err := api.MpoolPushMessage(ctx, dpmsg, mss)
|
||||
if err != nil {
|
||||
log.Infof("failed to dispute post message: %s", err.Error())
|
||||
} else {
|
||||
log.Infof("disputed a PoSt in message: %s", m.Cid())
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
disputeLoop := func() error {
|
||||
select {
|
||||
case notif, ok := <-headChanges:
|
||||
if !ok {
|
||||
return xerrors.Errorf("head change channel errored")
|
||||
}
|
||||
|
||||
for _, val := range notif {
|
||||
switch val.Type {
|
||||
case store.HCApply:
|
||||
for ; lastEpoch <= val.Val.Height(); lastEpoch++ {
|
||||
err := applyTsk(val.Val.Key())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case store.HCRevert:
|
||||
// do nothing
|
||||
default:
|
||||
return xerrors.Errorf("unexpected head change type %s", val.Type)
|
||||
}
|
||||
}
|
||||
case <-statusCheckTicker.C:
|
||||
log.Infof("Running status check: ")
|
||||
|
||||
minerList, err = api.StateListMiners(ctx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting miner list: %w", err)
|
||||
}
|
||||
|
||||
for _, m := range minerList {
|
||||
_, ok := knownMiners[m]
|
||||
if !ok {
|
||||
dClose, dl, err := makeMinerDeadline(ctx, api, m)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("making deadline: %w", err)
|
||||
}
|
||||
|
||||
deadlineMap[dClose+Confidence] = append(deadlineMap[dClose+Confidence], *dl)
|
||||
|
||||
knownMiners[m] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for ; lastStatusCheckEpoch < lastEpoch; lastStatusCheckEpoch++ {
|
||||
// if an epoch got "skipped" from the deadlineMap somehow, just fry it now instead of letting it sit around forever
|
||||
_, ok := deadlineMap[lastStatusCheckEpoch]
|
||||
if ok {
|
||||
log.Infof("epoch %d was skipped during execution, deleting it from deadlineMap")
|
||||
delete(deadlineMap, lastStatusCheckEpoch)
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Status check complete")
|
||||
case <-ctx.Done():
|
||||
return xerrors.Errorf("context cancelled")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for {
|
||||
err := disputeLoop()
|
||||
if err != nil {
|
||||
fmt.Println("disputer shutting down: ", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// for a given miner and deadline index, tries to dispute posts from 0...postsSnapshotted-1
|
||||
// returns a list of DisputeWindowedPoSt msgs that are expected to succeed if sent
|
||||
func makeDisputeWindowedPosts(ctx context.Context, api lapi.FullNode, dl minerDeadline, postsSnapshotted uint64, sender address.Address) ([]*types.Message, error) {
|
||||
disputes := make([]*types.Message, 0)
|
||||
|
||||
for i := uint64(0); i < postsSnapshotted; i++ {
|
||||
|
||||
dpp, aerr := actors.SerializeParams(&miner3.DisputeWindowedPoStParams{
|
||||
Deadline: dl.index,
|
||||
PoStIndex: i,
|
||||
})
|
||||
|
||||
if aerr != nil {
|
||||
return nil, xerrors.Errorf("failed to serailize params: %w", aerr)
|
||||
}
|
||||
|
||||
dispute := &types.Message{
|
||||
To: dl.miner,
|
||||
From: sender,
|
||||
Value: big.Zero(),
|
||||
Method: builtin3.MethodsMiner.DisputeWindowedPoSt,
|
||||
Params: dpp,
|
||||
}
|
||||
|
||||
rslt, err := api.StateCall(ctx, dispute, types.EmptyTSK)
|
||||
if err == nil && rslt.MsgRct.ExitCode == 0 {
|
||||
disputes = append(disputes, dispute)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return disputes, nil
|
||||
}
|
||||
|
||||
func makeMinerDeadline(ctx context.Context, api lapi.FullNode, mAddr address.Address) (abi.ChainEpoch, *minerDeadline, error) {
|
||||
dl, err := api.StateMinerProvingDeadline(ctx, mAddr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return -1, nil, xerrors.Errorf("getting proving index list: %w", err)
|
||||
}
|
||||
|
||||
return dl.Close, &minerDeadline{
|
||||
miner: mAddr,
|
||||
index: dl.Index,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getSender(ctx context.Context, api lapi.FullNode, fromStr string) (address.Address, error) {
|
||||
if fromStr == "" {
|
||||
return api.WalletDefaultAddress(ctx)
|
||||
}
|
||||
|
||||
addr, err := address.NewFromString(fromStr)
|
||||
if err != nil {
|
||||
return address.Undef, err
|
||||
}
|
||||
|
||||
has, err := api.WalletHas(ctx, addr)
|
||||
if err != nil {
|
||||
return address.Undef, err
|
||||
}
|
||||
|
||||
if !has {
|
||||
return address.Undef, xerrors.Errorf("wallet doesn't contain: %s ", addr)
|
||||
}
|
||||
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
func getMaxFee(maxStr string) (*lapi.MessageSendSpec, error) {
|
||||
if maxStr != "" {
|
||||
maxFee, err := types.ParseFIL(maxStr)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parsing max-fee: %w", err)
|
||||
}
|
||||
return &lapi.MessageSendSpec{
|
||||
MaxFee: types.BigInt(maxFee),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
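The disputer above keys its work queue by the epoch at which each proving deadline closes plus a fixed confidence buffer, then re-queues the miner's next deadline after every check. A minimal sketch of that scheduling idea; the names deadlineRef, scheduleDeadline and popDue are illustrative and not part of this patch:

package disputersketch

import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
)

// confidence mirrors the Confidence constant above: wait this many epochs past
// a deadline's close before checking it, so a reorg is unlikely to undo the result.
const confidence = 10

type deadlineRef struct {
    miner address.Address
    index uint64
}

// scheduleDeadline queues a miner's deadline to be inspected `confidence` epochs
// after it closes; the main loop pops entries keyed by the current chain height.
func scheduleDeadline(queue map[abi.ChainEpoch][]deadlineRef, closeEpoch abi.ChainEpoch, ref deadlineRef) {
    queue[closeEpoch+confidence] = append(queue[closeEpoch+confidence], ref)
}

// popDue returns (and removes) the deadlines that became due at the given height.
func popDue(queue map[abi.ChainEpoch][]deadlineRef, height abi.ChainEpoch) []deadlineRef {
    due := queue[height]
    delete(queue, height)
    return due
}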
44 cli/state.go
@ -935,6 +935,10 @@ var stateComputeStateCmd = &cli.Command{
|
||||
Name: "compute-state-output",
|
||||
Usage: "a json file containing pre-existing compute-state output, to generate html reports without rerunning state changes",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "no-timing",
|
||||
Usage: "don't show timing information in html traces",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
@ -1026,7 +1030,9 @@ var stateComputeStateCmd = &cli.Command{
|
||||
return c.Code, nil
|
||||
}
|
||||
|
||||
return ComputeStateHTMLTempl(os.Stdout, ts, stout, getCode)
|
||||
_, _ = fmt.Fprintln(os.Stderr, "computed state cid: ", stout.Root)
|
||||
|
||||
return ComputeStateHTMLTempl(os.Stdout, ts, stout, !cctx.Bool("no-timing"), getCode)
|
||||
}
|
||||
|
||||
fmt.Println("computed state cid: ", stout.Root)
|
||||
@ -1147,8 +1153,11 @@ var compStateMsg = `
|
||||
{{if gt (len .Msg.Params) 0}}
|
||||
<div><pre class="params">{{JsonParams ($code) (.Msg.Method) (.Msg.Params) | html}}</pre></div>
|
||||
{{end}}
|
||||
<div><span class="slow-{{IsSlow .Duration}}-{{IsVerySlow .Duration}}">Took {{.Duration}}</span>, <span class="exit{{IntExit .MsgRct.ExitCode}}">Exit: <b>{{.MsgRct.ExitCode}}</b></span>{{if gt (len .MsgRct.Return) 0}}, Return{{end}}</div>
|
||||
|
||||
{{if PrintTiming}}
|
||||
<div><span class="slow-{{IsSlow .Duration}}-{{IsVerySlow .Duration}}">Took {{.Duration}}</span>, <span class="exit{{IntExit .MsgRct.ExitCode}}">Exit: <b>{{.MsgRct.ExitCode}}</b></span>{{if gt (len .MsgRct.Return) 0}}, Return{{end}}</div>
|
||||
{{else}}
|
||||
<div><span class="exit{{IntExit .MsgRct.ExitCode}}">Exit: <b>{{.MsgRct.ExitCode}}</b></span>{{if gt (len .MsgRct.Return) 0}}, Return{{end}}</div>
|
||||
{{end}}
|
||||
{{if gt (len .MsgRct.Return) 0}}
|
||||
<div><pre class="ret">{{JsonReturn ($code) (.Msg.Method) (.MsgRct.Return) | html}}</pre></div>
|
||||
{{end}}
|
||||
@ -1174,7 +1183,7 @@ var compStateMsg = `
|
||||
{{range .GasCharges}}
|
||||
<tr><td>{{.Name}}{{if .Extra}}:{{.Extra}}{{end}}</td>
|
||||
{{template "gasC" .}}
|
||||
<td>{{.TimeTaken}}</td>
|
||||
<td>{{if PrintTiming}}{{.TimeTaken}}{{end}}</td>
|
||||
<td>
|
||||
{{ $fImp := FirstImportant .Location }}
|
||||
{{ if $fImp }}
|
||||
@ -1213,7 +1222,7 @@ var compStateMsg = `
|
||||
{{with SumGas .GasCharges}}
|
||||
<tr class="sum"><td><b>Sum</b></td>
|
||||
{{template "gasC" .}}
|
||||
<td>{{.TimeTaken}}</td>
|
||||
<td>{{if PrintTiming}}{{.TimeTaken}}{{end}}</td>
|
||||
<td></td></tr>
|
||||
{{end}}
|
||||
</table>
|
||||
@ -1234,19 +1243,20 @@ type compStateHTMLIn struct {
|
||||
Comp *api.ComputeStateOutput
|
||||
}
|
||||
|
||||
func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOutput, getCode func(addr address.Address) (cid.Cid, error)) error {
|
||||
func ComputeStateHTMLTempl(w io.Writer, ts *types.TipSet, o *api.ComputeStateOutput, printTiming bool, getCode func(addr address.Address) (cid.Cid, error)) error {
|
||||
t, err := template.New("compute_state").Funcs(map[string]interface{}{
|
||||
"GetCode": getCode,
|
||||
"GetMethod": getMethod,
|
||||
"ToFil": toFil,
|
||||
"JsonParams": JsonParams,
|
||||
"JsonReturn": jsonReturn,
|
||||
"IsSlow": isSlow,
|
||||
"IsVerySlow": isVerySlow,
|
||||
"IntExit": func(i exitcode.ExitCode) int64 { return int64(i) },
|
||||
"SumGas": sumGas,
|
||||
"CodeStr": codeStr,
|
||||
"Call": call,
|
||||
"GetCode": getCode,
|
||||
"GetMethod": getMethod,
|
||||
"ToFil": toFil,
|
||||
"JsonParams": JsonParams,
|
||||
"JsonReturn": jsonReturn,
|
||||
"IsSlow": isSlow,
|
||||
"IsVerySlow": isVerySlow,
|
||||
"IntExit": func(i exitcode.ExitCode) int64 { return int64(i) },
|
||||
"SumGas": sumGas,
|
||||
"CodeStr": codeStr,
|
||||
"Call": call,
|
||||
"PrintTiming": func() bool { return printTiming },
|
||||
"FirstImportant": func(locs []types.Loc) *types.Loc {
|
||||
if len(locs) != 0 {
|
||||
for _, l := range locs {
|
||||
|
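The new `no-timing` flag works by exposing a boolean through the template's function map, so the `{{if PrintTiming}}` blocks above are rendered or skipped at execution time. A small self-contained sketch of the same pattern; the template text and field names here are illustrative:

package main

import (
    "html/template"
    "os"
    "time"
)

func main() {
    printTiming := false // e.g. driven by a --no-timing CLI flag

    t := template.Must(template.New("row").Funcs(template.FuncMap{
        "PrintTiming": func() bool { return printTiming },
    }).Parse(`{{if PrintTiming}}Took {{.Duration}}, {{end}}Exit: {{.ExitCode}}` + "\n"))

    // With printTiming == false only the exit code is rendered.
    _ = t.Execute(os.Stdout, struct {
        Duration time.Duration
        ExitCode int
    }{3 * time.Millisecond, 0})
}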
@ -32,7 +32,6 @@ import (
|
||||
lapi "github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/genesis"
|
||||
)
|
||||
@ -177,8 +176,6 @@ var sealBenchCmd = &cli.Command{
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
policy.AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
|
||||
|
||||
if c.Bool("no-gpu") {
|
||||
err := os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
if err != nil {
|
||||
|
@ -245,7 +245,7 @@ func startNodes(
|
||||
|
||||
// Create a gateway server in front of the full node
|
||||
gapiImpl := newGatewayAPI(fullNode, lookbackCap, stateWaitLookbackLimit)
|
||||
_, addr, err := builder.CreateRPCServer(gapiImpl)
|
||||
_, addr, err := builder.CreateRPCServer(t, gapiImpl)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a gateway client API that connects to the gateway server
|
||||
|
@ -6,7 +6,9 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"contrib.go.opencensus.io/exporter/prometheus"
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
promclient "github.com/prometheus/client_golang/prometheus"
|
||||
"go.opencensus.io/tag"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
@ -99,6 +101,17 @@ var runCmd = &cli.Command{
|
||||
rpcServer.Register("Filecoin", metrics.MetricedGatewayAPI(NewGatewayAPI(api)))
|
||||
|
||||
mux.Handle("/rpc/v0", rpcServer)
|
||||
|
||||
registry := promclient.DefaultRegisterer.(*promclient.Registry)
|
||||
exporter, err := prometheus.NewExporter(prometheus.Options{
|
||||
Registry: registry,
|
||||
Namespace: "lotus_gw",
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mux.Handle("/debug/metrics", exporter)
|
||||
|
||||
mux.PathPrefix("/").Handler(http.DefaultServeMux)
|
||||
|
||||
/*ah := &auth.Handler{
|
||||
|
@ -308,7 +308,7 @@ var runCmd = &cli.Command{
|
||||
|
||||
{
|
||||
// init datastore for r.Exists
|
||||
_, err := lr.Datastore("/metadata")
|
||||
_, err := lr.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -327,7 +327,7 @@ var runCmd = &cli.Command{
|
||||
log.Error("closing repo", err)
|
||||
}
|
||||
}()
|
||||
ds, err := lr.Datastore("/metadata")
|
||||
ds, err := lr.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -175,7 +175,7 @@ var chainBalanceStateCmd = &cli.Command{
|
||||
|
||||
defer lkrepo.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lkrepo.Blockstore(repo.BlockstoreChain)
|
||||
bs, err := lkrepo.Blockstore(ctx, repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
@ -188,7 +188,7 @@ var chainBalanceStateCmd = &cli.Command{
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lkrepo.Datastore("/metadata")
|
||||
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -396,7 +396,7 @@ var chainPledgeCmd = &cli.Command{
|
||||
|
||||
defer lkrepo.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lkrepo.Blockstore(repo.BlockstoreChain)
|
||||
bs, err := lkrepo.Blockstore(ctx, repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
@ -409,7 +409,7 @@ var chainPledgeCmd = &cli.Command{
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lkrepo.Datastore("/metadata")
|
||||
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -17,6 +17,7 @@ import (
|
||||
|
||||
var bitFieldCmd = &cli.Command{
|
||||
Name: "bitfield",
|
||||
Usage: "Bitfield analyze tool",
|
||||
Description: "analyze bitfields",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
@ -26,53 +27,24 @@ var bitFieldCmd = &cli.Command{
|
||||
},
|
||||
},
|
||||
Subcommands: []*cli.Command{
|
||||
bitFieldEncodeCmd,
|
||||
bitFieldDecodeCmd,
|
||||
bitFieldRunsCmd,
|
||||
bitFieldStatCmd,
|
||||
bitFieldDecodeCmd,
|
||||
bitFieldMergeCmd,
|
||||
bitFieldIntersectCmd,
|
||||
bitFieldEncodeCmd,
|
||||
bitFieldSubCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var bitFieldRunsCmd = &cli.Command{
|
||||
Name: "runs",
|
||||
Usage: "Bitfield bit runs",
|
||||
Description: "print bit runs in a bitfield",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "enc",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
var val string
|
||||
if cctx.Args().Present() {
|
||||
val = cctx.Args().Get(0)
|
||||
} else {
|
||||
b, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
val = string(b)
|
||||
}
|
||||
|
||||
var dec []byte
|
||||
switch cctx.String("enc") {
|
||||
case "base64":
|
||||
d, err := base64.StdEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding base64 value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
case "hex":
|
||||
d, err := hex.DecodeString(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding hex value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
default:
|
||||
return fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
|
||||
dec, err := decodeToByte(cctx, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rle, err := rlepluslazy.FromBuf(dec)
|
||||
@ -98,7 +70,7 @@ var bitFieldRunsCmd = &cli.Command{
|
||||
s = "FALSE"
|
||||
}
|
||||
|
||||
fmt.Printf("@%d %s * %d\n", idx, s, r.Len)
|
||||
fmt.Printf("@%08d %s * %d\n", idx, s, r.Len)
|
||||
|
||||
idx += r.Len
|
||||
}
|
||||
@ -109,43 +81,14 @@ var bitFieldRunsCmd = &cli.Command{
|
||||
|
||||
var bitFieldStatCmd = &cli.Command{
|
||||
Name: "stat",
|
||||
Usage: "Bitfield stats",
|
||||
Description: "print bitfield stats",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "enc",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
var val string
|
||||
if cctx.Args().Present() {
|
||||
val = cctx.Args().Get(0)
|
||||
} else {
|
||||
b, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
val = string(b)
|
||||
}
|
||||
|
||||
var dec []byte
|
||||
switch cctx.String("enc") {
|
||||
case "base64":
|
||||
d, err := base64.StdEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding base64 value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
case "hex":
|
||||
d, err := hex.DecodeString(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decoding hex value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
default:
|
||||
return fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
|
||||
dec, err := decodeToByte(cctx, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("Raw length: %d bits (%d bytes)\n", len(dec)*8, len(dec))
|
||||
|
||||
rle, err := rlepluslazy.FromBuf(dec)
|
||||
if err != nil {
|
||||
@ -157,10 +100,7 @@ var bitFieldStatCmd = &cli.Command{
|
||||
return xerrors.Errorf("getting run iterator: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Raw length: %d bits (%d bytes)\n", len(dec)*8, len(dec))
|
||||
|
||||
var ones, zeros, oneRuns, zeroRuns, invalid uint64
|
||||
|
||||
for rit.HasNext() {
|
||||
r, err := rit.NextRun()
|
||||
if err != nil {
|
||||
@ -195,14 +135,8 @@ var bitFieldStatCmd = &cli.Command{
|
||||
|
||||
var bitFieldDecodeCmd = &cli.Command{
|
||||
Name: "decode",
|
||||
Usage: "Bitfield to decimal number",
|
||||
Description: "decode bitfield and print all numbers in it",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "enc",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
rle, err := decode(cctx, 0)
|
||||
if err != nil {
|
||||
@ -219,43 +153,61 @@ var bitFieldDecodeCmd = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var bitFieldIntersectCmd = &cli.Command{
|
||||
Name: "intersect",
|
||||
Description: "intersect 2 bitfields and print the resulting bitfield as base64",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "enc",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
var bitFieldMergeCmd = &cli.Command{
|
||||
Name: "merge",
|
||||
Usage: "Merge 2 bitfields",
|
||||
Description: "Merge 2 bitfields and print the resulting bitfield",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
a, err := decode(cctx, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b, err := decode(cctx, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o, err := bitfield.MergeBitFields(a, b)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("merge: %w", err)
|
||||
}
|
||||
|
||||
str, err := encode(cctx, o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(str)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var bitFieldIntersectCmd = &cli.Command{
|
||||
Name: "intersect",
|
||||
Usage: "Intersect 2 bitfields",
|
||||
Description: "intersect 2 bitfields and print the resulting bitfield",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
a, err := decode(cctx, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b, err := decode(cctx, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o, err := bitfield.IntersectBitField(a, b)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("intersect: %w", err)
|
||||
}
|
||||
|
||||
s, err := o.RunIterator()
|
||||
str, err := encode(cctx, o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(base64.StdEncoding.EncodeToString(bytes))
|
||||
fmt.Println(str)
|
||||
|
||||
return nil
|
||||
},
|
||||
@ -263,41 +215,29 @@ var bitFieldIntersectCmd = &cli.Command{
|
||||
|
||||
var bitFieldSubCmd = &cli.Command{
|
||||
Name: "sub",
|
||||
Description: "subtract 2 bitfields and print the resulting bitfield as base64",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "enc",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Usage: "Subtract 2 bitfields",
|
||||
Description: "subtract 2 bitfields and print the resulting bitfield",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
b, err := decode(cctx, 1)
|
||||
a, err := decode(cctx, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a, err := decode(cctx, 0)
|
||||
b, err := decode(cctx, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o, err := bitfield.SubtractBitField(a, b)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("intersect: %w", err)
|
||||
return xerrors.Errorf("subtract: %w", err)
|
||||
}
|
||||
|
||||
s, err := o.RunIterator()
|
||||
str, err := encode(cctx, o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(base64.StdEncoding.EncodeToString(bytes))
|
||||
fmt.Println(str)
|
||||
|
||||
return nil
|
||||
},
|
||||
@ -305,15 +245,9 @@ var bitFieldSubCmd = &cli.Command{
|
||||
|
||||
var bitFieldEncodeCmd = &cli.Command{
|
||||
Name: "encode",
|
||||
Usage: "Decimal number to bitfield",
|
||||
Description: "encode a series of decimal numbers into a bitfield",
|
||||
ArgsUsage: "[infile]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "enc",
|
||||
Value: "base64",
|
||||
Usage: "specify input encoding to parse",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
f, err := os.Open(cctx.Args().First())
|
||||
if err != nil {
|
||||
@ -331,38 +265,64 @@ var bitFieldEncodeCmd = &cli.Command{
|
||||
out.Set(i)
|
||||
}
|
||||
|
||||
s, err := out.RunIterator()
|
||||
str, err := encode(cctx, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(base64.StdEncoding.EncodeToString(bytes))
|
||||
fmt.Println(str)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func decode(cctx *cli.Context, a int) (bitfield.BitField, error) {
|
||||
func encode(cctx *cli.Context, field bitfield.BitField) (string, error) {
|
||||
s, err := field.RunIterator()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
bytes, err := rlepluslazy.EncodeRuns(s, []byte{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var str string
|
||||
switch cctx.String("enc") {
|
||||
case "base64":
|
||||
str = base64.StdEncoding.EncodeToString(bytes)
|
||||
case "hex":
|
||||
str = hex.EncodeToString(bytes)
|
||||
default:
|
||||
return "", fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
|
||||
}
|
||||
|
||||
return str, nil
|
||||
|
||||
}
|
||||
func decode(cctx *cli.Context, i int) (bitfield.BitField, error) {
|
||||
b, err := decodeToByte(cctx, i)
|
||||
if err != nil {
|
||||
return bitfield.BitField{}, err
|
||||
}
|
||||
return bitfield.NewFromBytes(b)
|
||||
}
|
||||
|
||||
func decodeToByte(cctx *cli.Context, i int) ([]byte, error) {
|
||||
var val string
|
||||
if cctx.Args().Present() {
|
||||
if a >= cctx.NArg() {
|
||||
return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
|
||||
if i >= cctx.NArg() {
|
||||
return nil, xerrors.Errorf("need more than %d args", i)
|
||||
}
|
||||
val = cctx.Args().Get(a)
|
||||
val = cctx.Args().Get(i)
|
||||
} else {
|
||||
if a > 0 {
|
||||
return bitfield.BitField{}, xerrors.Errorf("need more than %d args", a)
|
||||
if i > 0 {
|
||||
return nil, xerrors.Errorf("need more than %d args", i)
|
||||
}
|
||||
b, err := ioutil.ReadAll(os.Stdin)
|
||||
r, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return bitfield.BitField{}, err
|
||||
return nil, err
|
||||
}
|
||||
val = string(b)
|
||||
val = string(r)
|
||||
}
|
||||
|
||||
var dec []byte
|
||||
@ -370,18 +330,18 @@ func decode(cctx *cli.Context, a int) (bitfield.BitField, error) {
|
||||
case "base64":
|
||||
d, err := base64.StdEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
return bitfield.BitField{}, fmt.Errorf("decoding base64 value: %w", err)
|
||||
return nil, fmt.Errorf("decoding base64 value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
case "hex":
|
||||
d, err := hex.DecodeString(val)
|
||||
if err != nil {
|
||||
return bitfield.BitField{}, fmt.Errorf("decoding hex value: %w", err)
|
||||
return nil, fmt.Errorf("decoding hex value: %w", err)
|
||||
}
|
||||
dec = d
|
||||
default:
|
||||
return bitfield.BitField{}, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
|
||||
return nil, fmt.Errorf("unrecognized encoding: %s", cctx.String("enc"))
|
||||
}
|
||||
|
||||
return bitfield.NewFromBytes(dec)
|
||||
return dec, nil
|
||||
}
|
||||
|
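The refactor above funnels every bitfield subcommand through shared encode/decode helpers built on the RLE+ serialization. A short sketch of the underlying round trip, independent of the CLI plumbing (the set of bits is arbitrary example data):

package main

import (
    "encoding/base64"
    "encoding/hex"
    "fmt"

    "github.com/filecoin-project/go-bitfield"
    rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
)

func main() {
    bf := bitfield.NewFromSet([]uint64{2, 3, 5, 7, 11})

    // Serialize to RLE+ bytes, the representation the CLI prints.
    runs, err := bf.RunIterator()
    if err != nil {
        panic(err)
    }
    raw, err := rlepluslazy.EncodeRuns(runs, []byte{})
    if err != nil {
        panic(err)
    }
    fmt.Println("base64:", base64.StdEncoding.EncodeToString(raw))
    fmt.Println("hex:", hex.EncodeToString(raw))

    // Decode back and confirm the set survived the round trip.
    back, err := bitfield.NewFromBytes(raw)
    if err != nil {
        panic(err)
    }
    n, _ := back.Count()
    fmt.Println("bits set:", n) // 5
}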
@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -75,7 +76,7 @@ var datastoreListCmd = &cli.Command{
|
||||
}
|
||||
defer lr.Close() //nolint:errcheck
|
||||
|
||||
ds, err := lr.Datastore(datastore.NewKey(cctx.Args().First()).String())
|
||||
ds, err := lr.Datastore(context.Background(), datastore.NewKey(cctx.Args().First()).String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -120,7 +121,7 @@ var datastoreGetCmd = &cli.Command{
|
||||
},
|
||||
ArgsUsage: "[namespace key]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
logging.SetLogLevel("badger", "ERROR") // nolint:errchec
|
||||
logging.SetLogLevel("badger", "ERROR") // nolint:errcheck
|
||||
|
||||
r, err := repo.NewFS(cctx.String("repo"))
|
||||
if err != nil {
|
||||
@ -141,7 +142,7 @@ var datastoreGetCmd = &cli.Command{
|
||||
}
|
||||
defer lr.Close() //nolint:errcheck
|
||||
|
||||
ds, err := lr.Datastore(datastore.NewKey(cctx.Args().First()).String())
|
||||
ds, err := lr.Datastore(context.Background(), datastore.NewKey(cctx.Args().First()).String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ var exportChainCmd = &cli.Command{
|
||||
|
||||
defer fi.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lr.Blockstore(repo.BlockstoreChain)
|
||||
bs, err := lr.Blockstore(ctx, repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
@ -85,7 +85,7 @@ var exportChainCmd = &cli.Command{
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lr.Datastore("/metadata")
|
||||
mds, err := lr.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -24,6 +25,8 @@ var importCarCmd = &cli.Command{
|
||||
return xerrors.Errorf("opening fs repo: %w", err)
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
exists, err := r.Exists()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -44,7 +47,7 @@ var importCarCmd = &cli.Command{
|
||||
return xerrors.Errorf("opening the car file: %w", err)
|
||||
}
|
||||
|
||||
bs, err := lr.Blockstore(repo.BlockstoreChain)
|
||||
bs, err := lr.Blockstore(ctx, repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -99,6 +102,8 @@ var importObjectCmd = &cli.Command{
|
||||
return xerrors.Errorf("opening fs repo: %w", err)
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
exists, err := r.Exists()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -113,7 +118,7 @@ var importObjectCmd = &cli.Command{
|
||||
}
|
||||
defer lr.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lr.Blockstore(repo.BlockstoreChain)
|
||||
bs, err := lr.Blockstore(ctx, repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
@ -131,7 +131,7 @@ var stateTreePruneCmd = &cli.Command{
|
||||
|
||||
defer lkrepo.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lkrepo.Blockstore(repo.BlockstoreChain)
|
||||
bs, err := lkrepo.Blockstore(ctx, repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
@ -151,7 +151,7 @@ var stateTreePruneCmd = &cli.Command{
|
||||
return fmt.Errorf("only badger blockstores are supported")
|
||||
}
|
||||
|
||||
mds, err := lkrepo.Datastore("/metadata")
|
||||
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ func TestWorkerKeyChange(t *testing.T) {
|
||||
|
||||
blocktime := 1 * time.Millisecond
|
||||
|
||||
n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithActorsV2At(1), test.FullNodeWithActorsV2At(1)}, test.OneMiner)
|
||||
n, sn := builder.MockSbBuilder(t, []test.FullNodeOpts{test.FullNodeWithActorsV3At(2), test.FullNodeWithActorsV3At(2)}, test.OneMiner)
|
||||
|
||||
client1 := n[0]
|
||||
client2 := n[1]
|
||||
|
@ -417,7 +417,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
|
||||
return xerrors.Errorf("peer ID from private key: %w", err)
|
||||
}
|
||||
|
||||
mds, err := lr.Datastore("/metadata")
|
||||
mds, err := lr.Datastore(context.TODO(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -190,7 +191,7 @@ var initRestoreCmd = &cli.Command{
|
||||
|
||||
log.Info("Restoring metadata backup")
|
||||
|
||||
mds, err := lr.Datastore("/metadata")
|
||||
mds, err := lr.Datastore(context.TODO(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -430,11 +430,6 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
pf, err := info.SealProofType.RegisteredWindowPoStProof()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partitions, err := api.StateMinerPartitions(ctx, addr, dlIdx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -463,7 +458,7 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
})
|
||||
}
|
||||
|
||||
bad, err := sapi.CheckProvable(ctx, pf, tocheck, cctx.Bool("slow"))
|
||||
bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -249,6 +249,7 @@ var sectorsListCmd = &cli.Command{
|
||||
tablewriter.Col("Events"),
|
||||
tablewriter.Col("Deals"),
|
||||
tablewriter.Col("DealWeight"),
|
||||
tablewriter.Col("VerifiedPower"),
|
||||
tablewriter.NewLineCol("Error"),
|
||||
tablewriter.NewLineCol("RecoveryTimeout"))
|
||||
|
||||
@ -268,9 +269,11 @@ var sectorsListCmd = &cli.Command{
|
||||
_, inSSet := commitedIDs[s]
|
||||
_, inASet := activeIDs[s]
|
||||
|
||||
dw := .0
|
||||
dw, vp := .0, .0
|
||||
if st.Expiration-st.Activation > 0 {
|
||||
dw = float64(big.Div(st.DealWeight, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
|
||||
rdw := big.Add(st.DealWeight, st.VerifiedDealWeight)
|
||||
dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
|
||||
vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(9)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
|
||||
}
|
||||
|
||||
var deals int
|
||||
@ -309,6 +312,9 @@ var sectorsListCmd = &cli.Command{
|
||||
|
||||
if !fast && deals > 0 {
|
||||
m["DealWeight"] = units.BytesSize(dw)
|
||||
if vp > 0 {
|
||||
m["VerifiedPower"] = color.GreenString(units.BytesSize(vp))
|
||||
}
|
||||
}
|
||||
|
||||
if st.Early > 0 {
|
||||
|
@ -118,7 +118,7 @@ var runCmd = &cli.Command{
|
||||
|
||||
var w api.WalletAPI = lw
|
||||
if cctx.Bool("ledger") {
|
||||
ds, err := lr.Datastore("/metadata")
|
||||
ds, err := lr.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
dstore "github.com/ipfs/go-datastore"
|
||||
@ -87,7 +88,7 @@ func restore(cctx *cli.Context, r repo.Repo) error {
|
||||
|
||||
log.Info("Restoring metadata backup")
|
||||
|
||||
mds, err := lr.Datastore("/metadata")
|
||||
mds, err := lr.Datastore(context.TODO(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -254,7 +254,7 @@ var DaemonCmd = &cli.Command{
|
||||
issnapshot = true
|
||||
}
|
||||
|
||||
if err := ImportChain(r, chainfile, issnapshot); err != nil {
|
||||
if err := ImportChain(ctx, r, chainfile, issnapshot); err != nil {
|
||||
return err
|
||||
}
|
||||
if cctx.Bool("halt-after-import") {
|
||||
@ -389,7 +389,7 @@ func importKey(ctx context.Context, api api.FullNode, f string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
|
||||
func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) (err error) {
|
||||
var rd io.Reader
|
||||
var l int64
|
||||
if strings.HasPrefix(fname, "http://") || strings.HasPrefix(fname, "https://") {
|
||||
@ -432,12 +432,12 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
|
||||
}
|
||||
defer lr.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lr.Blockstore(repo.BlockstoreChain)
|
||||
bs, err := lr.Blockstore(ctx, repo.BlockstoreChain)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
mds, err := lr.Datastore("/metadata")
|
||||
mds, err := lr.Datastore(context.TODO(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -473,7 +473,7 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
|
||||
return xerrors.Errorf("flushing validation cache failed: %w", err)
|
||||
}
|
||||
|
||||
gb, err := cst.GetTipsetByHeight(context.TODO(), 0, ts, true)
|
||||
gb, err := cst.GetTipsetByHeight(ctx, 0, ts, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -487,13 +487,13 @@ func ImportChain(r repo.Repo, fname string, snapshot bool) (err error) {
|
||||
|
||||
if !snapshot {
|
||||
log.Infof("validating imported chain...")
|
||||
if err := stm.ValidateChain(context.TODO(), ts); err != nil {
|
||||
if err := stm.ValidateChain(ctx, ts); err != nil {
|
||||
return xerrors.Errorf("chain validation failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("accepting %s as new head", ts.Cids())
|
||||
if err := cst.ForceHeadSilent(context.Background(), ts); err != nil {
|
||||
if err := cst.ForceHeadSilent(ctx, ts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -4028,7 +4028,7 @@ Response:
|
||||
"WorkerChangeEpoch": 10101,
|
||||
"PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
|
||||
"Multiaddrs": null,
|
||||
"SealProofType": 8,
|
||||
"WindowPoStProofType": 8,
|
||||
"SectorSize": 34359738368,
|
||||
"WindowPoStPartitionSectors": 42,
|
||||
"ConsensusFaultElapsed": 10101
|
||||
|
1 extern/blst vendored
@ -1 +0,0 @@
|
||||
Subproject commit 1cbb16ed9580dcd3e9593b71221fcf2a048faaef
|
2 extern/filecoin-ffi vendored
@ -1 +1 @@
|
||||
Subproject commit 1d9cb3e8ff53f51f9318fc57e5d00bc79bdc0128
|
||||
Subproject commit 62f89f108a6a8fe9ad6ed52fb7ffbf8594d7ae5c
|
12 extern/sector-storage/manager.go vendored
@ -285,9 +285,19 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector storage.
|
||||
if unsealed == cid.Undef {
|
||||
return xerrors.Errorf("cannot unseal piece (sector: %d, offset: %d size: %d) - unsealed cid is undefined", sector, offset, size)
|
||||
}
|
||||
|
||||
ssize, err := sector.ProofType.SectorSize()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting sector size: %w", err)
|
||||
}
|
||||
|
||||
err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error {
|
||||
// TODO: make restartable
|
||||
_, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed))
|
||||
|
||||
// NOTE: we're unsealing the whole sector here as with SDR we can't really
|
||||
// unseal the sector partially. Requesting the whole sector here can
|
||||
// save us some work in case another piece is requested from here
|
||||
_, err := m.waitSimpleCall(ctx)(w.UnsealPiece(ctx, sector, 0, abi.PaddedPieceSize(ssize).Unpadded(), ticket, unsealed))
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
|
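ReadPiece now asks the worker to unseal the whole sector rather than just the requested range, since SDR sectors cannot be partially unsealed; the range is derived from the sector's proof type. A small sketch of that size computation (the helper name is illustrative):

package main

import (
    "fmt"

    "github.com/filecoin-project/go-state-types/abi"
)

// wholeSectorRange returns the unpadded size covering a full sector of the given
// proof type, i.e. the size used for the offset-0 unseal request issued above.
func wholeSectorRange(spt abi.RegisteredSealProof) (abi.UnpaddedPieceSize, error) {
    ssize, err := spt.SectorSize()
    if err != nil {
        return 0, err
    }
    return abi.PaddedPieceSize(ssize).Unpadded(), nil
}

func main() {
    size, err := wholeSectorRange(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
    if err != nil {
        panic(err)
    }
    fmt.Println(size) // 34091302912 unpadded bytes for a 32GiB sector
}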
7 extern/storage-sealing/sealing.go vendored
@ -481,7 +481,12 @@ func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return mi.SealProofType, nil
|
||||
ver, err := m.api.StateNetworkVersion(ctx, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return miner.PreferredSealProofTypeFromWindowPoStType(ver, mi.WindowPoStProofType)
|
||||
}
|
||||
|
||||
func (m *Sealing) minerSector(spt abi.RegisteredSealProof, num abi.SectorNumber) storage.SectorRef {
|
||||
|
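With v3 actors the miner's on-chain info carries a WindowPoStProofType rather than a SealProofType, so the sealing code now derives the preferred seal proof from it together with the current network version. A hedged sketch of that lookup, assuming the lotus helper named in the hunk above:

package sealsketch

import (
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/network"

    "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// sealProofFor resolves the seal proof to use for new sectors from the miner's
// window PoSt proof type and the network version in effect.
func sealProofFor(nv network.Version, wpost abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
    return miner.PreferredSealProofTypeFromWindowPoStType(nv, wpost)
}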
20 go.mod
@ -25,25 +25,26 @@ require (
|
||||
github.com/elastic/gosigar v0.12.0
|
||||
github.com/fatih/color v1.9.0
|
||||
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f
|
||||
github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb
|
||||
github.com/filecoin-project/go-address v0.0.5
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 // indirect
|
||||
github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816
|
||||
github.com/filecoin-project/go-bitfield v0.2.3
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2
|
||||
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434
|
||||
github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03
|
||||
github.com/filecoin-project/go-data-transfer v1.2.7
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a
|
||||
github.com/filecoin-project/go-fil-markets v1.1.2
|
||||
github.com/filecoin-project/go-fil-markets v1.1.5
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.2
|
||||
github.com/filecoin-project/go-multistore v0.0.3
|
||||
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
|
||||
github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261
|
||||
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc
|
||||
github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71
|
||||
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe
|
||||
github.com/filecoin-project/go-statestore v0.1.0
|
||||
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
|
||||
github.com/filecoin-project/specs-actors v0.9.13
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.3
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.4
|
||||
github.com/filecoin-project/specs-actors/v3 v3.0.1-0.20210128235937-57195d8909b1
|
||||
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
|
||||
github.com/filecoin-project/test-vectors/schema v0.0.5
|
||||
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1
|
||||
@ -125,12 +126,11 @@ require (
|
||||
github.com/prometheus/client_golang v1.6.0
|
||||
github.com/raulk/clock v1.1.0
|
||||
github.com/raulk/go-watchdog v1.0.1
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/supranational/blst v0.1.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/syndtr/goleveldb v1.0.0
|
||||
github.com/urfave/cli/v2 v2.2.0
|
||||
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2
|
||||
github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4
|
||||
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
|
||||
github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d
|
||||
@ -141,7 +141,7 @@ require (
|
||||
go.uber.org/multierr v1.6.0
|
||||
go.uber.org/zap v1.16.0
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
|
||||
@ -157,5 +157,3 @@ replace github.com/golangci/golangci-lint => github.com/golangci/golangci-lint v
|
||||
replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi
|
||||
|
||||
replace github.com/filecoin-project/test-vectors => ./extern/test-vectors
|
||||
|
||||
replace github.com/supranational/blst => ./extern/blst
|
||||
|
32 go.sum
@ -240,16 +240,18 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL
|
||||
github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E=
|
||||
github.com/filecoin-project/go-address v0.0.3 h1:eVfbdjEbpbzIrbiSa+PiGUY+oDK9HnUn+M1R/ggoHf8=
|
||||
github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
|
||||
github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb h1:Cbu7YYsXHtVlPEJ+eqbBx2S3ElmWCB0NjpGPYvvvCrA=
|
||||
github.com/filecoin-project/go-address v0.0.5-0.20201103152444-f2023ef3f5bb/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
|
||||
github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM=
|
||||
github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g=
|
||||
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0 h1:Ou/q82QeHGOhpkedvaxxzpBYuqTxLCcj5OChkDNx4qc=
|
||||
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o=
|
||||
github.com/filecoin-project/go-bitfield v0.2.0 h1:gCtLcjskIPtdg4NfN7gQZSQF9yrBQ7mkT0qCJxzGI2Q=
|
||||
github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
|
||||
github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816 h1:RMdzMqe3mu2Z/3N3b9UEfkbGZxukstmZgNC024ybWhA=
|
||||
github.com/filecoin-project/go-bitfield v0.2.3-0.20201110211213-fe2c1862e816/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
|
||||
github.com/filecoin-project/go-bitfield v0.2.3 h1:pedK/7maYF06Z+BYJf2OeFFqIDEh6SP6mIOlLFpYXGs=
|
||||
github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8=
|
||||
github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg=
|
||||
github.com/filecoin-project/go-commp-utils v0.0.0-20201119054358-b88f7a96a434 h1:0kHszkYP3hgApcjl5x4rpwONhN9+j7XDobf6at5XfHs=
|
||||
@ -267,12 +269,14 @@ github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a h1:hyJ+pUm/4U4RdEZBlg6k8Ma4rDiuvqyGpoICXAxwsTg=
|
||||
github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c=
|
||||
github.com/filecoin-project/go-fil-markets v1.1.2 h1:5FVdDmF9GvW6Xllql9OGiJXEZjh/tu590BXSQH2W/vU=
|
||||
github.com/filecoin-project/go-fil-markets v1.1.2/go.mod h1:6oTRaAsHnCqhi3mpZqdvnWIzH6QzHQc4dbhJrI9/BfQ=
|
||||
github.com/filecoin-project/go-fil-markets v1.1.5 h1:S5LIyy7VruG+zFMfsuDiJKvEqF+NpTPRMvN9GqJko3w=
|
||||
github.com/filecoin-project/go-fil-markets v1.1.5/go.mod h1:6oTRaAsHnCqhi3mpZqdvnWIzH6QzHQc4dbhJrI9/BfQ=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
|
||||
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
|
||||
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI=
|
||||
github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1 h1:zbzs46G7bOctkZ+JUX3xirrj0RaEsi+27dtlsgrTNBg=
|
||||
github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI=
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.2 h1:MTebUawBHLxxY9gDi1WXuGc89TWIDmsgoDqeZSk9KRw=
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.2/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4=
|
||||
github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI=
|
||||
@ -286,6 +290,8 @@ github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go
|
||||
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
|
||||
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc h1:+hbMY4Pcx2oizrfH08VWXwrj5mU8aJT6g0UNxGHFCGU=
|
||||
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
|
||||
github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71 h1:Cas/CUB4ybYpdxvW7LouaydE16cpwdq3vvS3qgZuU+Q=
|
||||
github.com/filecoin-project/go-state-types v0.0.0-20210119062722-4adba5aaea71/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
|
||||
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw=
|
||||
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
|
||||
github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ=
|
||||
@ -299,8 +305,10 @@ github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK
|
||||
github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.2 h1:2Vcf4CGa29kRh4JJ02m+FbvD/p3YNnLGsaHfw7Uj49g=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.2/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.3 h1:5Pd6pjU7VjUye+Hz4gYBCPAFdBxtEbHsgGYvWmfc83w=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.3/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.4 h1:NZK2oMCcA71wNsUzDBmLQyRMzcCnX9tDGvwZ53G67j8=
|
||||
github.com/filecoin-project/specs-actors/v2 v2.3.4/go.mod h1:UuJQLoTx/HPvvWeqlIFmC/ywlOLHNe8SNQ3OunFbu2Y=
|
||||
github.com/filecoin-project/specs-actors/v3 v3.0.1-0.20210128235937-57195d8909b1 h1:I6mvbwANIoToUZ37cYmuLyDKbPlAUxWnp0fJOZnlTz4=
|
||||
github.com/filecoin-project/specs-actors/v3 v3.0.1-0.20210128235937-57195d8909b1/go.mod h1:NL24TPjJGyU7fh1ztpUyYcoZi3TmRKNEI0huPYmhObA=
|
||||
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
|
||||
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
|
||||
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
|
||||
@ -753,6 +761,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
@ -1418,6 +1428,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
||||
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
|
||||
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
@ -1470,6 +1482,8 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:f
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2 h1:7HzUKl5d/dELS9lLeT4W6YvliZx+s9k/eOOIdHKrA/w=
|
||||
github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
|
||||
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=
|
||||
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
|
||||
github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g=
|
||||
@ -1694,6 +1708,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -21,8 +21,8 @@ import (
)

type IpfsBstore struct {
    ctx context.Context
    api iface.CoreAPI
    ctx             context.Context
    api, offlineAPI iface.CoreAPI
}

func NewIpfsBstore(ctx context.Context, onlineMode bool) (*IpfsBstore, error) {
@ -34,10 +34,18 @@ func NewIpfsBstore(ctx context.Context, onlineMode bool) (*IpfsBstore, error) {
    if err != nil {
        return nil, xerrors.Errorf("setting offline mode: %s", err)
    }
    offlineAPI := api
    if onlineMode {
        offlineAPI, err = localApi.WithOptions(options.Api.Offline(true))
        if err != nil {
            return nil, xerrors.Errorf("applying offline mode: %s", err)
        }
    }

    return &IpfsBstore{
        ctx: ctx,
        api: api,
        ctx:        ctx,
        api:        api,
        offlineAPI: offlineAPI,
    }, nil
}

@ -50,10 +58,18 @@ func NewRemoteIpfsBstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineM
    if err != nil {
        return nil, xerrors.Errorf("applying offline mode: %s", err)
    }
    offlineAPI := api
    if onlineMode {
        offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true))
        if err != nil {
            return nil, xerrors.Errorf("applying offline mode: %s", err)
        }
    }

    return &IpfsBstore{
        ctx: ctx,
        api: api,
        ctx:        ctx,
        api:        api,
        offlineAPI: offlineAPI,
    }, nil
}

@ -62,7 +78,7 @@ func (i *IpfsBstore) DeleteBlock(cid cid.Cid) error {
}

func (i *IpfsBstore) Has(cid cid.Cid) (bool, error) {
    _, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid))
    _, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid))
    if err != nil {
        // The underlying client is running in Offline mode.
        // Stat() will fail with an err if the block isn't in the
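For context, the hunk above keeps a second, offline CoreAPI handle so that Has() can check block presence without the IPFS node reaching out to the network. Below is a minimal stand-alone sketch of the same pattern, assuming the go-ipfs-http-client and interface-go-ipfs-core packages referenced in the hunk; the hasLocally helper, the local-daemon setup, and the example CID are illustrative and not part of this change.

```go
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
	httpapi "github.com/ipfs/go-ipfs-http-client"
	iface "github.com/ipfs/interface-go-ipfs-core"
	"github.com/ipfs/interface-go-ipfs-core/options"
	"github.com/ipfs/interface-go-ipfs-core/path"
)

// hasLocally asks an offline view of the API whether a block is present.
// Because the view is offline, a miss fails fast instead of triggering a
// network fetch; any Stat error is treated as "not present" for brevity.
func hasLocally(ctx context.Context, api iface.CoreAPI, c cid.Cid) (bool, error) {
	offline, err := api.WithOptions(options.Api.Offline(true))
	if err != nil {
		return false, err
	}
	if _, err := offline.Block().Stat(ctx, path.IpldPath(c)); err != nil {
		return false, nil
	}
	return true, nil
}

func main() {
	// Assumes a local IPFS daemon is running.
	api, err := httpapi.NewLocalApi()
	if err != nil {
		panic(err)
	}
	// Well-known CID of the empty unixfs directory, used only as an example.
	c, err := cid.Decode("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn")
	if err != nil {
		panic(err)
	}
	ok, err := hasLocally(context.Background(), api, c)
	fmt.Println("have locally:", ok, err)
}
```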
@ -7,17 +7,17 @@ import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/crypto"

    blst "github.com/supranational/blst/bindings/go"
    ffi "github.com/filecoin-project/filecoin-ffi"

    "github.com/filecoin-project/lotus/lib/sigs"
)

const DST = string("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_NUL_")

type SecretKey = blst.SecretKey
type PublicKey = blst.P1Affine
type Signature = blst.P2Affine
type AggregateSignature = blst.P2Aggregate
type SecretKey = ffi.PrivateKey
type PublicKey = ffi.PublicKey
type Signature = ffi.Signature
type AggregateSignature = ffi.Signature

type blsSigner struct{}

@ -29,30 +29,55 @@ func (blsSigner) GenPrivate() ([]byte, error) {
        return nil, fmt.Errorf("bls signature error generating random data")
    }
    // Note private keys seem to be serialized little-endian!
    pk := blst.KeyGen(ikm[:]).ToLEndian()
    return pk, nil
    sk := ffi.PrivateKeyGenerateWithSeed(ikm)
    return sk[:], nil
}

func (blsSigner) ToPublic(priv []byte) ([]byte, error) {
    pk := new(SecretKey).FromLEndian(priv)
    if pk == nil || !pk.Valid() {
    if priv == nil || len(priv) != ffi.PrivateKeyBytes {
        return nil, fmt.Errorf("bls signature invalid private key")
    }
    return new(PublicKey).From(pk).Compress(), nil

    sk := new(SecretKey)
    copy(sk[:], priv[:ffi.PrivateKeyBytes])

    pubkey := ffi.PrivateKeyPublicKey(*sk)

    return pubkey[:], nil
}

func (blsSigner) Sign(p []byte, msg []byte) ([]byte, error) {
    pk := new(SecretKey).FromLEndian(p)
    if pk == nil || !pk.Valid() {
    if p == nil || len(p) != ffi.PrivateKeyBytes {
        return nil, fmt.Errorf("bls signature invalid private key")
    }
    return new(Signature).Sign(pk, msg, []byte(DST)).Compress(), nil

    sk := new(SecretKey)
    copy(sk[:], p[:ffi.PrivateKeyBytes])

    sig := ffi.PrivateKeySign(*sk, msg)

    return sig[:], nil
}

func (blsSigner) Verify(sig []byte, a address.Address, msg []byte) error {
    if !new(Signature).VerifyCompressed(sig, a.Payload()[:], msg, []byte(DST)) {
    payload := a.Payload()
    if sig == nil || len(sig) != ffi.SignatureBytes || len(payload) != ffi.PublicKeyBytes {
        return fmt.Errorf("bls signature failed to verify")
    }

    pk := new(PublicKey)
    copy(pk[:], payload[:ffi.PublicKeyBytes])

    sigS := new(Signature)
    copy(sigS[:], sig[:ffi.SignatureBytes])

    msgs := [1]ffi.Message{msg}
    pks := [1]PublicKey{*pk}

    if !ffi.HashVerify(sigS, msgs[:], pks[:]) {
        return fmt.Errorf("bls signature failed to verify")
    }

    return nil
}
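The hunk above replaces the hand-rolled blst bindings with the signer types exported by filecoin-ffi; the Verify path now recovers the 48-byte public key from the address payload and calls ffi.HashVerify. A short sketch of driving that signer through the lib/sigs registry follows; the blank-import path and the exact helper signatures are assumed from the lotus tree rather than shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/crypto"

	"github.com/filecoin-project/lotus/lib/sigs"
	_ "github.com/filecoin-project/lotus/lib/sigs/bls" // registers the ffi-backed blsSigner via init()
)

func main() {
	// Generate a BLS private key through the registered signer.
	priv, err := sigs.Generate(crypto.SigTypeBLS)
	if err != nil {
		panic(err)
	}
	pub, err := sigs.ToPublic(crypto.SigTypeBLS, priv)
	if err != nil {
		panic(err)
	}
	// A BLS (f3...) address carries the 48-byte public key as its payload,
	// which is what Verify reads back out.
	addr, err := address.NewBLSAddress(pub)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello")
	sig, err := sigs.Sign(crypto.SigTypeBLS, priv, msg)
	if err != nil {
		panic(err)
	}
	fmt.Println("verify:", sigs.Verify(sig, addr, msg)) // nil on success
}
```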
@ -202,5 +202,109 @@
    "AddVerifiedClient",
    "UseBytes",
    "RestoreBytes"
  ],
  "fil/3/account": [
    "Send",
    "Constructor",
    "PubkeyAddress"
  ],
  "fil/3/cron": [
    "Send",
    "Constructor",
    "EpochTick"
  ],
  "fil/3/init": [
    "Send",
    "Constructor",
    "Exec"
  ],
  "fil/3/multisig": [
    "Send",
    "Constructor",
    "Propose",
    "Approve",
    "Cancel",
    "AddSigner",
    "RemoveSigner",
    "SwapSigner",
    "ChangeNumApprovalsThreshold",
    "LockBalance"
  ],
  "fil/3/paymentchannel": [
    "Send",
    "Constructor",
    "UpdateChannelState",
    "Settle",
    "Collect"
  ],
  "fil/3/reward": [
    "Send",
    "Constructor",
    "AwardBlockReward",
    "ThisEpochReward",
    "UpdateNetworkKPI"
  ],
  "fil/3/storagemarket": [
    "Send",
    "Constructor",
    "AddBalance",
    "WithdrawBalance",
    "PublishStorageDeals",
    "VerifyDealsForActivation",
    "ActivateDeals",
    "OnMinerSectorsTerminate",
    "ComputeDataCommitment",
    "CronTick"
  ],
  "fil/3/storageminer": [
    "Send",
    "Constructor",
    "ControlAddresses",
    "ChangeWorkerAddress",
    "ChangePeerID",
    "SubmitWindowedPoSt",
    "PreCommitSector",
    "ProveCommitSector",
    "ExtendSectorExpiration",
    "TerminateSectors",
    "DeclareFaults",
    "DeclareFaultsRecovered",
    "OnDeferredCronEvent",
    "CheckSectorProven",
    "ApplyRewards",
    "ReportConsensusFault",
    "WithdrawBalance",
    "ConfirmSectorProofsValid",
    "ChangeMultiaddrs",
    "CompactPartitions",
    "CompactSectorNumbers",
    "ConfirmUpdateWorkerKey",
    "RepayDebt",
    "ChangeOwnerAddress",
    "DisputeWindowedPoSt"
  ],
  "fil/3/storagepower": [
    "Send",
    "Constructor",
    "CreateMiner",
    "UpdateClaimedPower",
    "EnrollCronEvent",
    "OnEpochTickEnd",
    "UpdatePledgeTotal",
    "SubmitPoRepForBulkVerify",
    "CurrentTotalPower"
  ],
  "fil/3/system": [
    "Send",
    "Constructor"
  ],
  "fil/3/verifiedregistry": [
    "Send",
    "Constructor",
    "AddVerifier",
    "RemoveVerifier",
    "AddVerifiedClient",
    "UseBytes",
    "RestoreBytes"
  ]
}
@ -23,6 +23,7 @@ import (
    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/lotus/chain/actors/builtin/market"
    "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
    "github.com/filecoin-project/lotus/chain/events"
    "github.com/filecoin-project/lotus/chain/events/state"
    "github.com/filecoin-project/lotus/chain/types"
@ -134,30 +135,36 @@ func (n *ProviderNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Si
    return err == nil, err
}

func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) {
func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (address.Address, error) {
    tsk, err := types.TipSetKeyFromBytes(tok)
    if err != nil {
        return address.Undef, err
    }

    mi, err := n.StateMinerInfo(ctx, miner, tsk)
    mi, err := n.StateMinerInfo(ctx, maddr, tsk)
    if err != nil {
        return address.Address{}, err
    }
    return mi.Worker, nil
}

func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, miner address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) {
func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) {
    tsk, err := types.TipSetKeyFromBytes(tok)
    if err != nil {
        return 0, err
    }

    mi, err := n.StateMinerInfo(ctx, miner, tsk)
    mi, err := n.StateMinerInfo(ctx, maddr, tsk)
    if err != nil {
        return 0, err
    }
    return mi.SealProofType, nil

    nver, err := n.StateNetworkVersion(ctx, tsk)
    if err != nil {
        return 0, err
    }

    return miner.PreferredSealProofTypeFromWindowPoStType(nver, mi.WindowPoStProofType)
}

func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) {
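GetProofType no longer reads MinerInfo.SealProofType directly; it derives the seal proof from the current network version and the miner's Window PoSt proof type. A minimal sketch of that mapping follows, assuming the miner.PreferredSealProofTypeFromWindowPoStType helper shown in the hunk and the go-state-types constants; the expected result (the V1_1 32GiB seal proof at nv10) is an assumption about the mapping, not something asserted by this diff.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

func main() {
	// Map a 32GiB Window PoSt proof type to the seal proof preferred at nv10 (actors v3).
	spt, err := miner.PreferredSealProofTypeFromWindowPoStType(
		network.Version10,
		abi.RegisteredPoStProof_StackedDrgWindow32GiBV1,
	)
	if err != nil {
		panic(err)
	}
	// Expected (assumption): the V1_1 flavour of the 32GiB seal proof.
	fmt.Println(spt, spt == abi.RegisteredSealProof_StackedDrg32GiBV1_1)
}
```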
@ -269,7 +269,7 @@ func Online() Option {
    Override(new(vm.SyscallBuilder), vm.Syscalls),
    Override(new(*store.ChainStore), modules.ChainStore),
    Override(new(stmgr.UpgradeSchedule), stmgr.DefaultUpgradeSchedule()),
    Override(new(*stmgr.StateManager), stmgr.NewStateManagerWithUpgradeSchedule),
    Override(new(*stmgr.StateManager), modules.StateManager),
    Override(new(*wallet.LocalWallet), wallet.NewWallet),
    Override(new(wallet.Default), From(new(*wallet.LocalWallet))),
    Override(new(api.WalletAPI), From(new(wallet.MultiWallet))),
@ -7,6 +7,8 @@ import (
    "io"
    "os"

    "github.com/filecoin-project/lotus/chain/actors/builtin/miner"

    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-padreader"
@ -157,6 +159,16 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
        dealStart = ts.Height() + abi.ChainEpoch(dealStartBufferHours*blocksPerHour) // TODO: Get this from storage ask
    }

    networkVersion, err := a.StateNetworkVersion(ctx, types.EmptyTSK)
    if err != nil {
        return nil, xerrors.Errorf("failed to get network version: %w", err)
    }

    st, err := miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType)
    if err != nil {
        return nil, xerrors.Errorf("failed to get seal proof type: %w", err)
    }

    result, err := a.SMDealClient.ProposeStorageDeal(ctx, storagemarket.ProposeStorageDealParams{
        Addr: params.Wallet,
        Info: &providerInfo,
@ -165,7 +177,7 @@ func (a *API) ClientStartDeal(ctx context.Context, params *api.StartDealParams)
        EndEpoch: calcDealExpiration(params.MinBlocksDuration, md, dealStart),
        Price: params.EpochPrice,
        Collateral: params.ProviderCollateral,
        Rt: mi.SealProofType,
        Rt: st,
        FastRetrieval: params.FastRetrieval,
        VerifiedDeal: params.VerifiedDeal,
        StoreID: storeID,
@ -140,15 +140,10 @@ func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address,
        return miner.MinerInfo{}, xerrors.Errorf("failed to load miner actor state: %w", err)
    }

    // TODO: You know, this is terrible.
    // I mean, we _really_ shouldn't do this. Maybe we should convert somewhere else?
    info, err := mas.Info()
    if err != nil {
        return miner.MinerInfo{}, err
    }
    if m.StateManager.GetNtwkVersion(ctx, ts.Height()) >= network.Version7 && info.SealProofType < abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
        info.SealProofType += abi.RegisteredSealProof_StackedDrg2KiBV1_1
    }
    return info, nil
}

@ -170,13 +165,19 @@ func (a *StateAPI) StateMinerDeadlines(ctx context.Context, m address.Address, t

    out := make([]api.Deadline, deadlines)
    if err := mas.ForEachDeadline(func(i uint64, dl miner.Deadline) error {
        ps, err := dl.PostSubmissions()
        ps, err := dl.PartitionsPoSted()
        if err != nil {
            return err
        }

        l, err := dl.DisputableProofCount()
        if err != nil {
            return err
        }

        out[i] = api.Deadline{
            PostSubmissions: ps,
            PostSubmissions:      ps,
            DisputableProofCount: l,
        }
        return nil
    }); err != nil {
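With this hunk, StateMinerDeadlines reports, per deadline, how many submitted window PoSt proofs are still disputable. A small sketch of consuming the extended api.Deadline follows; the disputableDeadlines helper and the literal values are illustrative only and assume a []api.Deadline as returned by StateMinerDeadlines.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// disputableDeadlines returns the indices of deadlines that still carry
// window PoSt proofs that could be disputed via DisputeWindowedPoSt.
func disputableDeadlines(dls []api.Deadline) []int {
	var out []int
	for i, dl := range dls {
		if dl.DisputableProofCount > 0 {
			out = append(out, i)
		}
	}
	return out
}

func main() {
	// Stand-in data; in practice this slice comes from StateMinerDeadlines.
	example := []api.Deadline{
		{DisputableProofCount: 0},
		{DisputableProofCount: 2},
	}
	fmt.Println(disputableDeadlines(example)) // [1]
}
```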
@ -77,7 +77,7 @@ func MessagePool(lc fx.Lifecycle, sm *stmgr.StateManager, ps *pubsub.PubSub, ds
}

func ChainRawBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.ChainRawBlockstore, error) {
    bs, err := r.Blockstore(repo.BlockstoreChain)
    bs, err := r.Blockstore(helpers.LifecycleCtx(mctx, lc), repo.BlockstoreChain)
    if err != nil {
        return nil, err
    }