Merge remote-tracking branch 'origin/feat/nv13' into feat/nv13-1.11

Łukasz Magiera 2021-05-31 21:38:34 +02:00
commit ffa47659a1
29 changed files with 781 additions and 178 deletions


@@ -6,6 +6,8 @@ import (
"testing"
"time"
"github.com/filecoin-project/go-state-types/big"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
@@ -846,3 +848,155 @@ waitForProof:
require.Contains(t, err.Error(), "failed to dispute valid post (RetCode=16)")
}
}
func TestWindowPostBaseFeeNoBurn(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
och := build.UpgradeClausHeight
build.UpgradeClausHeight = 10
n, sn := b(t, DefaultFullOpts(1), OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
{
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := miner.MineOne(ctx, MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
pledgeSectors(t, ctx, miner, 10, 0, nil)
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
en := wact.Nonce
// wait for a new message to be sent from worker address, it will be a PoSt
waitForProof:
for {
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
if wact.Nonce > en {
break waitForProof
}
build.Clock.Sleep(blocktime)
}
slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
require.NoError(t, err)
pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
require.NoError(t, err)
require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero())
build.UpgradeClausHeight = och
}
func TestWindowPostBaseFeeBurn(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
{
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
}
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
require.NoError(t, err)
build.Clock.Sleep(time.Second)
done := make(chan struct{})
go func() {
defer close(done)
for ctx.Err() == nil {
build.Clock.Sleep(blocktime)
if err := miner.MineOne(ctx, MineNext); err != nil {
if ctx.Err() != nil {
// context was canceled, ignore the error.
return
}
t.Error(err)
}
}
}()
defer func() {
cancel()
<-done
}()
pledgeSectors(t, ctx, miner, 10, 0, nil)
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
en := wact.Nonce
// wait for a new message to be sent from worker address, it will be a PoSt
waitForProof:
for {
wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
require.NoError(t, err)
if wact.Nonce > en {
break waitForProof
}
build.Clock.Sleep(blocktime)
}
slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
require.NoError(t, err)
pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
require.NoError(t, err)
require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero())
}


@@ -50,7 +50,7 @@ const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
const UpgradeOrangeHeight = 336458
// 2020-12-22T02:00:00Z
-const UpgradeClausHeight = 343200
var UpgradeClausHeight = abi.ChainEpoch(343200)
// 2021-03-04T00:00:30Z
const UpgradeTrustHeight = 550321
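Making UpgradeClausHeight a var (it was a const) is what lets tests move the upgrade epoch at runtime; TestWindowPostBaseFeeNoBurn above does exactly this:
och := build.UpgradeClausHeight
build.UpgradeClausHeight = 10 // run the post-Claus burn rules from near genesis
// ... drive the test chain ...
build.UpgradeClausHeight = och // restore the real epoch at the end of the test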


@@ -0,0 +1,35 @@
package cron
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
cron5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/cron"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store) (State, error) {
out := state5{store: store}
out.State = *cron5.ConstructState(cron5.BuiltInEntries())
return &out, nil
}
type state5 struct {
cron5.State
store adt.Store
}
func (s *state5) GetState() interface{} {
return &s.State
}


@@ -0,0 +1,35 @@
package system
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
system5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/system"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store) (State, error) {
out := state5{store: store}
out.State = system5.State{}
return &out, nil
}
type state5 struct {
system5.State
store adt.Store
}
func (s *state5) GetState() interface{} {
return &s.State
}


@@ -60,8 +60,6 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
miner4.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
miner4.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
-miner5.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
-miner5.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
miner5.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
AddSupportedProofTypes(types...)
@@ -93,9 +91,6 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
miner4.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner4.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
-miner5.PreCommitSealProofTypesV0[t] = struct{}{}
-miner5.PreCommitSealProofTypesV7[t] = struct{}{}
-miner5.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner5.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
}
@@ -308,7 +303,7 @@ func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch {
if nwVer <= network.Version10 {
-return builtin5.SealProofPoliciesV0[proof].SectorMaxLifetime
return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
}
return builtin5.SealProofPoliciesV11[proof].SectorMaxLifetime


@@ -31,10 +31,12 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
{{range .versions}}
{{if (eq . 0)}}
miner{{.}}.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
-{{else}}
{{else if (le . 4)}}
miner{{.}}.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner{{.}}.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
{{else}}
miner{{.}}.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
{{end}}
{{end}}
@@ -51,15 +53,17 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
// Set for all miner versions.
{{range .versions}}
{{if (eq . 0)}}
miner{{.}}.SupportedProofTypes[t] = struct{}{}
-{{else}}
{{else if (le . 4)}}
miner{{.}}.PreCommitSealProofTypesV0[t] = struct{}{}
miner{{.}}.PreCommitSealProofTypesV7[t] = struct{}{}
miner{{.}}.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
{{else}}
miner{{.}}.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
{{end}}
{{end}}
}
}
@@ -203,7 +207,7 @@ func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch {
if nwVer <= network.Version10 {
-return builtin{{.latestVersion}}.SealProofPoliciesV0[proof].SectorMaxLifetime
return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
}
return builtin{{.latestVersion}}.SealProofPoliciesV11[proof].SectorMaxLifetime


@@ -76,9 +76,10 @@ type ChainGen struct {
w *wallet.LocalWallet
eppProvs map[address.Address]WinningPoStProver
Miners []address.Address
receivers []address.Address
// a SecP address
banker address.Address
bankerNonce uint64
@@ -111,7 +112,7 @@ var DefaultRemainderAccountActor = genesis.Actor{
Meta: remAccMeta.ActorMeta(),
}
-func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeSchedule) (*ChainGen, error) {
j := journal.NilJournal()
// TODO: we really shouldn't modify a global variable here.
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
@@ -246,7 +247,10 @@ func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{}
}
-sm := stmgr.NewStateManager(cs)
sm, err := stmgr.NewStateManagerWithUpgradeSchedule(cs, us)
if err != nil {
return nil, xerrors.Errorf("initing stmgr: %w", err)
}
miners := []address.Address{maddr1, maddr2}
@@ -284,6 +288,14 @@ func NewGenerator() (*ChainGen, error) {
return NewGeneratorWithSectors(1)
}
func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) {
return NewGeneratorWithSectorsAndUpgradeSchedule(numSectors, stmgr.DefaultUpgradeSchedule())
}
func NewGeneratorWithUpgradeSchedule(us stmgr.UpgradeSchedule) (*ChainGen, error) {
return NewGeneratorWithSectorsAndUpgradeSchedule(1, us)
}
func (cg *ChainGen) StateManager() *stmgr.StateManager {
return cg.sm
}
@@ -386,7 +398,7 @@ type MinedTipSet struct {
}
func (cg *ChainGen) NextTipSet() (*MinedTipSet, error) {
-mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners)
mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners, 0)
if err != nil {
return nil, err
}
@@ -399,7 +411,7 @@ func (cg *ChainGen) SetWinningPoStProver(m address.Address, wpp WinningPoStProve
cg.eppProvs[m] = wpp
}
-func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) {
func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address, nulls abi.ChainEpoch) (*MinedTipSet, error) {
ms, err := cg.GetMessages(cg)
if err != nil {
return nil, xerrors.Errorf("get random messages: %w", err)
@@ -410,21 +422,23 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad
msgs[i] = ms
}
-fts, err := cg.NextTipSetFromMinersWithMessages(base, miners, msgs)
fts, err := cg.NextTipSetFromMinersWithMessagesAndNulls(base, miners, msgs, nulls)
if err != nil {
return nil, err
}
cg.CurTipset = fts
return &MinedTipSet{
TipSet: fts,
Messages: ms,
}, nil
}
-func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage) (*store.FullTipSet, error) {
func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) (*store.FullTipSet, error) {
var blks []*types.FullBlock
-for round := base.Height() + 1; len(blks) == 0; round++ {
for round := base.Height() + nulls + 1; len(blks) == 0; round++ {
for mi, m := range miners {
bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round)
if err != nil {
@@ -457,6 +471,8 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessages(base *types.TipSet, miners
return nil, err
}
cg.CurTipset = fts
return fts, nil
}
@@ -576,7 +592,7 @@ func (mca mca) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipS
return nil, xerrors.Errorf("loading tipset key: %w", err)
}
-return mca.sm.ChainStore().GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy)
return mca.sm.ChainStore().GetChainRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy)
}
func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
@@ -585,7 +601,7 @@ func (mca mca) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSe
return nil, xerrors.Errorf("loading tipset key: %w", err)
}
-return mca.sm.ChainStore().GetBeaconRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy)
return mca.sm.ChainStore().GetBeaconRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy)
}
func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) {


@@ -488,13 +488,25 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid
// TODO: copied from actors test harness, deduplicate or remove from here
type fakeRand struct{}
-func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
func (fr *fakeRand) GetChainRandomnessLookingForward(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
return out, nil
}
-func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
func (fr *fakeRand) GetChainRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
return out, nil
}
func (fr *fakeRand) GetBeaconRandomnessLookingForward(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
return out, nil
}
func (fr *fakeRand) GetBeaconRandomnessLookingBack(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
return out, nil


@@ -107,6 +107,9 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) {
}
rheight -= ci.skipLength
if rheight < 0 {
rheight = 0
}
var skipTarget *types.TipSet
if parent.Height() < rheight {
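A quick worked example of the new clamp (illustrative numbers, not from the commit): with ci.skipLength = 20 and rheight = 15, the subtraction would previously leave rheight at -5; the guard now pins it to 0 so fillCache never asks the chain for a negative epoch.
// rheight = 15, ci.skipLength = 20
// rheight -= ci.skipLength  // -> -5
// rheight < 0, so rheight = 0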


@@ -1404,7 +1404,15 @@ func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.Cha
return h.Sum(nil), nil
}
-func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
func (cs *ChainStore) GetBeaconRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, true)
}
func (cs *ChainStore) GetBeaconRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cs.GetBeaconRandomness(ctx, blks, pers, round, entropy, false)
}
func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetBeaconRandomness")
defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round)))
@@ -1423,7 +1431,7 @@ func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, p
searchHeight = 0
}
-randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
if err != nil {
return nil, err
}
@@ -1438,7 +1446,15 @@ func (cs *ChainStore) GetBeaconRandomness(ctx context.Context, blks []cid.Cid, p
return DrawRandomness(be.Data, pers, round, entropy)
}
-func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
func (cs *ChainStore) GetChainRandomnessLookingBack(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cs.GetChainRandomness(ctx, blks, pers, round, entropy, true)
}
func (cs *ChainStore) GetChainRandomnessLookingForward(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cs.GetChainRandomness(ctx, blks, pers, round, entropy, false)
}
func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetChainRandomness")
defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round)))
@@ -1457,7 +1473,7 @@ func (cs *ChainStore) GetChainRandomness(ctx context.Context, blks []cid.Cid, pe
searchHeight = 0
}
-randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true)
randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
if err != nil {
return nil, err
}
@@ -1732,12 +1748,20 @@ func NewChainRand(cs *ChainStore, blks []cid.Cid) vm.Rand {
}
}
-func (cr *chainRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
-return cr.cs.GetChainRandomness(ctx, cr.blks, pers, round, entropy)
-}
-func (cr *chainRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
-return cr.cs.GetBeaconRandomness(ctx, cr.blks, pers, round, entropy)
-}
func (cr *chainRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cr.cs.GetChainRandomnessLookingBack(ctx, cr.blks, pers, round, entropy)
}
func (cr *chainRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cr.cs.GetChainRandomnessLookingForward(ctx, cr.blks, pers, round, entropy)
}
func (cr *chainRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cr.cs.GetBeaconRandomnessLookingBack(ctx, cr.blks, pers, round, entropy)
}
func (cr *chainRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return cr.cs.GetBeaconRandomnessLookingForward(ctx, cr.blks, pers, round, entropy)
}
func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) {
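The LookingBack/LookingForward split is about how a null round is resolved: looking back returns the tipset before the null round (the pre-v13 behaviour hardcoded above as `true`), while looking forward returns the tipset after it. A rough usage sketch, assuming a caller that already knows the network version (this mirrors the gate added in chain/vm/runtime.go and is not part of the commit; the function name is hypothetical):
func drawBeaconRandomness(ctx context.Context, cs *store.ChainStore, nv network.Version, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
	if nv >= network.Version13 {
		// v13+: a null round resolves to the tipset following it
		return cs.GetBeaconRandomnessLookingForward(ctx, blks, pers, round, entropy)
	}
	// pre-v13: keep the old behaviour and look back past the null round
	return cs.GetBeaconRandomnessLookingBack(ctx, blks, pers, round, entropy)
}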


@@ -76,7 +76,7 @@ func BenchmarkGetRandomness(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
-_, err := cs.GetChainRandomness(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil)
_, err := cs.GetChainRandomnessLookingBack(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil)
if err != nil {
b.Fatal(err)
}


@@ -7,6 +7,12 @@ import (
"testing"
"time"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
@@ -101,7 +107,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
g: g,
}
-tu.addSourceNode(h)
tu.addSourceNode(stmgr.DefaultUpgradeSchedule(), h)
//tu.checkHeight("source", source, h)
// separate logs
@@ -110,6 +116,53 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
return tu
}
func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syncTestUtil {
logging.SetLogLevel("*", "INFO")
us := stmgr.UpgradeSchedule{{
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: v5height,
Migration: stmgr.UpgradeActorsV5,
}}
g, err := gen.NewGeneratorWithUpgradeSchedule(us)
if err != nil {
t.Fatalf("%+v", err)
}
ctx, cancel := context.WithCancel(context.Background())
tu := &syncTestUtil{
t: t,
ctx: ctx,
cancel: cancel,
mn: mocknet.New(ctx),
g: g,
}
tu.addSourceNode(us, h)
//tu.checkHeight("source", source, h)
// separate logs
fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b")
return tu
}
func (tu *syncTestUtil) Shutdown() {
tu.cancel()
}
@@ -174,7 +227,7 @@ func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bo
}
}
-func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage) *store.FullTipSet {
func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int, wait, fail bool, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) *store.FullTipSet {
if miners == nil {
for i := range tu.g.Miners {
miners = append(miners, i)
@@ -191,10 +244,10 @@ func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, to int, miners []int,
var nts *store.FullTipSet
var err error
if msgs != nil {
nts, err = tu.g.NextTipSetFromMinersWithMessagesAndNulls(blk.TipSet(), maddrs, msgs, 0)
require.NoError(tu.t, err)
} else {
mt, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs, nulls)
require.NoError(tu.t, err)
nts = mt.TipSet
}
@@ -209,11 +262,11 @@ }
}
func (tu *syncTestUtil) mineNewBlock(src int, miners []int) {
mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false, nil, 0)
tu.g.CurTipset = mts
}
-func (tu *syncTestUtil) addSourceNode(gen int) {
func (tu *syncTestUtil) addSourceNode(us stmgr.UpgradeSchedule, gen int) {
if tu.genesis != nil {
tu.t.Fatal("source node already exists")
}
@@ -229,6 +282,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) {
node.Test(),
node.Override(new(modules.Genesis), modules.LoadGenesis(genesis)),
node.Override(new(stmgr.UpgradeSchedule), us),
)
require.NoError(tu.t, err)
tu.t.Cleanup(func() { _ = stop(context.Background()) })
@@ -445,7 +499,7 @@ func TestSyncBadTimestamp(t *testing.T) {
fmt.Println("BASE: ", base.Cids())
tu.printHeads()
a1 := tu.mineOnBlock(base, 0, nil, false, true, nil, 0)
tu.g.Timestamper = nil
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
@@ -454,7 +508,7 @@ func TestSyncBadTimestamp(t *testing.T) {
fmt.Println("After mine bad block!")
tu.printHeads()
a2 := tu.mineOnBlock(base, 0, nil, true, false, nil, 0)
tu.waitUntilSync(0, client)
@@ -498,7 +552,7 @@ func TestSyncBadWinningPoSt(t *testing.T) {
tu.g.SetWinningPoStProver(tu.g.Miners[1], &badWpp{})
// now ensure that new blocks are not accepted
tu.mineOnBlock(base, client, nil, false, true, nil, 0)
}
func (tu *syncTestUtil) loadChainToNode(to int) {
@@ -543,16 +597,16 @@ func TestSyncFork(t *testing.T) {
fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
// The two nodes fork at this point into 'a' and 'b'
a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0)
a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0)
a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0)
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
// chain B will now be heaviest
b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0)
b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
fmt.Println("A: ", a.Cids(), a.TipSet().Height())
fmt.Println("B: ", b.Cids(), b.TipSet().Height())
@@ -614,13 +668,13 @@ func TestDuplicateNonce(t *testing.T) {
msgs[k] = []*types.SignedMessage{makeMsg(tu.g.Miners[k])}
}
ts1 := tu.mineOnBlock(base, 0, []int{0, 1}, true, false, msgs, 0)
tu.waitUntilSyncTarget(0, ts1.TipSet())
// mine another tipset
ts2 := tu.mineOnBlock(ts1, 0, []int{0, 1}, true, false, make([][]*types.SignedMessage, 2), 0)
tu.waitUntilSyncTarget(0, ts2.TipSet())
var includedMsg cid.Cid
@@ -671,11 +725,15 @@ func TestBadNonce(t *testing.T) {
base := tu.g.CurTipset
// Get the banker from computed tipset state, not the parent.
st, _, err := tu.g.StateManager().TipSetState(context.TODO(), base.TipSet())
require.NoError(t, err)
ba, err := tu.g.StateManager().LoadActorRaw(context.TODO(), tu.g.Banker(), st)
require.NoError(t, err)
// Produce a message from the banker with a bad nonce
makeBadMsg := func() *types.SignedMessage {
-ba, err := tu.nds[0].StateGetActor(context.TODO(), tu.g.Banker(), base.TipSet().Key())
-require.NoError(t, err)
msg := types.Message{
To: tu.g.Banker(),
From: tu.g.Banker(),
@@ -703,7 +761,7 @@ func TestBadNonce(t *testing.T) {
msgs := make([][]*types.SignedMessage, 1)
msgs[0] = []*types.SignedMessage{makeBadMsg()}
tu.mineOnBlock(base, 0, []int{0}, true, true, msgs, 0)
}
func BenchmarkSyncBasic(b *testing.B) {
@@ -768,19 +826,19 @@ func TestSyncCheckpointHead(t *testing.T) {
fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
// The two nodes fork at this point into 'a' and 'b'
a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0)
a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0)
a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0)
tu.waitUntilSyncTarget(p1, a.TipSet())
tu.checkpointTs(p1, a.TipSet().Key())
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
// chain B will now be heaviest
b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0)
b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
fmt.Println("A: ", a.Cids(), a.TipSet().Height())
fmt.Println("B: ", b.Cids(), b.TipSet().Height())
@@ -815,19 +873,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
// The two nodes fork at this point into 'a' and 'b'
a1 := tu.mineOnBlock(base, p1, []int{0}, true, false, nil, 0)
a := tu.mineOnBlock(a1, p1, []int{0}, true, false, nil, 0)
a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0)
tu.waitUntilSyncTarget(p1, a.TipSet())
tu.checkpointTs(p1, a1.TipSet().Key())
require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
// chain B will now be heaviest
b := tu.mineOnBlock(base, p2, []int{1}, true, false, nil, 0)
b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
b = tu.mineOnBlock(b, p2, []int{1}, true, false, nil, 0)
fmt.Println("A: ", a.Cids(), a.TipSet().Height())
fmt.Println("B: ", b.Cids(), b.TipSet().Height())
@@ -846,3 +904,58 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) {
p1Head = tu.getHead(p1)
require.True(tu.t, p1Head.Equals(b.TipSet()))
}
func TestDrandNull(t *testing.T) {
H := 10
v5h := abi.ChainEpoch(50)
ov5h := build.UpgradeHyperdriveHeight
build.UpgradeHyperdriveHeight = v5h
tu := prepSyncTestWithV5Height(t, H, v5h)
entropy := []byte{0, 2, 3, 4}
// arbitrarily chosen
pers := crypto.DomainSeparationTag_WinningPoStChallengeSeed
beforeNull := tu.g.CurTipset
afterNull := tu.mineOnBlock(beforeNull, 0, nil, false, false, nil, 2)
nullHeight := beforeNull.TipSet().Height() + 1
if afterNull.TipSet().Height() == nullHeight {
t.Fatal("didn't inject nulls as expected")
}
rand, err := tu.nds[0].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, nullHeight, entropy)
require.NoError(t, err)
// calculate the expected randomness based on the beacon BEFORE the null
expectedBE := beforeNull.Blocks[0].Header.BeaconEntries
expectedRand, err := store.DrawRandomness(expectedBE[len(expectedBE)-1].Data, pers, nullHeight, entropy)
require.NoError(t, err)
require.Equal(t, []byte(rand), expectedRand)
// zoom zoom to past the v5 upgrade by injecting many many nulls
postUpgrade := tu.mineOnBlock(afterNull, 0, nil, false, false, nil, v5h)
nv, err := tu.nds[0].StateNetworkVersion(tu.ctx, types.EmptyTSK)
require.NoError(t, err)
if nv != network.Version13 {
t.Fatal("expect to be v13 by now")
}
afterNull = tu.mineOnBlock(postUpgrade, 0, nil, false, false, nil, 2)
nullHeight = postUpgrade.TipSet().Height() + 1
if afterNull.TipSet().Height() == nullHeight {
t.Fatal("didn't inject nulls as expected")
}
rand, err = tu.nds[0].ChainGetRandomnessFromBeacon(tu.ctx, afterNull.TipSet().Key(), pers, nullHeight, entropy)
require.NoError(t, err)
// calculate the expected randomness based on the beacon AFTER the null
expectedBE = afterNull.Blocks[0].Header.BeaconEntries
expectedRand, err = store.DrawRandomness(expectedBE[len(expectedBE)-1].Data, pers, nullHeight, entropy)
require.NoError(t, err)
require.Equal(t, []byte(rand), expectedRand)
build.UpgradeHyperdriveHeight = ov5h
}


@@ -160,8 +160,35 @@ var prices = map[abi.ChainEpoch]Pricelist{
hashingBase: 31355,
computeUnsealedSectorCidBase: 98647,
verifySealBase: 2000, // TODO gas, it VerifySeal syscall is not used
verifyAggregateSealBase: 400_000_000, // TODO (~40ms, I think)
verifyAggregateSealPer: map[abi.RegisteredSealProof]int64{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 449900,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 359272,
},
verifyAggregateSealSteps: map[abi.RegisteredSealProof]stepCost{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {
{4, 103994170},
{7, 112356810},
{13, 122912610},
{26, 137559930},
{52, 162039100},
{103, 210960780},
{205, 318351180},
{410, 528274980},
},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {
{4, 102581240},
{7, 110803030},
{13, 120803700},
{26, 134642130},
{52, 157357890},
{103, 203017690},
{205, 304253590},
{410, 509880640},
},
},
verifyPostLookup: map[abi.RegisteredPoStProof]scalingCost{
abi.RegisteredPoStProof_StackedDrgWindow512MiBV1: {
flat: 117680921,


@@ -18,6 +18,28 @@ type scalingCost struct {
scale int64
}
type stepCost []step
type step struct {
start int64
cost int64
}
func (sc stepCost) Lookup(x int64) int64 {
i := 0
for ; i < len(sc); i++ {
if sc[i].start > x {
break
}
}
i-- // look at previous item
if i < 0 {
return 0
}
return sc[i].cost
}
type pricelistV0 struct {
computeGasMulti int64
storageGasMulti int64
@@ -93,9 +115,12 @@ type pricelistV0 struct {
computeUnsealedSectorCidBase int64
verifySealBase int64
verifyAggregateSealBase int64
verifyAggregateSealPer map[abi.RegisteredSealProof]int64
verifyAggregateSealSteps map[abi.RegisteredSealProof]stepCost
verifyPostLookup map[abi.RegisteredPoStProof]scalingCost
verifyPostDiscount bool
verifyConsensusFault int64
}
var _ Pricelist = (*pricelistV0)(nil)
@@ -189,8 +214,18 @@ func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge {
// OnVerifyAggregateSeals
func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge {
-// TODO: this needs more cost tunning
-return newGasCharge("OnVerifyAggregateSeals", pl.verifyAggregateSealBase, 0)
proofType := aggregate.SealProof
perProof, ok := pl.verifyAggregateSealPer[proofType]
if !ok {
perProof = pl.verifyAggregateSealPer[abi.RegisteredSealProof_StackedDrg32GiBV1_1]
}
step, ok := pl.verifyAggregateSealSteps[proofType]
if !ok {
step = pl.verifyAggregateSealSteps[abi.RegisteredSealProof_StackedDrg32GiBV1_1]
}
num := int64(len(aggregate.Infos))
return newGasCharge("OnVerifyAggregateSeals", perProof*num+step.Lookup(num), 0)
}
// OnVerifyPost
chain/vm/gas_v0_test.go (new file, 32 lines)

@@ -0,0 +1,32 @@
package vm
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestStepGasCost(t *testing.T) {
s := stepCost{
{4, 103994170},
{7, 112356810},
{13, 122912610},
{26, 137559930},
{52, 162039100},
{103, 210960780},
{205, 318351180},
{410, 528274980},
}
assert.EqualValues(t, 0, s.Lookup(0))
assert.EqualValues(t, 0, s.Lookup(3))
assert.EqualValues(t, 103994170, s.Lookup(4))
assert.EqualValues(t, 103994170, s.Lookup(6))
assert.EqualValues(t, 112356810, s.Lookup(7))
assert.EqualValues(t, 210960780, s.Lookup(103))
assert.EqualValues(t, 210960780, s.Lookup(204))
assert.EqualValues(t, 318351180, s.Lookup(205))
assert.EqualValues(t, 318351180, s.Lookup(409))
assert.EqualValues(t, 528274980, s.Lookup(410))
assert.EqualValues(t, 528274980, s.Lookup(10000000000))
}


@@ -81,6 +81,10 @@ type Runtime struct {
lastGasCharge *types.GasTrace
}
func (rt *Runtime) BaseFee() abi.TokenAmount {
return rt.vm.baseFee
}
func (rt *Runtime) NetworkVersion() network.Version {
return rt.vm.GetNtwkVersion(rt.ctx, rt.CurrEpoch())
}
@@ -208,17 +212,31 @@ func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool)
}
func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
-res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy)
var err error
var res []byte
if rt.vm.GetNtwkVersion(rt.ctx, randEpoch) >= network.Version13 {
res, err = rt.vm.rand.GetChainRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy)
} else {
res, err = rt.vm.rand.GetChainRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy)
}
if err != nil {
-panic(aerrors.Fatalf("could not get randomness: %s", err))
panic(aerrors.Fatalf("could not get ticket randomness: %s", err))
}
return res
}
func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
-res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, entropy)
var err error
var res []byte
if rt.vm.GetNtwkVersion(rt.ctx, randEpoch) >= network.Version13 {
res, err = rt.vm.rand.GetBeaconRandomnessLookingForward(rt.ctx, personalization, randEpoch, entropy)
} else {
res, err = rt.vm.rand.GetBeaconRandomnessLookingBack(rt.ctx, personalization, randEpoch, entropy)
}
if err != nil {
-panic(aerrors.Fatalf("could not get randomness: %s", err))
panic(aerrors.Fatalf("could not get beacon randomness: %s", err))
}
return res
}


@@ -255,8 +255,10 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
}
type Rand interface {
-GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
-GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
}
type ApplyRet struct {
@@ -566,7 +568,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
gasUsed = 0
}
-burn, err := vm.ShouldBurn(st, msg, errcode)
burn, err := vm.ShouldBurn(ctx, st, msg, errcode)
if err != nil {
return nil, xerrors.Errorf("deciding whether should burn failed: %w", err)
}
@@ -609,26 +611,31 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
}, nil
}
-func (vm *VM) ShouldBurn(st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) {
func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) {
if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version12 {
// Check to see if we should burn funds. We avoid burning on successful
// window post. This won't catch _indirect_ window post calls, but this
// is the best we can get for now.
if vm.blockHeight > build.UpgradeClausHeight && errcode == exitcode.Ok && msg.Method == miner.Methods.SubmitWindowedPoSt {
// Ok, we've checked the _method_, but we still need to check
// the target actor. It would be nice if we could just look at
// the trace, but I'm not sure if that's safe?
if toActor, err := st.GetActor(msg.To); err != nil {
// If the actor wasn't found, we probably deleted it or something. Move on.
if !xerrors.Is(err, types.ErrActorNotFound) {
// Otherwise, this should never fail and something is very wrong.
return false, xerrors.Errorf("failed to lookup target actor: %w", err)
}
} else if builtin.IsStorageMinerActor(toActor.Code) {
// Ok, this is a storage miner and we've processed a window post. Remove the burn.
return false, nil
}
}
return true, nil
}
// Any "don't burn" rules from Network v13 onwards go here, for now we always return true
return true, nil
}
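The two window-post base-fee tests at the top of this commit exercise this gate from both sides: TestWindowPostBaseFeeNoBurn drives the chain past UpgradeClausHeight while staying below network v13 and expects GasCost.BaseFeeBurn to be zero for a successful SubmitWindowedPoSt, while TestWindowPostBaseFeeBurn runs with the latest actors and expects a non-zero burn.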


@@ -71,7 +71,7 @@ var Commands = []*cli.Command{
WithCategory("basic", walletCmd),
WithCategory("basic", clientCmd),
WithCategory("basic", multisigCmd),
-WithCategory("basic", verifRegCmd),
WithCategory("basic", filplusCmd),
WithCategory("basic", paychCmd),
WithCategory("developer", AuthCmd),
WithCategory("developer", MpoolCmd),


@@ -3,11 +3,11 @@ package cli
import (
"context"
"fmt"
"github.com/filecoin-project/lotus/api/v0api"
verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
@@ -24,26 +24,26 @@ import (
cbor "github.com/ipfs/go-ipld-cbor"
)
-var verifRegCmd = &cli.Command{
var filplusCmd = &cli.Command{
-Name: "verifreg",
Name: "filplus",
-Usage: "Interact with the verified registry actor",
Usage: "Interact with the verified registry actor used by Filplus",
Flags: []cli.Flag{},
Subcommands: []*cli.Command{
-verifRegVerifyClientCmd,
filplusVerifyClientCmd,
-verifRegListVerifiersCmd,
filplusListNotariesCmd,
-verifRegListClientsCmd,
filplusListClientsCmd,
-verifRegCheckClientCmd,
filplusCheckClientCmd,
-verifRegCheckVerifierCmd,
filplusCheckNotaryCmd,
},
}
-var verifRegVerifyClientCmd = &cli.Command{
var filplusVerifyClientCmd = &cli.Command{
-Name: "verify-client",
Name: "grant-datacap",
Usage: "give allowance to the specified verified client address",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "from",
-Usage: "specify your verifier address to send the message from",
Usage: "specify your notary address to send the message from",
Required: true,
},
},
@@ -79,17 +79,17 @@ var verifRegVerifyClientCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
-found, dcap, err := checkVerifier(ctx, api, fromk)
found, dcap, err := checkNotary(ctx, api, fromk)
if err != nil {
return err
}
if !found {
-return xerrors.New("sender address must be a verifier")
return xerrors.New("sender address must be a notary")
}
if dcap.Cmp(allowance.Int) < 0 {
-return xerrors.Errorf("cannot allot more allowance than verifier data cap: %s < %s", dcap, allowance)
return xerrors.Errorf("cannot allot more allowance than notary data cap: %s < %s", dcap, allowance)
}
// TODO: This should be abstracted over actor versions
@@ -125,9 +125,9 @@ var verifRegVerifyClientCmd = &cli.Command{
},
}
-var verifRegListVerifiersCmd = &cli.Command{
var filplusListNotariesCmd = &cli.Command{
-Name: "list-verifiers",
Name: "list-notaries",
-Usage: "list all verifiers",
Usage: "list all notaries",
Action: func(cctx *cli.Context) error {
api, closer, err := GetFullNodeAPI(cctx)
if err != nil {
@@ -155,7 +155,7 @@ var verifRegListVerifiersCmd = &cli.Command{
},
}
-var verifRegListClientsCmd = &cli.Command{
var filplusListClientsCmd = &cli.Command{
Name: "list-clients",
Usage: "list all verified clients",
Action: func(cctx *cli.Context) error {
@@ -185,8 +185,8 @@ var verifRegListClientsCmd = &cli.Command{
},
}
-var verifRegCheckClientCmd = &cli.Command{
var filplusCheckClientCmd = &cli.Command{
-Name: "check-client",
Name: "check-client-datacap",
Usage: "check verified client remaining bytes",
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
@@ -219,12 +219,12 @@ var verifRegCheckClientCmd = &cli.Command{
},
}
-var verifRegCheckVerifierCmd = &cli.Command{
var filplusCheckNotaryCmd = &cli.Command{
-Name: "check-verifier",
Name: "check-notaries-datacap",
-Usage: "check verifiers remaining bytes",
Usage: "check notaries remaining bytes",
Action: func(cctx *cli.Context) error {
if !cctx.Args().Present() {
-return fmt.Errorf("must specify verifier address to check")
return fmt.Errorf("must specify notary address to check")
}
vaddr, err := address.NewFromString(cctx.Args().First())
@@ -239,7 +239,7 @@ var verifRegCheckVerifierCmd = &cli.Command{
defer closer()
ctx := ReqContext(cctx)
-found, dcap, err := checkVerifier(ctx, api, vaddr)
found, dcap, err := checkNotary(ctx, api, vaddr)
if err != nil {
return err
}
@@ -253,7 +253,7 @@ var verifRegCheckVerifierCmd = &cli.Command{
},
}
-func checkVerifier(ctx context.Context, api v0api.FullNode, vaddr address.Address) (bool, abi.StoragePower, error) {
func checkNotary(ctx context.Context, api v0api.FullNode, vaddr address.Address) (bool, abi.StoragePower, error) {
vid, err := api.StateLookupID(ctx, vaddr, types.EmptyTSK)
if err != nil {
return false, big.Zero(), err


@@ -19,10 +19,18 @@ func NewFixedRand() vm.Rand {
return &fixedRand{}
}
-func (r *fixedRand) GetChainRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
func (r *fixedRand) GetChainRandomnessLookingForward(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
}
-func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
func (r *fixedRand) GetChainRandomnessLookingBack(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
}
func (r *fixedRand) GetBeaconRandomnessLookingForward(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
}
func (r *fixedRand) GetBeaconRandomnessLookingBack(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
}


@@ -45,8 +45,17 @@ func (r *RecordingRand) loadHead() {
r.head = head.Key()
}
-func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
func (r *RecordingRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return r.getChainRandomness(ctx, pers, round, entropy)
}
func (r *RecordingRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return r.getChainRandomness(ctx, pers, round, entropy)
}
func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
r.once.Do(r.loadHead)
// FullNode's ChainGetRandomnessFromTickets handles whether we should be looking forward or back
ret, err := r.api.ChainGetRandomnessFromTickets(ctx, r.head, pers, round, entropy)
if err != nil {
return ret, err
@@ -70,7 +79,15 @@ func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.Doma
return ret, err
}
-func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
func (r *RecordingRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return r.getBeaconRandomness(ctx, pers, round, entropy)
}
func (r *RecordingRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
return r.getBeaconRandomness(ctx, pers, round, entropy)
}
func (r *RecordingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
r.once.Do(r.loadHead)
ret, err := r.api.ChainGetRandomnessFromBeacon(ctx, r.head, pers, round, entropy)
if err != nil {


@@ -43,7 +43,15 @@ func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) {
 	return nil, false
 }
 
-func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *ReplayingRand) GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return r.getChainRandomness(ctx, pers, round, entropy, false)
+}
+
+func (r *ReplayingRand) GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return r.getChainRandomness(ctx, pers, round, entropy, true)
+}
+
+func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
 	rule := schema.RandomnessRule{
 		Kind:                schema.RandomnessChain,
 		DomainSeparationTag: int64(pers),
@@ -57,10 +65,23 @@ func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.Doma
 	}
 
 	r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy)
-	return r.fallback.GetChainRandomness(ctx, pers, round, entropy)
+
+	if lookback {
+		return r.fallback.GetChainRandomnessLookingBack(ctx, pers, round, entropy)
+	}
+
+	return r.fallback.GetChainRandomnessLookingForward(ctx, pers, round, entropy)
 }
 
-func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *ReplayingRand) GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return r.getBeaconRandomness(ctx, pers, round, entropy, false)
+}
+
+func (r *ReplayingRand) GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+	return r.getBeaconRandomness(ctx, pers, round, entropy, true)
+}
+
+func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
 	rule := schema.RandomnessRule{
 		Kind:                schema.RandomnessBeacon,
 		DomainSeparationTag: int64(pers),
@@ -74,6 +95,10 @@ func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.Dom
 	}
 
 	r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy)
-	return r.fallback.GetBeaconRandomness(ctx, pers, round, entropy)
+
+	if lookback {
+		return r.fallback.GetBeaconRandomnessLookingBack(ctx, pers, round, entropy)
+	}
+
+	return r.fallback.GetBeaconRandomnessLookingForward(ctx, pers, round, entropy)
 }
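All three conformance randomness providers touched above (fixedRand, RecordingRand, ReplayingRand) now expose the same four split lookups in place of the old two. The sketch below restates that shape in isolation; the interface name `randSource` and the constant-value implementation are illustrative stand-ins inferred from the signatures in this diff, not the actual lotus `vm.Rand` definition.

```
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/crypto"
)

// randSource mirrors the four methods the conformance doubles expose after this
// change (the name is an assumption; the signatures are copied from the diff above).
type randSource interface {
	GetChainRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
	GetChainRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
	GetBeaconRandomnessLookingForward(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
	GetBeaconRandomnessLookingBack(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
}

// constRand behaves like fixedRand: every lookup returns the same 32 bytes.
type constRand struct{}

func (constRand) GetChainRandomnessLookingForward(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) ([]byte, error) {
	return []byte("i_am_random_____i_am_random_____"), nil
}

func (constRand) GetChainRandomnessLookingBack(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) ([]byte, error) {
	return []byte("i_am_random_____i_am_random_____"), nil
}

func (constRand) GetBeaconRandomnessLookingForward(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) ([]byte, error) {
	return []byte("i_am_random_____i_am_random_____"), nil
}

func (constRand) GetBeaconRandomnessLookingBack(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) ([]byte, error) {
	return []byte("i_am_random_____i_am_random_____"), nil
}

func main() {
	var r randSource = constRand{}
	b, _ := r.GetBeaconRandomnessLookingForward(context.Background(), crypto.DomainSeparationTag(1), 100, nil)
	fmt.Printf("%d bytes of fixed randomness\n", len(b))
}
```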


@@ -15,12 +15,12 @@ COMMANDS:
    version  Print version
    help, h  Shows a list of commands or help for one command
    BASIC:
      send     Send funds between accounts
      wallet   Manage wallet
      client   Make deals, store data, retrieve data
      msig     Interact with a multisig wallet
-     verifreg Interact with the verified registry actor
+     filplus  Interact with the verified registry actor used by Filplus
      paych    Manage payment channels
    DEVELOPER:
      auth     Manage RPC permissions
      mpool    Manage message pool
@@ -1035,21 +1035,21 @@ OPTIONS:
 ```
 
-## lotus verifreg
+## lotus filplus
 ```
 NAME:
-   lotus verifreg - Interact with the verified registry actor
+   lotus filplus - Interact with the verified registry actor used by Filplus
 
 USAGE:
-   lotus verifreg command [command options] [arguments...]
+   lotus filplus command [command options] [arguments...]
 
 COMMANDS:
-   verify-client           give allowance to the specified verified client address
-   list-verifiers          list all verifiers
+   grant-datacap           give allowance to the specified verified client address
+   list-notaries           list all notaries
    list-clients            list all verified clients
-   check-client            check verified client remaining bytes
-   check-verifier          check verifiers remaining bytes
+   check-client-datacap    check verified client remaining bytes
+   check-notaries-datacap  check notaries remaining bytes
    help, h                 Shows a list of commands or help for one command
 
 OPTIONS:
    --help, -h  show help (default: false)
@@ -1057,66 +1057,66 @@ OPTIONS:
 ```
 
-### lotus verifreg verify-client
+### lotus filplus grant-datacap
 ```
 NAME:
-   lotus verifreg verify-client - give allowance to the specified verified client address
+   lotus filplus grant-datacap - give allowance to the specified verified client address
 
 USAGE:
-   lotus verifreg verify-client [command options] [arguments...]
+   lotus filplus grant-datacap [command options] [arguments...]
 
 OPTIONS:
-   --from value  specify your verifier address to send the message from
+   --from value  specify your notary address to send the message from
    --help, -h    show help (default: false)
 ```
 
-### lotus verifreg list-verifiers
+### lotus filplus list-notaries
 ```
 NAME:
-   lotus verifreg list-verifiers - list all verifiers
+   lotus filplus list-notaries - list all notaries
 
 USAGE:
-   lotus verifreg list-verifiers [command options] [arguments...]
+   lotus filplus list-notaries [command options] [arguments...]
 
 OPTIONS:
    --help, -h  show help (default: false)
 ```
 
-### lotus verifreg list-clients
+### lotus filplus list-clients
 ```
 NAME:
-   lotus verifreg list-clients - list all verified clients
+   lotus filplus list-clients - list all verified clients
 
 USAGE:
-   lotus verifreg list-clients [command options] [arguments...]
+   lotus filplus list-clients [command options] [arguments...]
 
 OPTIONS:
    --help, -h  show help (default: false)
 ```
 
-### lotus verifreg check-client
+### lotus filplus check-client-datacap
 ```
 NAME:
-   lotus verifreg check-client - check verified client remaining bytes
+   lotus filplus check-client-datacap - check verified client remaining bytes
 
 USAGE:
-   lotus verifreg check-client [command options] [arguments...]
+   lotus filplus check-client-datacap [command options] [arguments...]
 
 OPTIONS:
    --help, -h  show help (default: false)
 ```
 
-### lotus verifreg check-verifier
+### lotus filplus check-notaries-datacap
 ```
 NAME:
-   lotus verifreg check-verifier - check verifiers remaining bytes
+   lotus filplus check-notaries-datacap - check notaries remaining bytes
 
 USAGE:
-   lotus verifreg check-verifier [command options] [arguments...]
+   lotus filplus check-notaries-datacap [command options] [arguments...]
 
 OPTIONS:
    --help, -h  show help (default: false)


@@ -181,7 +181,7 @@ func (b *PreCommitBatcher) processBatch(notif, after bool) (*cid.Cid, error) {
 			break
 		}
 
-		params.Sectors = append(params.Sectors, p.pci)
+		params.Sectors = append(params.Sectors, *p.pci)
 		deposit = big.Add(deposit, p.deposit)
 	}
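The only change here is `p.pci` becoming `*p.pci`: the batched pre-commit params apparently carry the sector pre-commit entries by value rather than by pointer (consistent with the specs-actors/v5 bump elsewhere in this merge), so the pointer the batcher caches per sector has to be dereferenced on append. A self-contained sketch of that pattern, using illustrative types rather than the real miner actor ones:

```
package main

import "fmt"

// preCommitInfo stands in for the actor's sector pre-commit info.
type preCommitInfo struct {
	SectorNumber uint64
}

// batchParams stands in for the batched pre-commit message params: the
// Sectors field is a slice of values, not pointers.
type batchParams struct {
	Sectors []preCommitInfo
}

// pendingEntry stands in for what the batcher caches per sector: a pointer
// to the pre-commit info (the deposit field is omitted here).
type pendingEntry struct {
	pci *preCommitInfo
}

func main() {
	pending := []pendingEntry{
		{pci: &preCommitInfo{SectorNumber: 1}},
		{pci: &preCommitInfo{SectorNumber: 2}},
	}

	var params batchParams
	for _, p := range pending {
		// Dereference the cached pointer so a copy of the value is appended,
		// mirroring `params.Sectors = append(params.Sectors, *p.pci)` above.
		params.Sectors = append(params.Sectors, *p.pci)
	}
	fmt.Println("batched", len(params.Sectors), "sectors")
}
```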

go.mod

@@ -48,7 +48,7 @@ require (
 	github.com/filecoin-project/specs-actors/v2 v2.3.5
 	github.com/filecoin-project/specs-actors/v3 v3.1.1
 	github.com/filecoin-project/specs-actors/v4 v4.0.1
-	github.com/filecoin-project/specs-actors/v5 v5.0.0-20210517165532-c7cff61d07fb
+	github.com/filecoin-project/specs-actors/v5 v5.0.0-20210528202914-a9f9f95f5e93
 	github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
 	github.com/filecoin-project/test-vectors/schema v0.0.5
 	github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1

go.sum

@@ -330,6 +330,8 @@ github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FT
 github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww=
 github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg=
 github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
+github.com/filecoin-project/specs-actors/v5 v5.0.0-20210528202914-a9f9f95f5e93 h1:PZ5pLy4dZVgL+fXgvSVtPOYhfEYUzEYYVEz7IfG8e5U=
+github.com/filecoin-project/specs-actors/v5 v5.0.0-20210528202914-a9f9f95f5e93/go.mod h1:kSDmoQuO8jlhMVzKNoesbhka1e6gHKcLQjKm9mE9Qhw=
 github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
 github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
 github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=


@@ -67,7 +67,8 @@ import (
 var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)
 
-const dealStartBufferHours uint64 = 49
+// 8 days ~= SealDuration + PreCommit + MaxProveCommitDuration + 8 hour buffer
+const dealStartBufferHours uint64 = 8 * 24
 
 type API struct {
 	fx.In
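The deal-start buffer grows from 49 hours to 8 days (`8 * 24` = 192 hours), matching the new comment's reasoning about sealing, pre-commit, and prove-commit time. Assuming the buffer is turned into epochs the usual way (hours converted to seconds, divided by the 30-second mainnet block delay), the before/after difference works out as follows; the conversion below is a sketch under that assumption, not a quote of the client code:

```
package main

import "fmt"

const blockDelaySecs = 30 // mainnet epoch duration in seconds

// epochsForHours converts an hour-denominated buffer into chain epochs,
// assuming one epoch per blockDelaySecs seconds.
func epochsForHours(hours uint64) uint64 {
	return hours * 60 * 60 / blockDelaySecs
}

func main() {
	oldBuffer := epochsForHours(49)     // previous value: 5880 epochs
	newBuffer := epochsForHours(8 * 24) // 8 days, per this commit: 23040 epochs
	fmt.Printf("deal start buffer: %d -> %d epochs\n", oldBuffer, newBuffer)
}
```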


@@ -10,6 +10,8 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/filecoin-project/lotus/build"
+
 	"go.uber.org/fx"
 	"golang.org/x/xerrors"
@@ -97,7 +99,12 @@ func (a *ChainAPI) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.
 		return nil, xerrors.Errorf("loading tipset key: %w", err)
 	}
 
-	return a.Chain.GetChainRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy)
+	// Doing this here is slightly nicer than doing it in the chainstore directly, but it's still bad for ChainAPI to reason about network upgrades
+	if randEpoch > build.UpgradeHyperdriveHeight {
+		return a.Chain.GetChainRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy)
+	}
+
+	return a.Chain.GetChainRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy)
 }
 
 func (a *ChainAPI) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
@@ -106,7 +113,12 @@ func (a *ChainAPI) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.T
 		return nil, xerrors.Errorf("loading tipset key: %w", err)
 	}
 
-	return a.Chain.GetBeaconRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy)
+	// Doing this here is slightly nicer than doing it in the chainstore directly, but it's still bad for ChainAPI to reason about network upgrades
+	if randEpoch > build.UpgradeHyperdriveHeight {
+		return a.Chain.GetBeaconRandomnessLookingForward(ctx, pts.Cids(), personalization, randEpoch, entropy)
+	}
+
+	return a.Chain.GetBeaconRandomnessLookingBack(ctx, pts.Cids(), personalization, randEpoch, entropy)
 }
 
 func (a *ChainAPI) ChainGetBlock(ctx context.Context, msg cid.Cid) (*types.BlockHeader, error) {
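Both randomness endpoints apply the same rule: requests for epochs strictly after the Hyperdrive upgrade take the looking-forward lookup, while requests at or before the upgrade keep the old looking-back behaviour. The branch can be restated in isolation as below; the fork-height constant and the two function values are illustrative stand-ins, not the real build constant or chainstore API:

```
package main

import "fmt"

type epoch int64

// upgradeHyperdriveHeight stands in for build.UpgradeHyperdriveHeight; the
// value here is arbitrary and only for illustration.
const upgradeHyperdriveHeight epoch = 100

// randomnessAt picks the forward- or backward-looking lookup based on the
// requested epoch, mirroring the branch added to ChainAPI in this commit.
func randomnessAt(randEpoch epoch, lookingForward, lookingBack func(epoch) []byte) []byte {
	if randEpoch > upgradeHyperdriveHeight {
		return lookingForward(randEpoch)
	}
	return lookingBack(randEpoch)
}

func main() {
	forward := func(e epoch) []byte { return []byte("forward") }
	back := func(e epoch) []byte { return []byte("back") }

	fmt.Printf("%s\n", randomnessAt(upgradeHyperdriveHeight, forward, back))   // at the fork: back
	fmt.Printf("%s\n", randomnessAt(upgradeHyperdriveHeight+1, forward, back)) // after the fork: forward
}
```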


@@ -260,6 +260,34 @@ func TestWindowPostDisputeFails(t *testing.T) {
 	test.TestWindowPostDisputeFails(t, builder.MockSbBuilder, 2*time.Millisecond)
 }
 
+func TestWindowPostBaseFeeNoBurn(t *testing.T) {
+	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
+		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
+	}
+
+	logging.SetLogLevel("miner", "ERROR")
+	logging.SetLogLevel("gen", "ERROR")
+	logging.SetLogLevel("chainstore", "ERROR")
+	logging.SetLogLevel("chain", "ERROR")
+	logging.SetLogLevel("sub", "ERROR")
+	logging.SetLogLevel("storageminer", "ERROR")
+	test.TestWindowPostBaseFeeNoBurn(t, builder.MockSbBuilder, 2*time.Millisecond)
+}
+
+func TestWindowPostBaseFeeBurn(t *testing.T) {
+	if os.Getenv("LOTUS_TEST_WINDOW_POST") != "1" {
+		t.Skip("this takes a few minutes, set LOTUS_TEST_WINDOW_POST=1 to run")
+	}
+
+	logging.SetLogLevel("miner", "ERROR")
+	logging.SetLogLevel("gen", "ERROR")
+	logging.SetLogLevel("chainstore", "ERROR")
+	logging.SetLogLevel("chain", "ERROR")
+	logging.SetLogLevel("sub", "ERROR")
+	logging.SetLogLevel("storageminer", "ERROR")
+	test.TestWindowPostBaseFeeBurn(t, builder.MockSbBuilder, 2*time.Millisecond)
+}
+
 func TestDeadlineToggling(t *testing.T) {
 	if os.Getenv("LOTUS_TEST_DEADLINE_TOGGLING") != "1" {
 		t.Skip("this takes a few minutes, set LOTUS_TEST_DEADLINE_TOGGLING=1 to run")