Merge pull request #6406 from filecoin-project/feat/lotus-sim

Łukasz Magiera 2021-06-24 18:20:43 +02:00 committed by GitHub
commit ebd746cdec
61 changed files with 5064 additions and 28 deletions

.gitignore (vendored, 1 line changed)

@@ -8,6 +8,7 @@
/lotus-health
/lotus-chainwatch
/lotus-shed
/lotus-sim
/lotus-pond
/lotus-townhall
/lotus-fountain


@@ -234,6 +234,12 @@ BINS+=tvx
install-chainwatch: lotus-chainwatch
install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch
lotus-sim: $(BUILD_DEPS)
rm -f lotus-sim
go build $(GOFLAGS) -o lotus-sim ./cmd/lotus-sim
.PHONY: lotus-sim
BINS+=lotus-sim
# SYSTEMD
install-daemon-service: install-daemon


@@ -22,6 +22,7 @@ import (
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
{{range .versions}}
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
{{end}}
@@ -97,9 +98,13 @@ type State interface {
FindSector(abi.SectorNumber) (*SectorLocation, error)
GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error)
GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error)
ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error
LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error)
NumLiveSectors() (uint64, error)
IsAllocated(abi.SectorNumber) (bool, error)
// UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than
// count if there aren't enough).
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
// Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
GetProvingPeriodStart() (abi.ChainEpoch, error)
@@ -176,6 +181,7 @@ type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old
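The two new State methods above are the hooks the simulator relies on. A minimal sketch (not part of this diff) of how a caller might use them; the pickFreeSectors helper is an assumption, and st is presumed to come from miner.Load:

package example

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// pickFreeSectors is a hypothetical helper: st is assumed to be a miner.State
// already loaded via miner.Load(store, actor).
func pickFreeSectors(st miner.State, max int) ([]abi.SectorNumber, error) {
	// Walk every on-chain pre-commit (new in this change).
	if err := st.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error {
		_ = info // a real caller would track pending pre-commits here
		return nil
	}); err != nil {
		return nil, err
	}
	// Ask for up to max sector numbers the miner has never allocated.
	return st.UnallocatedSectorNumbers(max)
}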


@@ -22,6 +22,7 @@ import (
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@@ -156,9 +157,13 @@ type State interface {
FindSector(abi.SectorNumber) (*SectorLocation, error)
GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error)
GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error)
ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error
LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error)
NumLiveSectors() (uint64, error)
IsAllocated(abi.SectorNumber) (bool, error)
// UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than
// count if there aren't enough).
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
// Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
GetProvingPeriodStart() (abi.ChainEpoch, error)
@@ -235,6 +240,7 @@ type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old


@@ -8,6 +8,7 @@ import (
{{end}}
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
@@ -209,6 +210,26 @@ func (s *state{{.v}}) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCom
return &ret, nil
}
func (s *state{{.v}}) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
{{if (ge .v 3) -}}
precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors, builtin{{.v}}.DefaultHamtBitwidth)
{{- else -}}
precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors)
{{- end}}
if err != nil {
return err
}
var info miner{{.v}}.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV{{.v}}SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner{{.v}}.LoadSectors(s.store, s.State.Sectors)
if err != nil {
@@ -242,9 +263,15 @@ func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo
return infos, nil
}
func (s *state{{.v}}) IsAllocated(num abi.SectorNumber) (bool, error) {
func (s *state{{.v}}) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state{{.v}}) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err
}
@@ -255,6 +282,42 @@ func (s *state{{.v}}) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state{{.v}}) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{ {Val: true, Len: abi.MaxSectorNumber} }},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state{{.v}}) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {


@@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
@@ -206,6 +207,22 @@ func (s *state0) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil
}
func (s *state0) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt0.AsMap(s.store, s.State.PreCommittedSectors)
if err != nil {
return err
}
var info miner0.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV0SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner0.LoadSectors(s.store, s.State.Sectors)
if err != nil {
@@ -239,9 +256,15 @@ func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil
}
func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) {
func (s *state0) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err
}
@@ -252,6 +275,42 @@ func (s *state0) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state0) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state0) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {


@@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
@@ -204,6 +205,22 @@ func (s *state2) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil
}
func (s *state2) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt2.AsMap(s.store, s.State.PreCommittedSectors)
if err != nil {
return err
}
var info miner2.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV2SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner2.LoadSectors(s.store, s.State.Sectors)
if err != nil {
@@ -237,9 +254,15 @@ func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil
}
func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) {
func (s *state2) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err
}
@@ -250,6 +273,42 @@ func (s *state2) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state2) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state2) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {


@@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
@@ -206,6 +207,22 @@ func (s *state3) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil
}
func (s *state3) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt3.AsMap(s.store, s.State.PreCommittedSectors, builtin3.DefaultHamtBitwidth)
if err != nil {
return err
}
var info miner3.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV3SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner3.LoadSectors(s.store, s.State.Sectors)
if err != nil {
@@ -239,9 +256,15 @@ func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil
}
func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) {
func (s *state3) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err
}
@@ -252,6 +275,42 @@ func (s *state3) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state3) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state3) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {


@@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
@@ -206,6 +207,22 @@ func (s *state4) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil
}
func (s *state4) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt4.AsMap(s.store, s.State.PreCommittedSectors, builtin4.DefaultHamtBitwidth)
if err != nil {
return err
}
var info miner4.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV4SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner4.LoadSectors(s.store, s.State.Sectors)
if err != nil {
@@ -239,9 +256,15 @@ func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil
}
func (s *state4) IsAllocated(num abi.SectorNumber) (bool, error) {
func (s *state4) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state4) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err
}
@@ -252,6 +275,42 @@ func (s *state4) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state4) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state4) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {


@@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid"
@@ -206,6 +207,22 @@ func (s *state5) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil
}
func (s *state5) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt5.AsMap(s.store, s.State.PreCommittedSectors, builtin5.DefaultHamtBitwidth)
if err != nil {
return err
}
var info miner5.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV5SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner5.LoadSectors(s.store, s.State.Sectors)
if err != nil {
@@ -239,9 +256,15 @@ func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil
}
func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) {
func (s *state5) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err
}
@@ -252,6 +275,42 @@ func (s *state5) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state5) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state5) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {


@@ -115,6 +115,7 @@ type MessageBuilder interface {
type ProposalHashData = msig{{.latestVersion}}.ProposalHashData
type ProposeReturn = msig{{.latestVersion}}.ProposeReturn
type ProposeParams = msig{{.latestVersion}}.ProposeParams
type ApproveReturn = msig{{.latestVersion}}.ApproveReturn
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
params := msig{{.latestVersion}}.TxnIDParams{ID: msig{{.latestVersion}}.TxnID(id)}


@@ -186,6 +186,7 @@ type MessageBuilder interface {
type ProposalHashData = msig5.ProposalHashData
type ProposeReturn = msig5.ProposeReturn
type ProposeParams = msig5.ProposeParams
type ApproveReturn = msig5.ApproveReturn
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
params := msig5.TxnIDParams{ID: msig5.TxnID(id)}


@@ -3,6 +3,7 @@ package genesis
import (
"context"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/system"
@@ -34,6 +35,7 @@ func SetupSystemActor(ctx context.Context, bs bstore.Blockstore, av actors.Versi
act := &types.Actor{
Code: actcid,
Head: statecid,
Balance: big.Zero(),
}
return act, nil


@@ -11,6 +11,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/specs-actors/actors/util/adt"
@@ -183,6 +184,7 @@ func SetupInitActor(ctx context.Context, bs bstore.Blockstore, netname string, i
act := &types.Actor{
Code: actcid,
Head: statecid,
Balance: big.Zero(),
}
return counter, act, keyToId, nil


@@ -3,6 +3,7 @@ package genesis
import (
"context"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/cron"
@@ -33,6 +34,7 @@ func SetupCronActor(ctx context.Context, bs bstore.Blockstore, av actors.Version
act := &types.Actor{
Code: actcid,
Head: statecid,
Balance: big.Zero(),
}
return act, nil


@@ -3,6 +3,7 @@ package genesis
import (
"context"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors"
@@ -35,6 +36,7 @@ func SetupStoragePowerActor(ctx context.Context, bs bstore.Blockstore, av actors
act := &types.Actor{
Code: actcid,
Head: statecid,
Balance: big.Zero(),
}
return act, nil


@@ -3,6 +3,7 @@ package genesis
import (
"context"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
@@ -33,6 +34,7 @@ func SetupStorageMarketActor(ctx context.Context, bs bstore.Blockstore, av actor
act := &types.Actor{
Code: actcid,
Head: statecid,
Balance: big.Zero(),
}
return act, nil


@@ -3,6 +3,7 @@ package genesis
import (
"context"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/actors"
@@ -48,6 +49,7 @@ func SetupVerifiedRegistryActor(ctx context.Context, bs bstore.Blockstore, av ac
act := &types.Actor{
Code: actcid,
Head: statecid,
Balance: big.Zero(),
}
return act, nil


@@ -313,11 +313,10 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge
totalFilAllocated := big.Zero()
// flush as ForEach works on the HAMT
if _, err := state.Flush(ctx); err != nil {
return nil, nil, err
}
err = state.ForEach(func(addr address.Address, act *types.Actor) error {
if act.Balance.Nil() {
panic(fmt.Sprintf("actor %s (%s) has nil balance", addr, builtin.ActorNameByCode(act.Code)))
}
totalFilAllocated = big.Add(totalFilAllocated, act.Balance)
return nil
})


@@ -504,6 +504,26 @@ func (st *StateTree) MutateActor(addr address.Address, f func(*types.Actor) erro
}
func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error {
// Walk through layers, if any.
seen := make(map[address.Address]struct{})
for i := len(st.snaps.layers) - 1; i >= 0; i-- {
for addr, op := range st.snaps.layers[i].actors {
if _, ok := seen[addr]; ok {
continue
}
seen[addr] = struct{}{}
if op.Delete {
continue
}
act := op.Act // copy
if err := f(addr, &act); err != nil {
return err
}
}
}
// Now walk through the saved actors.
var act types.Actor
return st.root.ForEach(&act, func(k string) error {
act := act // copy
@@ -512,6 +532,12 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error
return xerrors.Errorf("invalid address (%x) found in state tree key: %w", []byte(k), err)
}
// no need to record anything here, there are no duplicates in the actors HAMT
// itself.
if _, ok := seen[addr]; ok {
return nil
}
return f(addr, &act)
})
}
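The genesis change above can drop its explicit Flush because ForEach now visits the unflushed snapshot layers first and only then the backing HAMT, skipping addresses it has already reported. A minimal sketch of that behaviour under assumed setup (the countActors helper is not part of this diff):

package example

import (
	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/state"
	"github.com/filecoin-project/lotus/chain/types"
)

// countActors is a hypothetical helper: st is an existing *state.StateTree that
// may hold unflushed modifications (e.g. mid genesis construction).
func countActors(st *state.StateTree) (int, error) {
	n := 0
	// No Flush needed: dirty snapshot layers are walked before the HAMT,
	// and deleted or already-seen entries are filtered out.
	err := st.ForEach(func(addr address.Address, act *types.Actor) error {
		_ = addr
		_ = act
		n++
		return nil
	})
	return n, err
}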


@@ -439,6 +439,8 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
},
GasCosts: &gasOutputs,
Duration: time.Since(start),
ActorErr: aerrors.Newf(exitcode.SysErrOutOfGas,
"message gas limit does not cover on-chain gas costs"),
}, nil
}
@@ -669,6 +671,12 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
return root, nil
}
// Get the buffered blockstore associated with the VM. This includes any temporary blocks produced
// during this VM's execution.
func (vm *VM) ActorStore(ctx context.Context) adt.Store {
return adt.WrapStore(ctx, vm.cst)
}
func linksForObj(blk block.Block, cb func(cid.Cid)) error {
switch blk.Cid().Prefix().Codec {
case cid.DagCBOR:
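ActorStore exposes the VM's buffered blockstore as an adt.Store, so callers (such as the simulator) can read state written by messages that have not been flushed yet. A sketch under assumed setup; loadPowerState and its arguments are illustrative, not part of this diff:

package example

import (
	"context"

	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)

// loadPowerState is a hypothetical helper: lvm has already applied some messages,
// and act is the power actor fetched from the VM's (unflushed) state tree.
func loadPowerState(ctx context.Context, lvm *vm.VM, act *types.Actor) (power.State, error) {
	// The store includes temporary blocks produced during execution, so state
	// referenced by unflushed roots is still readable.
	return power.Load(lvm.ActorStore(ctx), act)
}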

cmd/lotus-sim/copy.go (new file, 28 lines)

@@ -0,0 +1,28 @@
package main
import (
"fmt"
"github.com/urfave/cli/v2"
)
var copySimCommand = &cli.Command{
Name: "copy",
ArgsUsage: "<new-name>",
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
if cctx.NArg() != 1 {
return fmt.Errorf("expected 1 argument")
}
name := cctx.Args().First()
return node.CopySim(cctx.Context, cctx.String("simulation"), name)
},
}

cmd/lotus-sim/create.go (new file, 49 lines)

@@ -0,0 +1,49 @@
package main
import (
"fmt"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
var createSimCommand = &cli.Command{
Name: "create",
ArgsUsage: "[tipset]",
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
var ts *types.TipSet
switch cctx.NArg() {
case 0:
if err := node.Chainstore.Load(); err != nil {
return err
}
ts = node.Chainstore.GetHeaviestTipSet()
case 1:
cids, err := lcli.ParseTipSetString(cctx.Args().Get(0))
if err != nil {
return err
}
tsk := types.NewTipSetKey(cids...)
ts, err = node.Chainstore.LoadTipSet(tsk)
if err != nil {
return err
}
default:
return fmt.Errorf("expected 0 or 1 arguments")
}
_, err = node.CreateSim(cctx.Context, cctx.String("simulation"), ts)
return err
},
}

cmd/lotus-sim/delete.go (new file, 22 lines)

@@ -0,0 +1,22 @@
package main
import (
"github.com/urfave/cli/v2"
)
var deleteSimCommand = &cli.Command{
Name: "delete",
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
return node.DeleteSim(cctx.Context, cctx.String("simulation"))
},
}

cmd/lotus-sim/info.go (new file, 110 lines)

@@ -0,0 +1,110 @@
package main
import (
"context"
"fmt"
"io"
"text/tabwriter"
"time"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
)
func getTotalPower(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet) (power.Claim, error) {
actor, err := sm.LoadActor(ctx, power.Address, ts)
if err != nil {
return power.Claim{}, err
}
state, err := power.Load(sm.ChainStore().ActorStore(ctx), actor)
if err != nil {
return power.Claim{}, err
}
return state.TotalPower()
}
func printInfo(ctx context.Context, sim *simulation.Simulation, out io.Writer) error {
head := sim.GetHead()
start := sim.GetStart()
powerNow, err := getTotalPower(ctx, sim.StateManager, head)
if err != nil {
return err
}
powerLookbackEpoch := head.Height() - builtin.EpochsInDay*2
if powerLookbackEpoch < start.Height() {
powerLookbackEpoch = start.Height()
}
lookbackTs, err := sim.Node.Chainstore.GetTipsetByHeight(ctx, powerLookbackEpoch, head, false)
if err != nil {
return err
}
powerLookback, err := getTotalPower(ctx, sim.StateManager, lookbackTs)
if err != nil {
return err
}
// growth rate in size/day
growthRate := big.Div(
big.Mul(big.Sub(powerNow.RawBytePower, powerLookback.RawBytePower),
big.NewInt(builtin.EpochsInDay)),
big.NewInt(int64(head.Height()-lookbackTs.Height())),
)
tw := tabwriter.NewWriter(out, 8, 8, 1, ' ', 0)
headEpoch := head.Height()
firstEpoch := start.Height() + 1
headTime := time.Unix(int64(head.MinTimestamp()), 0)
startTime := time.Unix(int64(start.MinTimestamp()), 0)
duration := headTime.Sub(startTime)
fmt.Fprintf(tw, "Name:\t%s\n", sim.Name())
fmt.Fprintf(tw, "Head:\t%s\n", head)
fmt.Fprintf(tw, "Start Epoch:\t%d\n", firstEpoch)
fmt.Fprintf(tw, "End Epoch:\t%d\n", headEpoch)
fmt.Fprintf(tw, "Length:\t%d\n", headEpoch-firstEpoch)
fmt.Fprintf(tw, "Start Date:\t%s\n", startTime)
fmt.Fprintf(tw, "End Date:\t%s\n", headTime)
fmt.Fprintf(tw, "Duration:\t%.2f day(s)\n", duration.Hours()/24)
fmt.Fprintf(tw, "Capacity:\t%s\n", types.SizeStr(powerNow.RawBytePower))
fmt.Fprintf(tw, "Daily Capacity Growth:\t%s/day\n", types.SizeStr(growthRate))
fmt.Fprintf(tw, "Network Version:\t%d\n", sim.GetNetworkVersion())
return tw.Flush()
}
var infoSimCommand = &cli.Command{
Name: "info",
Description: "Output information about the simulation.",
Subcommands: []*cli.Command{
infoCommitGasSimCommand,
infoMessageSizeSimCommand,
infoWindowPostBandwidthSimCommand,
infoCapacityGrowthSimCommand,
infoStateGrowthSimCommand,
},
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
if err != nil {
return err
}
return printInfo(cctx.Context, sim, cctx.App.Writer)
},
}


@@ -0,0 +1,67 @@
package main
import (
"fmt"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
)
var infoCapacityGrowthSimCommand = &cli.Command{
Name: "capacity-growth",
Description: "List daily capacity growth over the course of the simulation starting at the end.",
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
if err != nil {
return err
}
firstEpoch := sim.GetStart().Height()
ts := sim.GetHead()
lastPower, err := getTotalPower(cctx.Context, sim.StateManager, ts)
if err != nil {
return err
}
lastHeight := ts.Height()
for ts.Height() > firstEpoch && cctx.Err() == nil {
ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents())
if err != nil {
return err
}
newEpoch := ts.Height()
if newEpoch != firstEpoch && newEpoch+builtin.EpochsInDay > lastHeight {
continue
}
newPower, err := getTotalPower(cctx.Context, sim.StateManager, ts)
if err != nil {
return err
}
growthRate := big.Div(
big.Mul(big.Sub(lastPower.RawBytePower, newPower.RawBytePower),
big.NewInt(builtin.EpochsInDay)),
big.NewInt(int64(lastHeight-newEpoch)),
)
lastPower = newPower
lastHeight = newEpoch
fmt.Fprintf(cctx.App.Writer, "%s/day\n", types.SizeStr(growthRate))
}
return cctx.Err()
},
}


@@ -0,0 +1,148 @@
package main
import (
"bytes"
"fmt"
"os"
"syscall"
"github.com/streadway/quantile"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
"github.com/filecoin-project/lotus/lib/stati"
)
var infoCommitGasSimCommand = &cli.Command{
Name: "commit-gas",
Description: "Output information about the gas for commits",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "lookback",
Value: 0,
},
},
Action: func(cctx *cli.Context) (err error) {
log := func(f string, i ...interface{}) {
fmt.Fprintf(os.Stderr, f, i...)
}
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
go profileOnSignal(cctx, syscall.SIGUSR2)
sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
if err != nil {
return err
}
var gasAgg, proofsAgg uint64
var gasAggMax, proofsAggMax uint64
var gasSingle, proofsSingle uint64
qpoints := []struct{ q, tol float64 }{
{0.01, 0.0005},
{0.05, 0.001},
{0.20, 0.01},
{0.25, 0.01},
{0.30, 0.01},
{0.40, 0.01},
{0.45, 0.01},
{0.50, 0.01},
{0.60, 0.01},
{0.80, 0.01},
{0.95, 0.001},
{0.99, 0.0005},
}
estims := make([]quantile.Estimate, len(qpoints))
for i, p := range qpoints {
estims[i] = quantile.Known(p.q, p.tol)
}
qua := quantile.New(estims...)
hist, err := stati.NewHistogram([]float64{
1, 3, 5, 7, 15, 30, 50, 100, 200, 400, 600, 700, 819})
if err != nil {
return err
}
err = sim.Walk(cctx.Context, cctx.Int64("lookback"), func(
sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid,
messages []*simulation.AppliedMessage,
) error {
for _, m := range messages {
if m.ExitCode != exitcode.Ok {
continue
}
if m.Method == miner.Methods.ProveCommitAggregate {
param := miner.ProveCommitAggregateParams{}
err := param.UnmarshalCBOR(bytes.NewReader(m.Params))
if err != nil {
log("failed to decode params: %+v", err)
return nil
}
c, err := param.SectorNumbers.Count()
if err != nil {
log("failed to count sectors")
return nil
}
gasAgg += uint64(m.GasUsed)
proofsAgg += c
if c == 819 {
gasAggMax += uint64(m.GasUsed)
proofsAggMax += c
}
for i := uint64(0); i < c; i++ {
qua.Add(float64(c))
}
hist.Observe(float64(c))
}
if m.Method == miner.Methods.ProveCommitSector {
gasSingle += uint64(m.GasUsed)
proofsSingle++
qua.Add(1)
hist.Observe(1)
}
}
return nil
})
if err != nil {
return err
}
idealGasUsed := float64(gasAggMax) / float64(proofsAggMax) * float64(proofsAgg+proofsSingle)
fmt.Printf("Gas usage efficiency in comparison to all 819: %f%%\n", 100*idealGasUsed/float64(gasAgg+gasSingle))
fmt.Printf("Proofs in singles: %d\n", proofsSingle)
fmt.Printf("Proofs in Aggs: %d\n", proofsAgg)
fmt.Printf("Proofs in Aggs(819): %d\n", proofsAggMax)
fmt.Println()
fmt.Println("Quantiles of proofs in given aggregate size:")
for _, p := range qpoints {
fmt.Printf("%.0f%%\t%.0f\n", p.q*100, qua.Get(p.q))
}
fmt.Println()
fmt.Println("Histogram of messages:")
fmt.Printf("Total\t%d\n", hist.Total())
for i, b := range hist.Buckets[1:] {
fmt.Printf("%.0f\t%d\n", b, hist.Get(i))
}
return nil
},
}


@@ -0,0 +1,95 @@
package main
import (
"fmt"
"syscall"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
"github.com/filecoin-project/lotus/lib/stati"
"github.com/ipfs/go-cid"
"github.com/streadway/quantile"
"github.com/urfave/cli/v2"
)
var infoMessageSizeSimCommand = &cli.Command{
Name: "message-size",
Description: "Output information about message size distribution",
Flags: []cli.Flag{
&cli.Int64Flag{
Name: "lookback",
Value: 0,
},
},
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
go profileOnSignal(cctx, syscall.SIGUSR2)
sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
if err != nil {
return err
}
qpoints := []struct{ q, tol float64 }{
{0.30, 0.01},
{0.40, 0.01},
{0.60, 0.01},
{0.70, 0.01},
{0.80, 0.01},
{0.85, 0.01},
{0.90, 0.01},
{0.95, 0.001},
{0.99, 0.0005},
{0.999, 0.0001},
}
estims := make([]quantile.Estimate, len(qpoints))
for i, p := range qpoints {
estims[i] = quantile.Known(p.q, p.tol)
}
qua := quantile.New(estims...)
hist, err := stati.NewHistogram([]float64{
1 << 8, 1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
})
if err != nil {
return err
}
err = sim.Walk(cctx.Context, cctx.Int64("lookback"), func(
sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid,
messages []*simulation.AppliedMessage,
) error {
for _, m := range messages {
msgSize := float64(m.ChainLength())
qua.Add(msgSize)
hist.Observe(msgSize)
}
return nil
})
if err != nil {
return err
}
fmt.Println("Quantiles of message sizes:")
for _, p := range qpoints {
fmt.Printf("%.1f%%\t%.0f\n", p.q*100, qua.Get(p.q))
}
fmt.Println()
fmt.Println("Histogram of message sizes:")
fmt.Printf("Total\t%d\n", hist.Total())
for i, b := range hist.Buckets[1:] {
fmt.Printf("%.0f\t%d\t%.1f%%\n", b, hist.Get(i), 100*hist.GetRatio(i))
}
return nil
},
}

cmd/lotus-sim/info_state.go (new file, 141 lines)

@@ -0,0 +1,141 @@
package main
import (
"bytes"
"context"
"fmt"
"math"
"sync"
"sync/atomic"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
)
var infoStateGrowthSimCommand = &cli.Command{
Name: "state-size",
Description: "List daily state size over the course of the simulation starting at the end.",
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
if err != nil {
return err
}
// NOTE: This code is entirely read-bound.
store := node.Chainstore.StateBlockstore()
stateSize := func(ctx context.Context, c cid.Cid) (uint64, error) {
seen := cid.NewSet()
sema := make(chan struct{}, 40)
var lock sync.Mutex
var recSize func(cid.Cid) (uint64, error)
recSize = func(c cid.Cid) (uint64, error) {
// Not a part of the chain state.
if err := ctx.Err(); err != nil {
return 0, err
}
lock.Lock()
visit := seen.Visit(c)
lock.Unlock()
// Already seen?
if !visit {
return 0, nil
}
var links []cid.Cid
var totalSize uint64
if err := store.View(c, func(data []byte) error {
totalSize += uint64(len(data))
return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
if c.Prefix().Codec != cid.DagCBOR {
return
}
links = append(links, c)
})
}); err != nil {
return 0, err
}
var wg sync.WaitGroup
errCh := make(chan error, 1)
cb := func(c cid.Cid) {
size, err := recSize(c)
if err != nil {
select {
case errCh <- err:
default:
}
return
}
atomic.AddUint64(&totalSize, size)
}
asyncCb := func(c cid.Cid) {
wg.Add(1)
go func() {
defer wg.Done()
defer func() { <-sema }()
cb(c)
}()
}
for _, link := range links {
select {
case sema <- struct{}{}:
asyncCb(link)
default:
cb(link)
}
}
wg.Wait()
select {
case err := <-errCh:
return 0, err
default:
}
return totalSize, nil
}
return recSize(c)
}
firstEpoch := sim.GetStart().Height()
ts := sim.GetHead()
lastHeight := abi.ChainEpoch(math.MaxInt64)
for ts.Height() > firstEpoch && cctx.Err() == nil {
if ts.Height()+builtin.EpochsInDay <= lastHeight {
lastHeight = ts.Height()
parentStateSize, err := stateSize(cctx.Context, ts.ParentState())
if err != nil {
return err
}
fmt.Fprintf(cctx.App.Writer, "%d: %s\n", ts.Height(), types.SizeStr(types.NewInt(parentStateSize)))
}
ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents())
if err != nil {
return err
}
}
return cctx.Err()
},
}


@@ -0,0 +1,69 @@
package main
import (
"fmt"
"github.com/ipfs/go-cid"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
)
var infoWindowPostBandwidthSimCommand = &cli.Command{
Name: "post-bandwidth",
Description: "List average chain bandwidth used by window posts for each day of the simulation.",
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
if err != nil {
return err
}
var postGas, totalGas int64
printStats := func() {
fmt.Fprintf(cctx.App.Writer, "%.4f%%\n", float64(100*postGas)/float64(totalGas))
}
idx := 0
err = sim.Walk(cctx.Context, 0, func(
sm *stmgr.StateManager, ts *types.TipSet, stCid cid.Cid,
messages []*simulation.AppliedMessage,
) error {
for _, m := range messages {
totalGas += m.GasUsed
if m.ExitCode != exitcode.Ok {
continue
}
if m.Method == miner.Methods.SubmitWindowedPoSt {
postGas += m.GasUsed
}
}
idx++
idx %= builtin.EpochsInDay
if idx == 0 {
printStats()
postGas = 0
totalGas = 0
}
return nil
})
if idx > 0 {
printStats()
}
return err
},
}

cmd/lotus-sim/list.go (new file, 38 lines)

@@ -0,0 +1,38 @@
package main
import (
"fmt"
"text/tabwriter"
"github.com/urfave/cli/v2"
)
var listSimCommand = &cli.Command{
Name: "list",
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
list, err := node.ListSims(cctx.Context)
if err != nil {
return err
}
tw := tabwriter.NewWriter(cctx.App.Writer, 8, 8, 0, ' ', 0)
for _, name := range list {
sim, err := node.LoadSim(cctx.Context, name)
if err != nil {
return err
}
head := sim.GetHead()
fmt.Fprintf(tw, "%s\t%s\t%s\n", name, head.Height(), head.Key())
}
return tw.Flush()
},
}

cmd/lotus-sim/main.go (new file, 63 lines)

@@ -0,0 +1,63 @@
package main
import (
"context"
"fmt"
"os"
"os/signal"
"syscall"
"github.com/urfave/cli/v2"
logging "github.com/ipfs/go-log/v2"
)
var root []*cli.Command = []*cli.Command{
createSimCommand,
deleteSimCommand,
copySimCommand,
renameSimCommand,
listSimCommand,
runSimCommand,
infoSimCommand,
upgradeCommand,
}
func main() {
if _, set := os.LookupEnv("GOLOG_LOG_LEVEL"); !set {
_ = logging.SetLogLevel("simulation", "DEBUG")
_ = logging.SetLogLevel("simulation-mock", "DEBUG")
}
app := &cli.App{
Name: "lotus-sim",
Usage: "A tool to simulate a network.",
Commands: root,
Writer: os.Stdout,
ErrWriter: os.Stderr,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "repo",
EnvVars: []string{"LOTUS_PATH"},
Hidden: true,
Value: "~/.lotus",
},
&cli.StringFlag{
Name: "simulation",
Aliases: []string{"sim"},
EnvVars: []string{"LOTUS_SIMULATION"},
Value: "default",
},
},
}
ctx, cancel := signal.NotifyContext(context.Background(),
syscall.SIGTERM, syscall.SIGINT, syscall.SIGHUP)
defer cancel()
if err := app.RunContext(ctx, os.Args); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
os.Exit(1)
return
}
}

cmd/lotus-sim/profile.go (new file, 94 lines)

@@ -0,0 +1,94 @@
package main
import (
"context"
"fmt"
"os"
"os/signal"
"path/filepath"
"runtime/pprof"
"time"
"github.com/urfave/cli/v2"
)
func takeProfiles(ctx context.Context) (fname string, _err error) {
dir, err := os.MkdirTemp(".", ".profiles-temp*")
if err != nil {
return "", err
}
if err := writeProfiles(ctx, dir); err != nil {
_ = os.RemoveAll(dir)
return "", err
}
fname = fmt.Sprintf("pprof-simulation-%s", time.Now().Format(time.RFC3339))
if err := os.Rename(dir, fname); err != nil {
_ = os.RemoveAll(dir)
return "", err
}
return fname, nil
}
func writeProfiles(ctx context.Context, dir string) error {
for _, profile := range pprof.Profiles() {
file, err := os.Create(filepath.Join(dir, profile.Name()+".pprof.gz"))
if err != nil {
return err
}
if err := profile.WriteTo(file, 0); err != nil {
_ = file.Close()
return err
}
if err := file.Close(); err != nil {
return err
}
if err := ctx.Err(); err != nil {
return err
}
}
file, err := os.Create(filepath.Join(dir, "cpu.pprof.gz"))
if err != nil {
return err
}
if err := pprof.StartCPUProfile(file); err != nil {
_ = file.Close()
return err
}
select {
case <-time.After(30 * time.Second):
case <-ctx.Done():
}
pprof.StopCPUProfile()
err = file.Close()
if err := ctx.Err(); err != nil {
return err
}
return err
}
func profileOnSignal(cctx *cli.Context, signals ...os.Signal) {
ch := make(chan os.Signal, 1)
signal.Notify(ch, signals...)
defer signal.Stop(ch)
for {
select {
case <-ch:
fname, err := takeProfiles(cctx.Context)
switch err {
case context.Canceled:
return
case nil:
fmt.Fprintf(cctx.App.ErrWriter, "Wrote profile to %q\n", fname)
default:
fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to write profile: %s\n", err)
}
case <-cctx.Done():
return
}
}
}

cmd/lotus-sim/rename.go (new file, 29 lines)

@@ -0,0 +1,29 @@
package main
import (
"fmt"
"github.com/urfave/cli/v2"
)
var renameSimCommand = &cli.Command{
Name: "rename",
ArgsUsage: "<new-name>",
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
if cctx.NArg() != 1 {
return fmt.Errorf("expected 1 argument")
}
name := cctx.Args().First()
return node.RenameSim(cctx.Context, cctx.String("simulation"), name)
},
}

cmd/lotus-sim/run.go (new file, 72 lines)

@@ -0,0 +1,72 @@
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/urfave/cli/v2"
)
var runSimCommand = &cli.Command{
Name: "run",
Description: `Run the simulation.
Signals:
- SIGUSR1: Print information about the current simulation (equivalent to 'lotus-sim info').
- SIGUSR2: Write pprof profiles to ./pprof-simulation-$DATE/`,
Flags: []cli.Flag{
&cli.IntFlag{
Name: "epochs",
Usage: "Advance the given number of epochs then stop.",
},
},
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
go profileOnSignal(cctx, syscall.SIGUSR2)
sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
if err != nil {
return err
}
targetEpochs := cctx.Int("epochs")
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGUSR1)
defer signal.Stop(ch)
for i := 0; targetEpochs == 0 || i < targetEpochs; i++ {
ts, err := sim.Step(cctx.Context)
if err != nil {
return err
}
fmt.Fprintf(cctx.App.Writer, "advanced to %d %s\n", ts.Height(), ts.Key())
// Print the current simulation info when SIGUSR1 is received, without stopping.
select {
case <-ch:
fmt.Fprintln(cctx.App.Writer, "---------------------")
if err := printInfo(cctx.Context, sim, cctx.App.Writer); err != nil {
fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to print info: %s\n", err)
}
fmt.Fprintln(cctx.App.Writer, "---------------------")
case <-cctx.Context.Done():
return cctx.Err()
default:
}
}
fmt.Fprintln(cctx.App.Writer, "simulation done")
return err
},
}

View File

@ -0,0 +1,93 @@
package simulation
import (
"context"
"crypto/sha256"
"encoding/binary"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
)
const beaconPrefix = "mockbeacon:"
// nextBeaconEntries returns fake beacon entries for the next block.
func (sim *Simulation) nextBeaconEntries() []types.BeaconEntry {
parentBeacons := sim.head.Blocks()[0].BeaconEntries
lastBeacon := parentBeacons[len(parentBeacons)-1]
beaconRound := lastBeacon.Round + 1
buf := make([]byte, len(beaconPrefix)+8)
copy(buf, beaconPrefix)
binary.BigEndian.PutUint64(buf[len(beaconPrefix):], beaconRound)
beaconRand := sha256.Sum256(buf)
return []types.BeaconEntry{{
Round: beaconRound,
Data: beaconRand[:],
}}
}
// nextTicket returns a fake ticket for the next block.
func (sim *Simulation) nextTicket() *types.Ticket {
newProof := sha256.Sum256(sim.head.MinTicket().VRFProof)
return &types.Ticket{
VRFProof: newProof[:],
}
}
// makeTipSet generates and executes the next tipset from the given messages. This method:
//
// 1. Stores the given messages in the Chainstore.
// 2. Creates and persists a single block mined by the same miner as the parent.
// 3. Creates a tipset from this block and executes it.
// 4. Returns the resulting tipset.
//
// This method does _not_ mutate local state (although it does add blocks to the datastore).
func (sim *Simulation) makeTipSet(ctx context.Context, messages []*types.Message) (*types.TipSet, error) {
parentTs := sim.head
parentState, parentRec, err := sim.StateManager.TipSetState(ctx, parentTs)
if err != nil {
return nil, xerrors.Errorf("failed to compute parent tipset: %w", err)
}
msgsCid, err := sim.storeMessages(ctx, messages)
if err != nil {
return nil, xerrors.Errorf("failed to store block messages: %w", err)
}
uts := parentTs.MinTimestamp() + build.BlockDelaySecs
blks := []*types.BlockHeader{{
Miner: parentTs.MinTicketBlock().Miner, // keep reusing the same miner.
Ticket: sim.nextTicket(),
BeaconEntries: sim.nextBeaconEntries(),
Parents: parentTs.Cids(),
Height: parentTs.Height() + 1,
ParentStateRoot: parentState,
ParentMessageReceipts: parentRec,
Messages: msgsCid,
ParentBaseFee: abi.NewTokenAmount(0),
Timestamp: uts,
ElectionProof: &types.ElectionProof{WinCount: 1},
}}
err = sim.Node.Chainstore.PersistBlockHeaders(blks...)
if err != nil {
return nil, xerrors.Errorf("failed to persist block headers: %w", err)
}
newTipSet, err := types.NewTipSet(blks)
if err != nil {
return nil, xerrors.Errorf("failed to create new tipset: %w", err)
}
now := time.Now()
_, _, err = sim.StateManager.TipSetState(ctx, newTipSet)
if err != nil {
return nil, xerrors.Errorf("failed to compute new tipset: %w", err)
}
duration := time.Since(now)
log.Infow("computed tipset", "duration", duration, "height", newTipSet.Height())
return newTipSet, nil
}

View File

@ -0,0 +1,280 @@
package blockbuilder
import (
"context"
"math"
"go.uber.org/zap"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/account"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
)
const (
// 0.25 is the default, but the number below is from the network.
gasOverestimation = 1.0 / 0.808
// The number of expected blocks in a tipset. We use this to determine how much gas a tipset
// has.
// 5 per tipset, but we effectively get 4 blocks worth of messages.
expectedBlocks = 4
// TODO: This will produce invalid blocks but it will accurately model the amount of gas
// we're willing to use per-tipset.
// A more correct approach would be to produce 5 blocks. We can do that later.
targetGas = build.BlockGasTarget * expectedBlocks
)
type BlockBuilder struct {
ctx context.Context
logger *zap.SugaredLogger
parentTs *types.TipSet
parentSt *state.StateTree
vm *vm.VM
sm *stmgr.StateManager
gasTotal int64
messages []*types.Message
}
// NewBlockBuilder constructs a new block builder from the parent state. Use this to pack a block
// with messages.
//
// NOTE: The context applies to the life of the block builder itself (but does not need to be canceled).
func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.StateManager, parentTs *types.TipSet) (*BlockBuilder, error) {
parentState, _, err := sm.TipSetState(ctx, parentTs)
if err != nil {
return nil, err
}
parentSt, err := sm.StateTree(parentState)
if err != nil {
return nil, err
}
bb := &BlockBuilder{
ctx: ctx,
logger: logger.With("epoch", parentTs.Height()+1),
sm: sm,
parentTs: parentTs,
parentSt: parentSt,
}
// Then we construct a VM to execute messages for gas estimation.
//
// Most parts of this VM are "real" except:
// 1. We don't charge a fee.
// 2. The runtime has "fake" proof logic.
// 3. We don't actually save any of the results.
r := store.NewChainRand(sm.ChainStore(), parentTs.Cids())
vmopt := &vm.VMOpts{
StateBase: parentState,
Epoch: parentTs.Height() + 1,
Rand: r,
Bstore: sm.ChainStore().StateBlockstore(),
Syscalls: sm.ChainStore().VMSys(),
CircSupplyCalc: sm.GetVMCirculatingSupply,
NtwkVersion: sm.GetNtwkVersion,
BaseFee: abi.NewTokenAmount(0),
LookbackState: stmgr.LookbackStateGetterForTipset(sm, parentTs),
}
bb.vm, err = vm.NewVM(bb.ctx, vmopt)
if err != nil {
return nil, err
}
return bb, nil
}
// PushMessages tries to push the specified message into the block.
//
// 1. All messages will be executed in-order.
// 2. Gas computation & nonce selection will be handled internally.
// 3. The base-fee is 0 so the sender does not need funds.
// 4. As usual, the sender must be an account (any account).
// 5. If the message fails to execute, this method will fail.
//
// Returns ErrOutOfGas when out of gas. Check BlockBuilder.GasRemaining and try pushing a cheaper
// message.
func (bb *BlockBuilder) PushMessage(msg *types.Message) (*types.MessageReceipt, error) {
if bb.gasTotal >= targetGas {
return nil, new(ErrOutOfGas)
}
st := bb.StateTree()
store := bb.ActorStore()
// Copy the message before we start mutating it.
msgCpy := *msg
msg = &msgCpy
actor, err := st.GetActor(msg.From)
if err != nil {
return nil, err
}
if !builtin.IsAccountActor(actor.Code) {
return nil, xerrors.Errorf(
"messags may only be sent from account actors, got message from %s (%s)",
msg.From, builtin.ActorNameByCode(actor.Code),
)
}
msg.Nonce = actor.Nonce
if msg.From.Protocol() == address.ID {
state, err := account.Load(store, actor)
if err != nil {
return nil, err
}
msg.From, err = state.PubkeyAddress()
if err != nil {
return nil, err
}
}
// TODO: Our gas estimation is broken for payment channels due to horrible hacks in
// gasEstimateGasLimit.
if msg.Value == types.EmptyInt {
msg.Value = abi.NewTokenAmount(0)
}
msg.GasPremium = abi.NewTokenAmount(0)
msg.GasFeeCap = abi.NewTokenAmount(0)
msg.GasLimit = build.BlockGasTarget
// We manually snapshot so we can revert nonce changes, etc. on failure.
err = st.Snapshot(bb.ctx)
if err != nil {
return nil, xerrors.Errorf("failed to take a snapshot while estimating message gas: %w", err)
}
defer st.ClearSnapshot()
ret, err := bb.vm.ApplyMessage(bb.ctx, msg)
if err != nil {
_ = st.Revert()
return nil, err
}
if ret.ActorErr != nil {
_ = st.Revert()
return nil, ret.ActorErr
}
// Sometimes there are bugs. Let's catch them.
if ret.GasUsed == 0 {
_ = st.Revert()
return nil, xerrors.Errorf("used no gas %v -> %v", msg, ret)
}
// Update the gas limit taking overestimation into account.
msg.GasLimit = int64(math.Ceil(float64(ret.GasUsed) * gasOverestimation))
// Did we go over? Yes, revert.
newTotal := bb.gasTotal + msg.GasLimit
if newTotal > targetGas {
_ = st.Revert()
return nil, &ErrOutOfGas{Available: targetGas - bb.gasTotal, Required: msg.GasLimit}
}
bb.gasTotal = newTotal
bb.messages = append(bb.messages, msg)
return &ret.MessageReceipt, nil
}
// ActorStore returns the VM's current (pending) blockstore.
func (bb *BlockBuilder) ActorStore() adt.Store {
return bb.vm.ActorStore(bb.ctx)
}
// StateTree returns the VM's current (pending) state-tree. This includes any changes made by
// successfully pushed messages.
//
// You probably want ParentStateTree.
func (bb *BlockBuilder) StateTree() *state.StateTree {
return bb.vm.StateTree().(*state.StateTree)
}
// ParentStateTree returns the parent state-tree (not the parent tipset's parent state-tree).
func (bb *BlockBuilder) ParentStateTree() *state.StateTree {
return bb.parentSt
}
// StateTreeByHeight will return a state-tree up through and including the current in-progress
// epoch.
//
// NOTE: This will return the state after the given epoch, not the parent state for the epoch.
func (bb *BlockBuilder) StateTreeByHeight(epoch abi.ChainEpoch) (*state.StateTree, error) {
now := bb.Height()
if epoch > now {
return nil, xerrors.Errorf(
"cannot load state-tree from future: %d > %d", epoch, bb.Height(),
)
} else if epoch <= 0 {
return nil, xerrors.Errorf(
"cannot load state-tree: epoch %d <= 0", epoch,
)
}
// Manually handle "now" and "previous".
switch epoch {
case now:
return bb.StateTree(), nil
case now - 1:
return bb.ParentStateTree(), nil
}
// Get the tipset of the block _after_ the target epoch so we can use its parent state.
targetTs, err := bb.sm.ChainStore().GetTipsetByHeight(bb.ctx, epoch+1, bb.parentTs, false)
if err != nil {
return nil, err
}
return bb.sm.StateTree(targetTs.ParentState())
}
// Messages returns all messages currently packed into the next block.
// 1. DO NOT modify the slice, copy it.
// 2. DO NOT retain the slice, copy it.
func (bb *BlockBuilder) Messages() []*types.Message {
return bb.messages
}
// GasRemaining returns the amount of remaining gas in the next block.
func (bb *BlockBuilder) GasRemaining() int64 {
return targetGas - bb.gasTotal
}
// ParentTipSet returns the parent tipset.
func (bb *BlockBuilder) ParentTipSet() *types.TipSet {
return bb.parentTs
}
// Height returns the epoch for the target block.
func (bb *BlockBuilder) Height() abi.ChainEpoch {
return bb.parentTs.Height() + 1
}
// NetworkVersion returns the network version for the target block.
func (bb *BlockBuilder) NetworkVersion() network.Version {
return bb.sm.GetNtwkVersion(bb.ctx, bb.Height())
}
// StateManager returns the stmgr.StateManager.
func (bb *BlockBuilder) StateManager() *stmgr.StateManager {
return bb.sm
}
// ActorsVersion returns the actors version for the target block.
func (bb *BlockBuilder) ActorsVersion() actors.Version {
return actors.VersionForNetwork(bb.NetworkVersion())
}
func (bb *BlockBuilder) L() *zap.SugaredLogger {
return bb.logger
}
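
For illustration, here is a minimal sketch of how a caller (such as a simulation stage) might use the block builder defined above. The helper name packUntilFull and its placement in the stages package are assumptions for the example; PushMessage, IsOutOfGas, GasRemaining, and L are the APIs defined in this file.

package stages

import (
    "github.com/filecoin-project/lotus/chain/types"

    "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
)

// packUntilFull pushes messages into the block builder until it reports that
// the block is out of gas, then stops without treating that as an error.
func packUntilFull(bb *blockbuilder.BlockBuilder, msgs []*types.Message) error {
    for _, msg := range msgs {
        if _, err := bb.PushMessage(msg); blockbuilder.IsOutOfGas(err) {
            bb.L().Debugw("block is full", "gas-remaining", bb.GasRemaining())
            return nil
        } else if err != nil {
            return err
        }
    }
    return nil
}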

View File

@ -0,0 +1,25 @@
package blockbuilder
import (
"errors"
"fmt"
)
// ErrOutOfGas is returned from BlockBuilder.PushMessage when the block does not have enough gas to
// fit the given message.
type ErrOutOfGas struct {
Available, Required int64
}
func (e *ErrOutOfGas) Error() string {
if e.Available == 0 {
return "out of gas: block full"
}
return fmt.Sprintf("out of gas: %d < %d", e.Required, e.Available)
}
// IsOutOfGas returns true if the error is an "out of gas" error.
func IsOutOfGas(err error) bool {
var oog *ErrOutOfGas
return errors.As(err, &oog)
}

View File

@ -0,0 +1,58 @@
package simulation
import (
"context"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/chain/types"
)
// toArray converts the given set of CIDs to an AMT. This is usually used to pack messages into blocks.
func toArray(store blockadt.Store, cids []cid.Cid) (cid.Cid, error) {
arr := blockadt.MakeEmptyArray(store)
for i, c := range cids {
oc := cbg.CborCid(c)
if err := arr.Set(uint64(i), &oc); err != nil {
return cid.Undef, err
}
}
return arr.Root()
}
// storeMessages packs a set of messages into a types.MsgMeta and returns the resulting CID. The
// resulting CID is valid for the BlockHeader's Messages field.
func (sim *Simulation) storeMessages(ctx context.Context, messages []*types.Message) (cid.Cid, error) {
// We store all messages as "bls" messages so they're executed in-order. This ensures
// accurate gas accounting. It also ensures we don't, e.g., try to fund a miner after we
// fail a pre-commit...
var msgCids []cid.Cid
for _, msg := range messages {
c, err := sim.Node.Chainstore.PutMessage(msg)
if err != nil {
return cid.Undef, err
}
msgCids = append(msgCids, c)
}
adtStore := sim.Node.Chainstore.ActorStore(ctx)
blsMsgArr, err := toArray(adtStore, msgCids)
if err != nil {
return cid.Undef, err
}
sekpMsgArr, err := toArray(adtStore, nil)
if err != nil {
return cid.Undef, err
}
msgsCid, err := adtStore.Put(adtStore.Context(), &types.MsgMeta{
BlsMessages: blsMsgArr,
SecpkMessages: sekpMsgArr,
})
if err != nil {
return cid.Undef, err
}
return msgsCid, nil
}

View File

@ -0,0 +1,179 @@
package mock
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
tutils "github.com/filecoin-project/specs-actors/v5/support/testing"
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
)
// Ideally, we'd use extern/sector-storage/mock. Unfortunately, those mocks are a bit _too_ accurate
// and would force us to load sector info for window post proofs.
const (
mockSealProofPrefix = "valid seal proof:"
mockAggregateSealProofPrefix = "valid aggregate seal proof:"
mockPoStProofPrefix = "valid post proof:"
)
var log = logging.Logger("simulation-mock")
// mockVerifier is a simple mock for verifying "fake" proofs.
type mockVerifier struct{}
var Verifier ffiwrapper.Verifier = mockVerifier{}
func (mockVerifier) VerifySeal(proof proof5.SealVerifyInfo) (bool, error) {
addr, err := address.NewIDAddress(uint64(proof.Miner))
if err != nil {
return false, err
}
mockProof, err := MockSealProof(proof.SealProof, addr)
if err != nil {
return false, err
}
if bytes.Equal(proof.Proof, mockProof) {
return true, nil
}
log.Debugw("invalid seal proof", "expected", mockProof, "actual", proof.Proof, "miner", addr)
return false, nil
}
func (mockVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) {
addr, err := address.NewIDAddress(uint64(aggregate.Miner))
if err != nil {
return false, err
}
mockProof, err := MockAggregateSealProof(aggregate.SealProof, addr, len(aggregate.Infos))
if err != nil {
return false, err
}
if bytes.Equal(aggregate.Proof, mockProof) {
return true, nil
}
log.Debugw("invalid aggregate seal proof",
"expected", mockProof,
"actual", aggregate.Proof,
"count", len(aggregate.Infos),
"miner", addr,
)
return false, nil
}
func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) {
panic("should not be called")
}
func (mockVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) {
if len(info.Proofs) != 1 {
return false, fmt.Errorf("expected exactly one proof")
}
proof := info.Proofs[0]
addr, err := address.NewIDAddress(uint64(info.Prover))
if err != nil {
return false, err
}
mockProof, err := MockWindowPoStProof(proof.PoStProof, addr)
if err != nil {
return false, err
}
if bytes.Equal(proof.ProofBytes, mockProof) {
return true, nil
}
log.Debugw("invalid window post proof",
"expected", mockProof,
"actual", info.Proofs[0],
"miner", addr,
)
return false, nil
}
func (mockVerifier) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) {
panic("should not be called")
}
// MockSealProof generates a mock "seal" proof tied to the specified proof type and the given miner.
func MockSealProof(proofType abi.RegisteredSealProof, minerAddr address.Address) ([]byte, error) {
plen, err := proofType.ProofSize()
if err != nil {
return nil, err
}
proof := make([]byte, plen)
i := copy(proof, mockSealProofPrefix)
binary.BigEndian.PutUint64(proof[i:], uint64(proofType))
i += 8
i += copy(proof[i:], minerAddr.Bytes())
return proof, nil
}
// MockAggregateSealProof generates a mock "seal" aggregate proof tied to the specified proof type,
// the given miner, and the number of proven sectors.
func MockAggregateSealProof(proofType abi.RegisteredSealProof, minerAddr address.Address, count int) ([]byte, error) {
proof := make([]byte, aggProofLen(count))
i := copy(proof, mockAggregateSealProofPrefix)
binary.BigEndian.PutUint64(proof[i:], uint64(proofType))
i += 8
binary.BigEndian.PutUint64(proof[i:], uint64(count))
i += 8
i += copy(proof[i:], minerAddr.Bytes())
return proof, nil
}
// MockWindowPoStProof generates a mock "window post" proof tied to the specified proof type, and the
// given miner.
func MockWindowPoStProof(proofType abi.RegisteredPoStProof, minerAddr address.Address) ([]byte, error) {
plen, err := proofType.ProofSize()
if err != nil {
return nil, err
}
proof := make([]byte, plen)
i := copy(proof, mockPoStProofPrefix)
i += copy(proof[i:], minerAddr.Bytes())
return proof, nil
}
// MockCommR generates a "fake" but valid CommR for a sector. It is unique for the given sector/miner.
func MockCommR(minerAddr address.Address, sno abi.SectorNumber) cid.Cid {
return tutils.MakeCID(fmt.Sprintf("%s:%d", minerAddr, sno), &miner5.SealedCIDPrefix)
}
// TODO: dedup
func aggProofLen(nproofs int) int {
switch {
case nproofs <= 8:
return 11220
case nproofs <= 16:
return 14196
case nproofs <= 32:
return 17172
case nproofs <= 64:
return 20148
case nproofs <= 128:
return 23124
case nproofs <= 256:
return 26100
case nproofs <= 512:
return 29076
case nproofs <= 1024:
return 32052
case nproofs <= 2048:
return 35028
case nproofs <= 4096:
return 38004
case nproofs <= 8192:
return 40980
default:
panic("too many proofs")
}
}
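
As a quick illustration of how these helpers fit together, the sketch below generates a mock seal proof for an example miner and checks that the mock verifier accepts it. The miner ID (1000) and the 64GiB proof type are arbitrary choices for the example, and only the fields the mock inspects (miner, seal proof type, proof bytes) are filled in; MockSealProof and Verifier are the APIs defined in this file.

package main

import (
    "fmt"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"

    "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
)

func main() {
    minerAddr, err := address.NewIDAddress(1000)
    if err != nil {
        panic(err)
    }
    // Build a "valid" mock proof for this miner and proof type.
    sealProof, err := mock.MockSealProof(abi.RegisteredSealProof_StackedDrg64GiBV1_1, minerAddr)
    if err != nil {
        panic(err)
    }
    // The mock verifier only checks that the proof bytes match what
    // MockSealProof would produce for the same miner and proof type.
    ok, err := mock.Verifier.VerifySeal(proof5.SealVerifyInfo{
        SealProof: abi.RegisteredSealProof_StackedDrg64GiBV1_1,
        SectorID:  abi.SectorID{Miner: 1000, Number: 0},
        Proof:     sealProof,
    })
    fmt.Println(ok, err) // expected: true <nil>
}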

View File

@ -0,0 +1,241 @@
package simulation
import (
"context"
"strings"
"go.uber.org/multierr"
"golang.org/x/xerrors"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages"
"github.com/filecoin-project/lotus/node/repo"
)
// Node represents the local lotus node, or at least the part of it we care about.
type Node struct {
repo repo.LockedRepo
Blockstore blockstore.Blockstore
MetadataDS datastore.Batching
Chainstore *store.ChainStore
}
// OpenNode opens the local lotus node for writing. This will fail if the node is online.
func OpenNode(ctx context.Context, path string) (*Node, error) {
r, err := repo.NewFS(path)
if err != nil {
return nil, err
}
return NewNode(ctx, r)
}
// NewNode constructs a new node from the given repo.
func NewNode(ctx context.Context, r repo.Repo) (nd *Node, _err error) {
lr, err := r.Lock(repo.FullNode)
if err != nil {
return nil, err
}
defer func() {
if _err != nil {
_ = lr.Close()
}
}()
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
if err != nil {
return nil, err
}
ds, err := lr.Datastore(ctx, "/metadata")
if err != nil {
return nil, err
}
return &Node{
repo: lr,
Chainstore: store.NewChainStore(bs, bs, ds, vm.Syscalls(mock.Verifier), nil),
MetadataDS: ds,
Blockstore: bs,
}, err
}
// Close cleanly closes the repo. Please call this on shutdown to make sure everything is flushed.
func (nd *Node) Close() error {
if nd.repo != nil {
return nd.repo.Close()
}
return nil
}
// LoadSim loads the simulation with the given name.
func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) {
stages, err := stages.DefaultPipeline()
if err != nil {
return nil, err
}
sim := &Simulation{
Node: nd,
name: name,
stages: stages,
}
sim.head, err = sim.loadNamedTipSet("head")
if err != nil {
return nil, err
}
sim.start, err = sim.loadNamedTipSet("start")
if err != nil {
return nil, err
}
err = sim.loadConfig()
if err != nil {
return nil, xerrors.Errorf("failed to load config for simulation %s: %w", name, err)
}
us, err := sim.config.upgradeSchedule()
if err != nil {
return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err)
}
sim.StateManager, err = stmgr.NewStateManagerWithUpgradeSchedule(nd.Chainstore, us)
if err != nil {
return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err)
}
return sim, nil
}
// CreateSim creates a new simulation.
//
// - This will fail if a simulation already exists with the given name.
// - Name must not contain a '/'.
func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) (*Simulation, error) {
if strings.Contains(name, "/") {
return nil, xerrors.Errorf("simulation name %q cannot contain a '/'", name)
}
stages, err := stages.DefaultPipeline()
if err != nil {
return nil, err
}
sim := &Simulation{
name: name,
Node: nd,
StateManager: stmgr.NewStateManager(nd.Chainstore),
stages: stages,
}
if has, err := nd.MetadataDS.Has(sim.key("head")); err != nil {
return nil, err
} else if has {
return nil, xerrors.Errorf("simulation named %s already exists", name)
}
if err := sim.storeNamedTipSet("start", head); err != nil {
return nil, xerrors.Errorf("failed to set simulation start: %w", err)
}
if err := sim.SetHead(head); err != nil {
return nil, err
}
return sim, nil
}
// ListSims lists all simulations.
func (nd *Node) ListSims(ctx context.Context) ([]string, error) {
prefix := simulationPrefix.ChildString("head").String()
items, err := nd.MetadataDS.Query(query.Query{
Prefix: prefix,
KeysOnly: true,
Orders: []query.Order{query.OrderByKey{}},
})
if err != nil {
return nil, xerrors.Errorf("failed to list simulations: %w", err)
}
defer func() { _ = items.Close() }()
var names []string
for {
select {
case result, ok := <-items.Next():
if !ok {
return names, nil
}
if result.Error != nil {
return nil, xerrors.Errorf("failed to retrieve next simulation: %w", result.Error)
}
names = append(names, strings.TrimPrefix(result.Key, prefix+"/"))
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
var simFields = []string{"head", "start", "config"}
// DeleteSim deletes a simulation and all related metadata.
//
// NOTE: This function does not delete associated messages, blocks, or chain state.
func (nd *Node) DeleteSim(ctx context.Context, name string) error {
var err error
for _, field := range simFields {
key := simulationPrefix.ChildString(field).ChildString(name)
err = multierr.Append(err, nd.MetadataDS.Delete(key))
}
return err
}
// CopySim copies a simulation.
func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error {
if strings.Contains(newName, "/") {
return xerrors.Errorf("simulation name %q cannot contain a '/'", newName)
}
if strings.Contains(oldName, "/") {
return xerrors.Errorf("simulation name %q cannot contain a '/'", oldName)
}
values := make(map[string][]byte)
for _, field := range simFields {
key := simulationPrefix.ChildString(field).ChildString(oldName)
value, err := nd.MetadataDS.Get(key)
if err == datastore.ErrNotFound {
continue
} else if err != nil {
return err
}
values[field] = value
}
if _, ok := values["head"]; !ok {
return xerrors.Errorf("simulation named %s not found", oldName)
}
for _, field := range simFields {
key := simulationPrefix.ChildString(field).ChildString(newName)
var err error
if value, ok := values[field]; ok {
err = nd.MetadataDS.Put(key, value)
} else {
err = nd.MetadataDS.Delete(key)
}
if err != nil {
return err
}
}
return nil
}
// RenameSim renames a simulation.
func (nd *Node) RenameSim(ctx context.Context, oldName, newName string) error {
if err := nd.CopySim(ctx, oldName, newName); err != nil {
return err
}
return nd.DeleteSim(ctx, oldName)
}
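
To show how the Node API above is meant to be driven, here is a minimal sketch that opens a repo, loads a simulation, and advances it by one epoch. The repo path and the simulation name "default" are placeholders; OpenNode, LoadSim, and Close come from this file, and Step is the method used by the run command earlier in this change.

package main

import (
    "context"
    "fmt"

    "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
)

func main() {
    ctx := context.Background()

    // Open the (offline) lotus repo. The path is a placeholder.
    node, err := simulation.OpenNode(ctx, "/path/to/lotus/repo")
    if err != nil {
        panic(err)
    }
    defer node.Close() //nolint:errcheck

    // Load an existing simulation by name.
    sim, err := node.LoadSim(ctx, "default")
    if err != nil {
        panic(err)
    }

    // Advance the simulated chain by a single epoch.
    ts, err := sim.Step(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println("advanced to height", ts.Height())
}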

View File

@ -0,0 +1,408 @@
package simulation
import (
"context"
"encoding/json"
"runtime"
"golang.org/x/sync/errgroup"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
blockadt "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/stages"
)
var log = logging.Logger("simulation")
// config is the simulation's config, persisted to the local metadata store and loaded on start.
//
// See Simulation.loadConfig and Simulation.saveConfig.
type config struct {
Upgrades map[network.Version]abi.ChainEpoch
}
// upgradeSchedule constructs an stmgr.StateManager upgrade schedule, overriding any network upgrade
// epochs as specified in the config.
func (c *config) upgradeSchedule() (stmgr.UpgradeSchedule, error) {
upgradeSchedule := stmgr.DefaultUpgradeSchedule()
expected := make(map[network.Version]struct{}, len(c.Upgrades))
for nv := range c.Upgrades {
expected[nv] = struct{}{}
}
// Update network upgrade epochs.
newUpgradeSchedule := upgradeSchedule[:0]
for _, upgrade := range upgradeSchedule {
if height, ok := c.Upgrades[upgrade.Network]; ok {
delete(expected, upgrade.Network)
if height < 0 {
continue
}
upgrade.Height = height
}
newUpgradeSchedule = append(newUpgradeSchedule, upgrade)
}
// Make sure we didn't try to configure an unknown network version.
if len(expected) > 0 {
missing := make([]network.Version, 0, len(expected))
for nv := range expected {
missing = append(missing, nv)
}
return nil, xerrors.Errorf("unknown network versions %v in config", missing)
}
// Finally, validate it. This ensures we don't change the order of the upgrade or anything
// like that.
if err := newUpgradeSchedule.Validate(); err != nil {
return nil, err
}
return newUpgradeSchedule, nil
}
// Simulation specifies a lotus-sim simulation.
type Simulation struct {
Node *Node
StateManager *stmgr.StateManager
name string
config config
start *types.TipSet
// head is the simulation's current head tipset; it is updated by SetHead.
head *types.TipSet
stages []stages.Stage
}
// loadConfig loads a simulation's config from the datastore. This must be called on startup and may
// be called to restore the config from disk.
func (sim *Simulation) loadConfig() error {
configBytes, err := sim.Node.MetadataDS.Get(sim.key("config"))
if err == nil {
err = json.Unmarshal(configBytes, &sim.config)
}
switch err {
case nil:
case datastore.ErrNotFound:
sim.config = config{}
default:
return xerrors.Errorf("failed to load config: %w", err)
}
return nil
}
// saveConfig saves the current config to the datastore. This must be called whenever the config is
// changed.
func (sim *Simulation) saveConfig() error {
buf, err := json.Marshal(sim.config)
if err != nil {
return err
}
return sim.Node.MetadataDS.Put(sim.key("config"), buf)
}
var simulationPrefix = datastore.NewKey("/simulation")
// key returns the key in the form /simulation/<subkey>/<simulation-name>. For example,
// /simulation/head/default.
func (sim *Simulation) key(subkey string) datastore.Key {
return simulationPrefix.ChildString(subkey).ChildString(sim.name)
}
// loadNamedTipSet loads the tipset with the given name (for this simulation).
func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) {
tskBytes, err := sim.Node.MetadataDS.Get(sim.key(name))
if err != nil {
return nil, xerrors.Errorf("failed to load tipset %s/%s: %w", sim.name, name, err)
}
tsk, err := types.TipSetKeyFromBytes(tskBytes)
if err != nil {
return nil, xerrors.Errorf("failed to parse tipste %v (%s/%s): %w", tskBytes, sim.name, name, err)
}
ts, err := sim.Node.Chainstore.LoadTipSet(tsk)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset %s (%s/%s): %w", tsk, sim.name, name, err)
}
return ts, nil
}
// storeNamedTipSet stores the tipset at name (relative to the simulation).
func (sim *Simulation) storeNamedTipSet(name string, ts *types.TipSet) error {
if err := sim.Node.MetadataDS.Put(sim.key(name), ts.Key().Bytes()); err != nil {
return xerrors.Errorf("failed to store tipset (%s/%s): %w", sim.name, name, err)
}
return nil
}
// GetHead returns the current simulation head.
func (sim *Simulation) GetHead() *types.TipSet {
return sim.head
}
// GetStart returns the simulation's start tipset (the parent of the first simulated tipset).
func (sim *Simulation) GetStart() *types.TipSet {
return sim.start
}
// GetNetworkVersion returns the current network version for the simulation.
func (sim *Simulation) GetNetworkVersion() network.Version {
return sim.StateManager.GetNtwkVersion(context.TODO(), sim.head.Height())
}
// SetHead updates the current head of the simulation and stores it in the metadata store. This is
// called for every Simulation.Step.
func (sim *Simulation) SetHead(head *types.TipSet) error {
if err := sim.storeNamedTipSet("head", head); err != nil {
return err
}
sim.head = head
return nil
}
// Name returns the simulation's name.
func (sim *Simulation) Name() string {
return sim.name
}
// SetUpgradeHeight sets the height of the given network version change (and saves the config).
//
// This fails if the specified epoch has already passed or the new upgrade schedule is invalid.
func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch) (_err error) {
if epoch <= sim.head.Height() {
return xerrors.Errorf("cannot set upgrade height in the past (%d <= %d)", epoch, sim.head.Height())
}
if sim.config.Upgrades == nil {
sim.config.Upgrades = make(map[network.Version]abi.ChainEpoch, 1)
}
sim.config.Upgrades[nv] = epoch
defer func() {
if _err != nil {
// try to restore the old config on error.
_ = sim.loadConfig()
}
}()
newUpgradeSchedule, err := sim.config.upgradeSchedule()
if err != nil {
return err
}
sm, err := stmgr.NewStateManagerWithUpgradeSchedule(sim.Node.Chainstore, newUpgradeSchedule)
if err != nil {
return err
}
err = sim.saveConfig()
if err != nil {
return err
}
sim.StateManager = sm
return nil
}
// ListUpgrades returns any future network upgrades.
func (sim *Simulation) ListUpgrades() (stmgr.UpgradeSchedule, error) {
upgrades, err := sim.config.upgradeSchedule()
if err != nil {
return nil, err
}
var pending stmgr.UpgradeSchedule
for _, upgrade := range upgrades {
if upgrade.Height < sim.head.Height() {
continue
}
pending = append(pending, upgrade)
}
return pending, nil
}
type AppliedMessage struct {
types.Message
types.MessageReceipt
}
// Walk walks the simulation's chain from the current head back to the first tipset.
func (sim *Simulation) Walk(
ctx context.Context,
lookback int64,
cb func(sm *stmgr.StateManager,
ts *types.TipSet,
stCid cid.Cid,
messages []*AppliedMessage) error,
) error {
store := sim.Node.Chainstore.ActorStore(ctx)
minEpoch := sim.start.Height()
if lookback != 0 {
minEpoch = sim.head.Height() - abi.ChainEpoch(lookback)
}
// Given that loading messages and receipts can be a little bit slow, we do this in parallel.
//
// 1. We spin up some number of workers.
// 2. We hand tipsets to workers in round-robin order.
// 3. We pull "resolved" tipsets in the same round-robin order.
// 4. We serially call the callback in reverse-chain order.
//
// We have a buffer of size 1 for both resolved tipsets and unresolved tipsets. This should
// ensure that we never block unnecessarily.
type work struct {
ts *types.TipSet
stCid cid.Cid
recCid cid.Cid
}
type result struct {
ts *types.TipSet
stCid cid.Cid
messages []*AppliedMessage
}
// This is more disk bound than CPU bound, but eh...
workerCount := runtime.NumCPU() * 2
workQs := make([]chan *work, workerCount)
resultQs := make([]chan *result, workerCount)
for i := range workQs {
workQs[i] = make(chan *work, 1)
}
for i := range resultQs {
resultQs[i] = make(chan *result, 1)
}
grp, ctx := errgroup.WithContext(ctx)
// Walk the chain and fire off work items.
grp.Go(func() error {
ts := sim.head
stCid, recCid, err := sim.StateManager.TipSetState(ctx, ts)
if err != nil {
return err
}
i := 0
for ts.Height() > minEpoch {
if err := ctx.Err(); err != nil {
return ctx.Err()
}
select {
case workQs[i] <- &work{ts, stCid, recCid}:
case <-ctx.Done():
return ctx.Err()
}
stCid = ts.MinTicketBlock().ParentStateRoot
recCid = ts.MinTicketBlock().ParentMessageReceipts
ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents())
if err != nil {
return xerrors.Errorf("loading parent: %w", err)
}
i = (i + 1) % workerCount
}
for _, q := range workQs {
close(q)
}
return nil
})
// Spin up one worker per queue pair.
for i := 0; i < workerCount; i++ {
workQ := workQs[i]
resultQ := resultQs[i]
grp.Go(func() error {
for {
if err := ctx.Err(); err != nil {
return ctx.Err()
}
var job *work
var ok bool
select {
case job, ok = <-workQ:
case <-ctx.Done():
return ctx.Err()
}
if !ok {
break
}
msgs, err := sim.Node.Chainstore.MessagesForTipset(job.ts)
if err != nil {
return err
}
recs, err := blockadt.AsArray(store, job.recCid)
if err != nil {
return xerrors.Errorf("amt load: %w", err)
}
applied := make([]*AppliedMessage, len(msgs))
var rec types.MessageReceipt
err = recs.ForEach(&rec, func(i int64) error {
applied[i] = &AppliedMessage{
Message: *msgs[i].VMMessage(),
MessageReceipt: rec,
}
return nil
})
if err != nil {
return err
}
select {
case resultQ <- &result{
ts: job.ts,
stCid: job.stCid,
messages: applied,
}:
case <-ctx.Done():
return ctx.Err()
}
}
close(resultQ)
return nil
})
}
// Process results in the same order we enqueued them.
grp.Go(func() error {
qs := resultQs
for len(qs) > 0 {
newQs := qs[:0]
for _, q := range qs {
if err := ctx.Err(); err != nil {
return ctx.Err()
}
select {
case r, ok := <-q:
if !ok {
continue
}
err := cb(sim.StateManager, r.ts, r.stCid, r.messages)
if err != nil {
return err
}
case <-ctx.Done():
return ctx.Err()
}
newQs = append(newQs, q)
}
qs = newQs
}
return nil
})
// Wait for everything to finish.
return grp.Wait()
}
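
For illustration, here is a small sketch of using Simulation.Walk: it counts the messages applied over the last 100 epochs of the simulated chain. The helper name and the lookback value are arbitrary; the callback signature matches the Walk definition above.

package main

import (
    "context"

    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/lotus/chain/stmgr"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
)

// countRecentMessages walks the last 100 epochs of the simulation and counts
// every applied message. Walk invokes the callback serially, in reverse-chain
// order, so a plain counter is safe here.
func countRecentMessages(ctx context.Context, sim *simulation.Simulation) (int, error) {
    total := 0
    err := sim.Walk(ctx, 100, func(
        sm *stmgr.StateManager,
        ts *types.TipSet,
        stCid cid.Cid,
        messages []*simulation.AppliedMessage,
    ) error {
        total += len(messages)
        return nil
    })
    return total, err
}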

View File

@ -0,0 +1,38 @@
package stages
import (
"math/rand"
"github.com/filecoin-project/go-address"
)
// actorIter is a simple persistent iterator that loops over a set of actors.
type actorIter struct {
actors []address.Address
offset int
}
// shuffle randomly permutes the set of actors.
func (p *actorIter) shuffle() {
rand.Shuffle(len(p.actors), func(i, j int) {
p.actors[i], p.actors[j] = p.actors[j], p.actors[i]
})
}
// next returns the next actor's address and advances the iterator.
func (p *actorIter) next() address.Address {
next := p.actors[p.offset]
p.offset++
p.offset %= len(p.actors)
return next
}
// add adds a new actor to the iterator.
func (p *actorIter) add(addr address.Address) {
p.actors = append(p.actors, addr)
}
// len returns the number of actors in the iterator.
func (p *actorIter) len() int {
return len(p.actors)
}

View File

@ -0,0 +1,200 @@
package stages
import (
"sort"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
)
// pendingCommitTracker tracks pending commits per-miner for a single epoch.
type pendingCommitTracker map[address.Address]minerPendingCommits
// minerPendingCommits tracks a miner's pending commits during a single epoch (grouped by seal proof type).
type minerPendingCommits map[abi.RegisteredSealProof][]abi.SectorNumber
// finish marks count sectors of the given proof type as "prove-committed".
func (m minerPendingCommits) finish(proof abi.RegisteredSealProof, count int) {
snos := m[proof]
if len(snos) < count {
panic("not enough sector numbers to finish")
} else if len(snos) == count {
delete(m, proof)
} else {
m[proof] = snos[count:]
}
}
// empty returns true if there are no pending commits.
func (m minerPendingCommits) empty() bool {
return len(m) == 0
}
// count returns the number of pending commits.
func (m minerPendingCommits) count() int {
count := 0
for _, snos := range m {
count += len(snos)
}
return count
}
// commitQueue is used to track pending prove-commits.
//
// Miners are processed in round-robin where _all_ commits from a given miner are finished before
// moving on to the next. This is designed to maximize batching.
type commitQueue struct {
minerQueue []address.Address
queue []pendingCommitTracker
offset abi.ChainEpoch
}
// ready returns the number of prove-commits ready to be proven at the current epoch. Useful for logging.
func (q *commitQueue) ready() int {
if len(q.queue) == 0 {
return 0
}
count := 0
for _, pending := range q.queue[0] {
count += pending.count()
}
return count
}
// nextMiner returns the next miner to be proved and the set of pending prove commits for that
// miner. When some number of sectors have successfully been proven, call "finish" so we don't try
// to prove them again.
func (q *commitQueue) nextMiner() (address.Address, minerPendingCommits, bool) {
if len(q.queue) == 0 {
return address.Undef, nil, false
}
next := q.queue[0]
// Go through the queue and find the first non-empty batch.
for len(q.minerQueue) > 0 {
addr := q.minerQueue[0]
q.minerQueue = q.minerQueue[1:]
pending := next[addr]
if !pending.empty() {
return addr, pending, true
}
delete(next, addr)
}
return address.Undef, nil, false
}
// advanceEpoch will advance to the next epoch. If some sectors were left unproven in the current
// epoch, they will be "prepended" into the next epoch's sector set.
func (q *commitQueue) advanceEpoch(epoch abi.ChainEpoch) {
if epoch < q.offset {
panic("cannot roll epoch backwards")
}
// Now we "roll forwards", merging each epoch we advance over with the next.
for len(q.queue) > 1 && q.offset < epoch {
curr := q.queue[0]
q.queue[0] = nil
q.queue = q.queue[1:]
q.offset++
next := q.queue[0]
// Cleanup empty entries.
for addr, pending := range curr {
if pending.empty() {
delete(curr, addr)
}
}
// If the entire level is actually empty, just skip to the next one.
if len(curr) == 0 {
continue
}
// Otherwise, merge the next into the current.
for addr, nextPending := range next {
currPending := curr[addr]
if currPending.empty() {
curr[addr] = nextPending
continue
}
for ty, nextSnos := range nextPending {
currSnos := currPending[ty]
if len(currSnos) == 0 {
currPending[ty] = nextSnos
continue
}
currPending[ty] = append(currSnos, nextSnos...)
}
}
// Now replace next with the merged curr.
q.queue[0] = curr
}
q.offset = epoch
if len(q.queue) == 0 {
return
}
next := q.queue[0]
seenMiners := make(map[address.Address]struct{}, len(q.minerQueue))
for _, addr := range q.minerQueue {
seenMiners[addr] = struct{}{}
}
// Find the new miners not already in the queue.
offset := len(q.minerQueue)
for addr, pending := range next {
if pending.empty() {
delete(next, addr)
continue
}
if _, ok := seenMiners[addr]; ok {
continue
}
q.minerQueue = append(q.minerQueue, addr)
}
// Sort the new miners only.
newMiners := q.minerQueue[offset:]
sort.Slice(newMiners, func(i, j int) bool {
// eh, escape analysis should be fine here...
return string(newMiners[i].Bytes()) < string(newMiners[j].Bytes())
})
}
// enqueueProveCommit enqueues a prove-commit for the given miner's pre-commit.
func (q *commitQueue) enqueueProveCommit(addr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo) error {
// Compute the epoch at which we can start trying to commit.
preCommitDelay := policy.GetPreCommitChallengeDelay()
minCommitEpoch := preCommitEpoch + preCommitDelay + 1
// Figure out the offset in the queue.
i := int(minCommitEpoch - q.offset)
if i < 0 {
i = 0
}
// Expand capacity and insert.
if cap(q.queue) <= i {
pc := make([]pendingCommitTracker, i+1, preCommitDelay*2)
copy(pc, q.queue)
q.queue = pc
} else if len(q.queue) <= i {
q.queue = q.queue[:i+1]
}
tracker := q.queue[i]
if tracker == nil {
tracker = make(pendingCommitTracker)
q.queue[i] = tracker
}
minerPending := tracker[addr]
if minerPending == nil {
minerPending = make(minerPendingCommits)
tracker[addr] = minerPending
}
minerPending[info.SealProof] = append(minerPending[info.SealProof], info.SectorNumber)
return nil
}

View File

@ -0,0 +1,128 @@
package stages
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
)
func TestCommitQueue(t *testing.T) {
var q commitQueue
addr1, err := address.NewIDAddress(1000)
require.NoError(t, err)
proofType := abi.RegisteredSealProof_StackedDrg64GiBV1_1
require.NoError(t, q.enqueueProveCommit(addr1, 0, miner.SectorPreCommitInfo{
SealProof: proofType,
SectorNumber: 0,
}))
require.NoError(t, q.enqueueProveCommit(addr1, 0, miner.SectorPreCommitInfo{
SealProof: proofType,
SectorNumber: 1,
}))
require.NoError(t, q.enqueueProveCommit(addr1, 1, miner.SectorPreCommitInfo{
SealProof: proofType,
SectorNumber: 2,
}))
require.NoError(t, q.enqueueProveCommit(addr1, 1, miner.SectorPreCommitInfo{
SealProof: proofType,
SectorNumber: 3,
}))
require.NoError(t, q.enqueueProveCommit(addr1, 3, miner.SectorPreCommitInfo{
SealProof: proofType,
SectorNumber: 4,
}))
require.NoError(t, q.enqueueProveCommit(addr1, 4, miner.SectorPreCommitInfo{
SealProof: proofType,
SectorNumber: 5,
}))
require.NoError(t, q.enqueueProveCommit(addr1, 6, miner.SectorPreCommitInfo{
SealProof: proofType,
SectorNumber: 6,
}))
epoch := abi.ChainEpoch(0)
q.advanceEpoch(epoch)
_, _, ok := q.nextMiner()
require.False(t, ok)
epoch += policy.GetPreCommitChallengeDelay()
q.advanceEpoch(epoch)
_, _, ok = q.nextMiner()
require.False(t, ok)
// 0 : empty + non-empty
epoch++
q.advanceEpoch(epoch)
addr, sectors, ok := q.nextMiner()
require.True(t, ok)
require.Equal(t, sectors.count(), 2)
require.Equal(t, addr, addr1)
sectors.finish(proofType, 1)
require.Equal(t, sectors.count(), 1)
require.EqualValues(t, []abi.SectorNumber{1}, sectors[proofType])
// 1 : non-empty + non-empty
epoch++
q.advanceEpoch(epoch)
addr, sectors, ok = q.nextMiner()
require.True(t, ok)
require.Equal(t, addr, addr1)
require.Equal(t, sectors.count(), 3)
require.EqualValues(t, []abi.SectorNumber{1, 2, 3}, sectors[proofType])
sectors.finish(proofType, 3)
require.Equal(t, sectors.count(), 0)
// 2 : empty + empty
epoch++
q.advanceEpoch(epoch)
_, _, ok = q.nextMiner()
require.False(t, ok)
// 3 : empty + non-empty
epoch++
q.advanceEpoch(epoch)
_, sectors, ok = q.nextMiner()
require.True(t, ok)
require.Equal(t, sectors.count(), 1)
require.EqualValues(t, []abi.SectorNumber{4}, sectors[proofType])
// 4 : non-empty + non-empty
epoch++
q.advanceEpoch(epoch)
_, sectors, ok = q.nextMiner()
require.True(t, ok)
require.Equal(t, sectors.count(), 2)
require.EqualValues(t, []abi.SectorNumber{4, 5}, sectors[proofType])
// 5 : empty + non-empty
epoch++
q.advanceEpoch(epoch)
_, sectors, ok = q.nextMiner()
require.True(t, ok)
require.Equal(t, sectors.count(), 2)
require.EqualValues(t, []abi.SectorNumber{4, 5}, sectors[proofType])
sectors.finish(proofType, 1)
require.EqualValues(t, []abi.SectorNumber{5}, sectors[proofType])
// 6
epoch++
q.advanceEpoch(epoch)
_, sectors, ok = q.nextMiner()
require.True(t, ok)
require.Equal(t, sectors.count(), 2)
require.EqualValues(t, []abi.SectorNumber{5, 6}, sectors[proofType])
// 8
epoch += 2
q.advanceEpoch(epoch)
_, sectors, ok = q.nextMiner()
require.True(t, ok)
require.Equal(t, sectors.count(), 2)
require.EqualValues(t, []abi.SectorNumber{5, 6}, sectors[proofType])
}

View File

@ -0,0 +1,318 @@
package stages
import (
"bytes"
"context"
"sort"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
)
var (
TargetFunds = abi.TokenAmount(types.MustParseFIL("1000FIL"))
MinimumFunds = abi.TokenAmount(types.MustParseFIL("100FIL"))
)
type FundingStage struct {
fundAccount address.Address
taxMin abi.TokenAmount
minFunds, maxFunds abi.TokenAmount
}
func NewFundingStage() (*FundingStage, error) {
// TODO: make all this configurable.
addr, err := address.NewIDAddress(100)
if err != nil {
return nil, err
}
return &FundingStage{
fundAccount: addr,
taxMin: abi.TokenAmount(types.MustParseFIL("1000FIL")),
minFunds: abi.TokenAmount(types.MustParseFIL("1000000FIL")),
maxFunds: abi.TokenAmount(types.MustParseFIL("100000000FIL")),
}, nil
}
func (*FundingStage) Name() string {
return "funding"
}
func (fs *FundingStage) Fund(bb *blockbuilder.BlockBuilder, target address.Address) error {
return fs.fund(bb, target, 0)
}
// SendAndFund "packs" the given message, funding the actor if necessary. It:
//
// 1. Tries to send the given message.
// 2. If that fails, it checks to see if the exit code was ErrInsufficientFunds.
// 3. If so, it sends 1K FIL from the "burnt funds actor" (because we need to send it from
// somewhere) and re-tries the message.
func (fs *FundingStage) SendAndFund(bb *blockbuilder.BlockBuilder, msg *types.Message) (res *types.MessageReceipt, err error) {
for i := 0; i < 10; i++ {
res, err = bb.PushMessage(msg)
if err == nil {
return res, nil
}
aerr, ok := err.(aerrors.ActorError)
if !ok || aerr.RetCode() != exitcode.ErrInsufficientFunds {
return nil, err
}
// Ok, insufficient funds. Let's fund this miner and try again.
if err := fs.fund(bb, msg.To, i); err != nil {
if !blockbuilder.IsOutOfGas(err) {
err = xerrors.Errorf("failed to fund %s: %w", msg.To, err)
}
return nil, err
}
}
return res, err
}
// fund funds the target actor with 'TargetFunds << shift' FIL. The "shift" parameter allows us to
// keep doubling the amount until the intended operation succeeds.
func (fs *FundingStage) fund(bb *blockbuilder.BlockBuilder, target address.Address, shift int) error {
amt := TargetFunds
if shift > 0 {
if shift >= 8 {
shift = 8 // cap
}
amt = big.Lsh(amt, uint(shift))
}
_, err := bb.PushMessage(&types.Message{
From: fs.fundAccount,
To: target,
Value: amt,
Method: builtin.MethodSend,
})
return err
}
func (fs *FundingStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
st := bb.StateTree()
fundAccActor, err := st.GetActor(fs.fundAccount)
if err != nil {
return err
}
if fs.minFunds.LessThan(fundAccActor.Balance) {
return nil
}
// Ok, we're going to go fund this thing.
start := time.Now()
type actor struct {
types.Actor
Address address.Address
}
var targets []*actor
err = st.ForEach(func(addr address.Address, act *types.Actor) error {
// Don't steal from ourselves!
if addr == fs.fundAccount {
return nil
}
if act.Balance.LessThan(fs.taxMin) {
return nil
}
if !(builtin.IsAccountActor(act.Code) || builtin.IsMultisigActor(act.Code)) {
return nil
}
targets = append(targets, &actor{*act, addr})
return nil
})
if err != nil {
return err
}
balance := fundAccActor.Balance.Copy()
sort.Slice(targets, func(i, j int) bool {
return targets[i].Balance.GreaterThan(targets[j].Balance)
})
store := bb.ActorStore()
epoch := bb.Height()
actorsVersion := bb.ActorsVersion()
var accounts, multisigs int
defer func() {
if _err != nil {
return
}
bb.L().Infow("finished funding the simulation",
"duration", time.Since(start),
"targets", len(targets),
"epoch", epoch,
"new-balance", types.FIL(balance),
"old-balance", types.FIL(fundAccActor.Balance),
"multisigs", multisigs,
"accounts", accounts,
)
}()
for _, actor := range targets {
switch {
case builtin.IsAccountActor(actor.Code):
if _, err := bb.PushMessage(&types.Message{
From: actor.Address,
To: fs.fundAccount,
Value: actor.Balance,
}); blockbuilder.IsOutOfGas(err) {
return nil
} else if err != nil {
return err
}
accounts++
case builtin.IsMultisigActor(actor.Code):
msigState, err := multisig.Load(store, &actor.Actor)
if err != nil {
return err
}
threshold, err := msigState.Threshold()
if err != nil {
return err
}
if threshold > 16 {
bb.L().Debugw("ignoring multisig with high threshold",
"multisig", actor.Address,
"threshold", threshold,
"max", 16,
)
continue
}
locked, err := msigState.LockedBalance(epoch)
if err != nil {
return err
}
if locked.LessThan(fs.taxMin) {
continue // not worth it.
}
allSigners, err := msigState.Signers()
if err != nil {
return err
}
signers := make([]address.Address, 0, threshold)
for _, signer := range allSigners {
actor, err := st.GetActor(signer)
if err != nil {
return err
}
if !builtin.IsAccountActor(actor.Code) {
// I am so not dealing with this mess.
continue
}
if uint64(len(signers)) >= threshold {
break
}
signers = append(signers, signer)
}
// Ok, we're not dealing with this one.
if uint64(len(signers)) < threshold {
continue
}
available := big.Sub(actor.Balance, locked)
var txnId uint64
{
msg, err := multisig.Message(actorsVersion, signers[0]).Propose(
actor.Address, fs.fundAccount, available,
builtin.MethodSend, nil,
)
if err != nil {
return err
}
res, err := bb.PushMessage(msg)
if err != nil {
if blockbuilder.IsOutOfGas(err) {
err = nil
}
return err
}
var ret multisig.ProposeReturn
err = ret.UnmarshalCBOR(bytes.NewReader(res.Return))
if err != nil {
return err
}
if ret.Applied {
if !ret.Code.IsSuccess() {
bb.L().Errorw("failed to tax multisig",
"multisig", actor.Address,
"exitcode", ret.Code,
)
}
break
}
txnId = uint64(ret.TxnID)
}
var ret multisig.ProposeReturn
for _, signer := range signers[1:] {
msg, err := multisig.Message(actorsVersion, signer).Approve(actor.Address, txnId, nil)
if err != nil {
return err
}
res, err := bb.PushMessage(msg)
if err != nil {
if blockbuilder.IsOutOfGas(err) {
err = nil
}
return err
}
err = ret.UnmarshalCBOR(bytes.NewReader(res.Return))
if err != nil {
return err
}
// A bit redundant, but nice.
if ret.Applied {
break
}
}
if !ret.Applied {
bb.L().Errorw("failed to apply multisig transaction",
"multisig", actor.Address,
"txnid", txnId,
"signers", len(signers),
"threshold", threshold,
)
continue
}
if !ret.Code.IsSuccess() {
bb.L().Errorw("failed to tax multisig",
"multisig", actor.Address,
"txnid", txnId,
"exitcode", ret.Code,
)
} else {
multisigs++
}
default:
panic("impossible case")
}
balance = big.Int{Int: balance.Add(balance.Int, actor.Balance.Int)}
if balance.GreaterThanEqual(fs.maxFunds) {
// There's no need to get greedy.
// Well, really, we're trying to avoid messing with state _too_ much.
return nil
}
}
return nil
}

View File

@ -0,0 +1,27 @@
package stages
import (
"context"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
)
// Stage is a stage of the simulation. It's asked to pack messages for every block.
type Stage interface {
Name() string
PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) error
}
type Funding interface {
SendAndFund(*blockbuilder.BlockBuilder, *types.Message) (*types.MessageReceipt, error)
Fund(*blockbuilder.BlockBuilder, address.Address) error
}
type Committer interface {
EnqueueProveCommit(addr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo) error
}
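
To make the Stage contract concrete, here is a minimal, hypothetical stage that satisfies the interface without packing any messages; the real stages in this change (funding, window post, prove-commit, pre-commit) follow the same shape.

package stages

import (
    "context"

    "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
)

// noopStage is an illustrative stage: it implements Stage but only logs the
// gas remaining in the block instead of packing messages.
type noopStage struct{}

var _ Stage = noopStage{}

func (noopStage) Name() string { return "noop" }

func (noopStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) error {
    bb.L().Debugw("nothing to pack", "gas-remaining", bb.GasRemaining())
    return nil
}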

View File

@ -0,0 +1,31 @@
package stages
// DefaultPipeline returns the default stage pipeline. This pipeline:
//
// 1. Funds a "funding" actor, if necessary.
// 2. Submits any ready window posts.
// 3. Submits any ready prove commits.
// 4. Submits pre-commits with the remaining gas.
func DefaultPipeline() ([]Stage, error) {
// TODO: make this configurable. E.g., through DI?
// Ideally, we'd also be able to change priority, limit throughput (by limiting gas in the
// block builder), etc.
funding, err := NewFundingStage()
if err != nil {
return nil, err
}
wdpost, err := NewWindowPoStStage()
if err != nil {
return nil, err
}
provecommit, err := NewProveCommitStage(funding)
if err != nil {
return nil, err
}
precommit, err := NewPreCommitStage(funding, provecommit)
if err != nil {
return nil, err
}
return []Stage{funding, wdpost, provecommit, precommit}, nil
}
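
The driver that consumes this pipeline lives elsewhere in the simulation package (it is not part of this file), but the consumption pattern it implies is roughly the sketch below: each stage, in pipeline order, is given a chance to pack messages into the shared block builder for the next block. The function name runPipeline is an assumption for the example.

package stages

import (
    "context"

    "github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
)

// runPipeline is an illustrative sketch, not the actual driver: it runs each
// stage in pipeline order against a single block builder. Stages are expected
// to handle "block full" internally and return nil in that case.
func runPipeline(ctx context.Context, bb *blockbuilder.BlockBuilder, pipeline []Stage) error {
    for _, stage := range pipeline {
        if err := stage.PackMessages(ctx, bb); err != nil {
            return err
        }
    }
    return nil
}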

View File

@ -0,0 +1,347 @@
package stages
import (
"context"
"sort"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
)
const (
minPreCommitBatchSize = 1
maxPreCommitBatchSize = miner5.PreCommitSectorBatchMaxSize
)
type PreCommitStage struct {
funding Funding
committer Committer
// The tiers represent the top 1%, top 10%, and everyone else. When sealing sectors, we seal
// a group of sectors for the top 1%, a group (half that size) for the top 10%, and one
// sector for everyone else. We determine these rates by looking at two power tables.
// TODO Ideally we'd "learn" this distribution from the network. But this is good enough for
// now.
top1, top10, rest actorIter
initialized bool
}
func NewPreCommitStage(funding Funding, committer Committer) (*PreCommitStage, error) {
return &PreCommitStage{
funding: funding,
committer: committer,
}, nil
}
func (*PreCommitStage) Name() string {
return "pre-commit"
}
// PackMessages packs pre-commit messages until the block is full.
func (stage *PreCommitStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
if !stage.initialized {
if err := stage.load(ctx, bb); err != nil {
return err
}
}
var (
full bool
top1Count, top10Count, restCount int
)
start := time.Now()
defer func() {
if _err != nil {
return
}
bb.L().Debugw("packed pre commits",
"done", top1Count+top10Count+restCount,
"top1", top1Count,
"top10", top10Count,
"rest", restCount,
"filled-block", full,
"duration", time.Since(start),
)
}()
var top1Miners, top10Miners, restMiners int
for i := 0; ; i++ {
var (
minerAddr address.Address
count *int
)
// We pre-commit for the top 1%, the top 10%, and the rest of the network 1/3rd of the time each.
// This won't yield the most accurate distribution... but it'll give us a good
// enough distribution.
switch {
case (i%3) <= 0 && top1Miners < stage.top1.len():
count = &top1Count
minerAddr = stage.top1.next()
top1Miners++
case (i%3) <= 1 && top10Miners < stage.top10.len():
count = &top10Count
minerAddr = stage.top10.next()
top10Miners++
case (i%3) <= 2 && restMiners < stage.rest.len():
count = &restCount
minerAddr = stage.rest.next()
restMiners++
default:
// Well, we've run through all miners.
return nil
}
var (
added int
err error
)
added, full, err = stage.packMiner(ctx, bb, minerAddr, maxProveCommitBatchSize)
if err != nil {
return xerrors.Errorf("failed to pack precommits for miner %s: %w", minerAddr, err)
}
*count += added
if full {
return nil
}
}
}
// packMiner packs up to count pre-commits for the given miner.
func (stage *PreCommitStage) packMiner(
ctx context.Context, bb *blockbuilder.BlockBuilder,
minerAddr address.Address, count int,
) (int, bool, error) {
log := bb.L().With("miner", minerAddr)
epoch := bb.Height()
nv := bb.NetworkVersion()
minerActor, err := bb.StateTree().GetActor(minerAddr)
if err != nil {
return 0, false, err
}
minerState, err := miner.Load(bb.ActorStore(), minerActor)
if err != nil {
return 0, false, err
}
minerInfo, err := minerState.Info()
if err != nil {
return 0, false, err
}
// Make sure the miner is funded.
minerBalance, err := minerState.AvailableBalance(minerActor.Balance)
if err != nil {
return 0, false, err
}
if big.Cmp(minerBalance, MinimumFunds) < 0 {
err := stage.funding.Fund(bb, minerAddr)
if err != nil {
if blockbuilder.IsOutOfGas(err) {
return 0, true, nil
}
return 0, false, err
}
}
// Generate pre-commits.
sealType, err := miner.PreferredSealProofTypeFromWindowPoStType(
nv, minerInfo.WindowPoStProofType,
)
if err != nil {
return 0, false, err
}
sectorNos, err := minerState.UnallocatedSectorNumbers(count)
if err != nil {
return 0, false, err
}
expiration := epoch + policy.GetMaxSectorExpirationExtension()
infos := make([]miner.SectorPreCommitInfo, len(sectorNos))
for i, sno := range sectorNos {
infos[i] = miner.SectorPreCommitInfo{
SealProof: sealType,
SectorNumber: sno,
SealedCID: mock.MockCommR(minerAddr, sno),
SealRandEpoch: epoch - 1,
Expiration: expiration,
}
}
// Commit the pre-commits.
added := 0
if nv >= network.Version13 {
targetBatchSize := maxPreCommitBatchSize
for targetBatchSize >= minPreCommitBatchSize && len(infos) >= minPreCommitBatchSize {
batch := infos
if len(batch) > targetBatchSize {
batch = batch[:targetBatchSize]
}
params := miner5.PreCommitSectorBatchParams{
Sectors: batch,
}
enc, err := actors.SerializeParams(&params)
if err != nil {
return added, false, err
}
// NOTE: Just in case, SendAndFund will "fund" and retry any message
// that fails due to "insufficient funds".
if _, err := stage.funding.SendAndFund(bb, &types.Message{
To: minerAddr,
From: minerInfo.Worker,
Value: abi.NewTokenAmount(0),
Method: miner.Methods.PreCommitSectorBatch,
Params: enc,
}); blockbuilder.IsOutOfGas(err) {
// try again with a smaller batch.
targetBatchSize /= 2
continue
} else if aerr, ok := err.(aerrors.ActorError); ok && !aerr.IsFatal() {
// Log the error and move on. No reason to stop.
log.Errorw("failed to pre-commit for unknown reasons",
"error", aerr,
"sectors", batch,
)
return added, false, nil
} else if err != nil {
return added, false, err
}
for _, info := range batch {
if err := stage.committer.EnqueueProveCommit(minerAddr, epoch, info); err != nil {
return added, false, err
}
added++
}
infos = infos[len(batch):]
}
}
for _, info := range infos {
enc, err := actors.SerializeParams(&info) //nolint
if err != nil {
return 0, false, err
}
if _, err := stage.funding.SendAndFund(bb, &types.Message{
To: minerAddr,
From: minerInfo.Worker,
Value: abi.NewTokenAmount(0),
Method: miner.Methods.PreCommitSector,
Params: enc,
}); blockbuilder.IsOutOfGas(err) {
return added, true, nil
} else if err != nil {
return added, false, err
}
if err := stage.committer.EnqueueProveCommit(minerAddr, epoch, info); err != nil {
return added, false, err
}
added++
}
return added, false, nil
}
func (stage *PreCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
bb.L().Infow("loading miner power for pre-commits")
start := time.Now()
defer func() {
if _err != nil {
return
}
bb.L().Infow("loaded miner power for pre-commits",
"duration", time.Since(start),
"top1", stage.top1.len(),
"top10", stage.top10.len(),
"rest", stage.rest.len(),
)
}()
store := bb.ActorStore()
st := bb.ParentStateTree()
powerState, err := loadPower(store, st)
if err != nil {
return xerrors.Errorf("failed to power actor: %w", err)
}
type onboardingInfo struct {
addr address.Address
sectorCount uint64
}
var sealList []onboardingInfo
err = powerState.ForEachClaim(func(addr address.Address, claim power.Claim) error {
if claim.RawBytePower.IsZero() {
return nil
}
minerState, err := loadMiner(store, st, addr)
if err != nil {
return err
}
info, err := minerState.Info()
if err != nil {
return err
}
sectorCount := sectorsFromClaim(info.SectorSize, claim)
if sectorCount > 0 {
sealList = append(sealList, onboardingInfo{addr, uint64(sectorCount)})
}
return nil
})
if err != nil {
return err
}
if len(sealList) == 0 {
return xerrors.Errorf("simulation has no miners")
}
// Now that we have a list of sealing miners, sort them into percentiles.
sort.Slice(sealList, func(i, j int) bool {
return sealList[i].sectorCount < sealList[j].sectorCount
})
// reset, just in case.
stage.top1 = actorIter{}
stage.top10 = actorIter{}
stage.rest = actorIter{}
for i, oi := range sealList {
var dist *actorIter
if i < len(sealList)/100 {
dist = &stage.top1
} else if i < len(sealList)/10 {
dist = &stage.top10
} else {
dist = &stage.rest
}
dist.add(oi.addr)
}
stage.top1.shuffle()
stage.top10.shuffle()
stage.rest.shuffle()
stage.initialized = true
return nil
}

View File

@ -0,0 +1,372 @@
package stages
import (
"context"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/network"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
power5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
)
const (
minProveCommitBatchSize = 4
maxProveCommitBatchSize = miner5.MaxAggregatedSectors
)
type ProveCommitStage struct {
funding Funding
// We track the set of pending commits. On simulation load, and when a new pre-commit is
// added to the chain, we put the commit in this queue. advanceEpoch(currentEpoch) should be
// called on this queue at every epoch before using it.
commitQueue commitQueue
initialized bool
}
func NewProveCommitStage(funding Funding) (*ProveCommitStage, error) {
return &ProveCommitStage{
funding: funding,
}, nil
}
func (*ProveCommitStage) Name() string {
return "prove-commit"
}
func (stage *ProveCommitStage) EnqueueProveCommit(
minerAddr address.Address, preCommitEpoch abi.ChainEpoch, info miner.SectorPreCommitInfo,
) error {
return stage.commitQueue.enqueueProveCommit(minerAddr, preCommitEpoch, info)
}
// PackMessages packs prove-commits for all "ready to be proven" sectors until it fills the
// block or runs out.
func (stage *ProveCommitStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
if !stage.initialized {
if err := stage.load(ctx, bb); err != nil {
return err
}
}
// Roll the commitQueue forward.
stage.commitQueue.advanceEpoch(bb.Height())
start := time.Now()
var failed, done, unbatched, count int
defer func() {
if _err != nil {
return
}
remaining := stage.commitQueue.ready()
bb.L().Debugw("packed prove commits",
"remaining", remaining,
"done", done,
"failed", failed,
"unbatched", unbatched,
"miners-processed", count,
"duration", time.Since(start),
)
}()
for {
addr, pending, ok := stage.commitQueue.nextMiner()
if !ok {
return nil
}
res, err := stage.packProveCommitsMiner(ctx, bb, addr, pending)
if err != nil {
return err
}
failed += res.failed
done += res.done
unbatched += res.unbatched
count++
if res.full {
return nil
}
}
}
type proveCommitResult struct {
done, failed, unbatched int
full bool
}
// packProveCommitsMiner packs prove-commits from the given miner until it runs out of
// available prove-commits, batching as much as possible.
//
// This function will fund as necessary from the "burnt funds actor" (look, it's convenient).
func (stage *ProveCommitStage) packProveCommitsMiner(
ctx context.Context, bb *blockbuilder.BlockBuilder, minerAddr address.Address,
pending minerPendingCommits,
) (res proveCommitResult, _err error) {
minerActor, err := bb.StateTree().GetActor(minerAddr)
if err != nil {
return res, err
}
minerState, err := miner.Load(bb.ActorStore(), minerActor)
if err != nil {
return res, err
}
info, err := minerState.Info()
if err != nil {
return res, err
}
log := bb.L().With("miner", minerAddr)
nv := bb.NetworkVersion()
for sealType, snos := range pending {
if nv >= network.Version13 {
for len(snos) > minProveCommitBatchSize {
batchSize := maxProveCommitBatchSize
if len(snos) < batchSize {
batchSize = len(snos)
}
batch := snos[:batchSize]
proof, err := mock.MockAggregateSealProof(sealType, minerAddr, batchSize)
if err != nil {
return res, err
}
params := miner5.ProveCommitAggregateParams{
SectorNumbers: bitfield.New(),
AggregateProof: proof,
}
for _, sno := range batch {
params.SectorNumbers.Set(uint64(sno))
}
enc, err := actors.SerializeParams(&params)
if err != nil {
return res, err
}
if _, err := stage.funding.SendAndFund(bb, &types.Message{
From: info.Worker,
To: minerAddr,
Value: abi.NewTokenAmount(0),
Method: miner.Methods.ProveCommitAggregate,
Params: enc,
}); err == nil {
res.done += len(batch)
} else if blockbuilder.IsOutOfGas(err) {
res.full = true
return res, nil
} else if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() {
// If we get a random error, or a fatal actor error, bail.
return res, err
} else if aerr.RetCode() == exitcode.ErrNotFound || aerr.RetCode() == exitcode.ErrIllegalArgument {
// If we get a "not-found" or illegal argument error, try to
// remove any missing prove-commits and continue. This can
// happen either because:
//
// 1. The pre-commit failed on execution (but not when
// packing). This shouldn't happen, but we might as well
// gracefully handle it.
// 2. The pre-commit has expired. We'd have to be really
// backlogged to hit this case, but we might as well handle
// it.
// First, split into "good" and "missing"
good, err := stage.filterProveCommits(ctx, bb, minerAddr, batch)
if err != nil {
log.Errorw("failed to filter prove commits", "error", err)
// fail with the original error.
return res, aerr
}
removed := len(batch) - len(good)
if removed == 0 {
log.Errorw("failed to prove-commit for unknown reasons",
"error", aerr,
"sectors", batch,
)
res.failed += len(batch)
} else if len(good) == 0 {
log.Errorw("failed to prove commit missing pre-commits",
"error", aerr,
"discarded", removed,
)
res.failed += len(batch)
} else {
// update the pending sector numbers in-place to remove the expired ones.
snos = snos[removed:]
copy(snos, good)
pending.finish(sealType, removed)
log.Errorw("failed to prove commit expired/missing pre-commits",
"error", aerr,
"discarded", removed,
"kept", len(good),
)
res.failed += removed
// Then try again.
continue
}
} else {
log.Errorw("failed to prove commit sector(s)",
"error", err,
"sectors", batch,
)
res.failed += len(batch)
}
pending.finish(sealType, len(batch))
snos = snos[len(batch):]
}
}
for len(snos) > 0 && res.unbatched < power5.MaxMinerProveCommitsPerEpoch {
sno := snos[0]
snos = snos[1:]
proof, err := mock.MockSealProof(sealType, minerAddr)
if err != nil {
return res, err
}
params := miner.ProveCommitSectorParams{
SectorNumber: sno,
Proof: proof,
}
enc, err := actors.SerializeParams(&params)
if err != nil {
return res, err
}
if _, err := stage.funding.SendAndFund(bb, &types.Message{
From: info.Worker,
To: minerAddr,
Value: abi.NewTokenAmount(0),
Method: miner.Methods.ProveCommitSector,
Params: enc,
}); err == nil {
res.unbatched++
res.done++
} else if blockbuilder.IsOutOfGas(err) {
res.full = true
return res, nil
} else if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() {
return res, err
} else {
log.Errorw("failed to prove commit sector(s)",
"error", err,
"sectors", []abi.SectorNumber{sno},
)
res.failed++
}
// mark it as "finished" regardless so we skip it.
pending.finish(sealType, 1)
}
// If we get here, we can't prove-commit anything more.
}
return res, nil
}
// loadMiner enqueues all pending prove-commits for the given miner. This is called on load to
// populate the commitQueue and should not need to be called later.
//
// It will drop any pre-commits that have already expired.
func (stage *ProveCommitStage) loadMiner(ctx context.Context, bb *blockbuilder.BlockBuilder, addr address.Address) error {
epoch := bb.Height()
av := bb.ActorsVersion()
minerState, err := loadMiner(bb.ActorStore(), bb.ParentStateTree(), addr)
if err != nil {
return err
}
// Find all pending prove-commits and group them by proof type. Really, there should never
// (except during upgrades) be more than one type.
var total, dropped int
err = minerState.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error {
total++
msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof)
if epoch > info.PreCommitEpoch+msd {
dropped++
return nil
}
return stage.commitQueue.enqueueProveCommit(addr, info.PreCommitEpoch, info.Info)
})
if err != nil {
return err
}
if dropped > 0 {
bb.L().Warnw("dropped expired pre-commits on load",
"miner", addr,
"total", total,
"expired", dropped,
)
}
return nil
}
// filterProveCommits filters out expired and/or missing pre-commits.
func (stage *ProveCommitStage) filterProveCommits(
ctx context.Context, bb *blockbuilder.BlockBuilder,
minerAddr address.Address, snos []abi.SectorNumber,
) ([]abi.SectorNumber, error) {
act, err := bb.StateTree().GetActor(minerAddr)
if err != nil {
return nil, err
}
minerState, err := miner.Load(bb.ActorStore(), act)
if err != nil {
return nil, err
}
nextEpoch := bb.Height()
av := bb.ActorsVersion()
good := make([]abi.SectorNumber, 0, len(snos))
for _, sno := range snos {
info, err := minerState.GetPrecommittedSector(sno)
if err != nil {
return nil, err
}
if info == nil {
continue
}
msd := policy.GetMaxProveCommitDuration(av, info.Info.SealProof)
if nextEpoch > info.PreCommitEpoch+msd {
continue
}
good = append(good, sno)
}
return good, nil
}
func (stage *ProveCommitStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) error {
stage.initialized = false // in case something fails while we're doing this.
stage.commitQueue = commitQueue{offset: bb.Height()}
powerState, err := loadPower(bb.ActorStore(), bb.ParentStateTree())
if err != nil {
return err
}
err = powerState.ForEachClaim(func(minerAddr address.Address, claim power.Claim) error {
// TODO: If we want to finish pre-commits for "new" miners, we'll need to change
// this.
if claim.RawBytePower.IsZero() {
return nil
}
return stage.loadMiner(ctx, bb, minerAddr)
})
if err != nil {
return err
}
stage.initialized = true
return nil
}

View File

@ -0,0 +1,51 @@
package stages
import (
"context"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
)
func loadMiner(store adt.Store, st types.StateTree, addr address.Address) (miner.State, error) {
minerActor, err := st.GetActor(addr)
if err != nil {
return nil, err
}
return miner.Load(store, minerActor)
}
func loadPower(store adt.Store, st types.StateTree) (power.State, error) {
powerActor, err := st.GetActor(power.Address)
if err != nil {
return nil, err
}
return power.Load(store, powerActor)
}
// Compute the number of sectors a miner has from their power claim.
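// For example, with 32GiB sectors (34,359,738,368 bytes), a raw byte power claim of
// 3,435,973,836,800 bytes works out to exactly 100 sectors.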
func sectorsFromClaim(sectorSize abi.SectorSize, c power.Claim) int64 {
if c.RawBytePower.Int == nil {
return 0
}
sectorCount := big.Div(c.RawBytePower, big.NewIntUnsigned(uint64(sectorSize)))
if !sectorCount.IsInt64() {
panic("impossible number of sectors")
}
return sectorCount.Int64()
}
func postChainCommitInfo(ctx context.Context, bb *blockbuilder.BlockBuilder, epoch abi.ChainEpoch) (abi.Randomness, error) {
cs := bb.StateManager().ChainStore()
ts := bb.ParentTipSet()
commitRand, err := cs.GetChainRandomness(ctx, ts.Cids(), crypto.DomainSeparationTag_PoStChainCommit, epoch, nil, true)
return commitRand, err
}

View File

@ -0,0 +1,317 @@
package stages
import (
"context"
"math"
"time"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/mock"
)
type WindowPoStStage struct {
// We track the window post periods per miner and assume that no new miners are ever added.
// We record all pending window post messages, and the epoch up through which we've
// generated window post messages.
pendingWposts []*types.Message
wpostPeriods [][]address.Address // (epoch % (epochs in a deadline)) -> miner
nextWpostEpoch abi.ChainEpoch
}
func NewWindowPoStStage() (*WindowPoStStage, error) {
return new(WindowPoStStage), nil
}
func (*WindowPoStStage) Name() string {
return "window-post"
}
// PackMessages packs window posts until either the block is full or all healthy sectors
// have been proven. It does not recover sectors.
func (stage *WindowPoStStage) PackMessages(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
// Push any new window posts into the queue.
if err := stage.tick(ctx, bb); err != nil {
return err
}
done := 0
failed := 0
defer func() {
if _err != nil {
return
}
bb.L().Debugw("packed window posts",
"done", done,
"failed", failed,
"remaining", len(stage.pendingWposts),
)
}()
// Then pack as many as we can.
for len(stage.pendingWposts) > 0 {
next := stage.pendingWposts[0]
if _, err := bb.PushMessage(next); err != nil {
if blockbuilder.IsOutOfGas(err) {
return nil
}
if aerr, ok := err.(aerrors.ActorError); !ok || aerr.IsFatal() {
return err
}
bb.L().Errorw("failed to submit windowed post",
"error", err,
"miner", next.To,
)
failed++
} else {
done++
}
stage.pendingWposts = stage.pendingWposts[1:]
}
stage.pendingWposts = nil
return nil
}
// queueMiner enqueues all missing window posts for the current epoch for the given miner.
func (stage *WindowPoStStage) queueMiner(
ctx context.Context, bb *blockbuilder.BlockBuilder,
addr address.Address, minerState miner.State,
commitEpoch abi.ChainEpoch, commitRand abi.Randomness,
) error {
if active, err := minerState.DeadlineCronActive(); err != nil {
return err
} else if !active {
return nil
}
minerInfo, err := minerState.Info()
if err != nil {
return err
}
di, err := minerState.DeadlineInfo(bb.Height())
if err != nil {
return err
}
di = di.NextNotElapsed()
dl, err := minerState.LoadDeadline(di.Index)
if err != nil {
return err
}
provenBf, err := dl.PartitionsPoSted()
if err != nil {
return err
}
proven, err := provenBf.AllMap(math.MaxUint64)
if err != nil {
return err
}
poStBatchSize, err := policy.GetMaxPoStPartitions(bb.NetworkVersion(), minerInfo.WindowPoStProofType)
if err != nil {
return err
}
var (
partitions []miner.PoStPartition
partitionGroups [][]miner.PoStPartition
)
// Only prove partitions with live sectors.
err = dl.ForEachPartition(func(idx uint64, part miner.Partition) error {
if proven[idx] {
return nil
}
// NOTE: We're mimicking the behavior of wdpost_run.go here.
if len(partitions) > 0 && idx%uint64(poStBatchSize) == 0 {
partitionGroups = append(partitionGroups, partitions)
partitions = nil
}
live, err := part.LiveSectors()
if err != nil {
return err
}
liveCount, err := live.Count()
if err != nil {
return err
}
faulty, err := part.FaultySectors()
if err != nil {
return err
}
faultyCount, err := faulty.Count()
if err != nil {
return err
}
if liveCount-faultyCount > 0 {
partitions = append(partitions, miner.PoStPartition{Index: idx})
}
return nil
})
if err != nil {
return err
}
if len(partitions) > 0 {
partitionGroups = append(partitionGroups, partitions)
partitions = nil
}
proof, err := mock.MockWindowPoStProof(minerInfo.WindowPoStProofType, addr)
if err != nil {
return err
}
for _, group := range partitionGroups {
params := miner.SubmitWindowedPoStParams{
Deadline: di.Index,
Partitions: group,
Proofs: []proof5.PoStProof{{
PoStProof: minerInfo.WindowPoStProofType,
ProofBytes: proof,
}},
ChainCommitEpoch: commitEpoch,
ChainCommitRand: commitRand,
}
enc, aerr := actors.SerializeParams(&params)
if aerr != nil {
return xerrors.Errorf("could not serialize submit window post parameters: %w", aerr)
}
msg := &types.Message{
To: addr,
From: minerInfo.Worker,
Method: miner.Methods.SubmitWindowedPoSt,
Params: enc,
Value: types.NewInt(0),
}
stage.pendingWposts = append(stage.pendingWposts, msg)
}
return nil
}
func (stage *WindowPoStStage) load(ctx context.Context, bb *blockbuilder.BlockBuilder) (_err error) {
bb.L().Info("loading window post info")
start := time.Now()
defer func() {
if _err != nil {
return
}
bb.L().Infow("loaded window post info", "duration", time.Since(start))
}()
// reset
stage.wpostPeriods = make([][]address.Address, miner.WPoStChallengeWindow)
stage.pendingWposts = nil
stage.nextWpostEpoch = bb.Height() + 1
st := bb.ParentStateTree()
store := bb.ActorStore()
powerState, err := loadPower(store, st)
if err != nil {
return err
}
commitEpoch := bb.ParentTipSet().Height()
commitRand, err := postChainCommitInfo(ctx, bb, commitEpoch)
if err != nil {
return err
}
return powerState.ForEachClaim(func(minerAddr address.Address, claim power.Claim) error {
// TODO: If we start recovering power, we'll need to change this.
if claim.RawBytePower.IsZero() {
return nil
}
minerState, err := loadMiner(store, st, minerAddr)
if err != nil {
return err
}
// Shouldn't be necessary if the miner has power, but we might as well be safe.
if active, err := minerState.DeadlineCronActive(); err != nil {
return err
} else if !active {
return nil
}
// Record when we need to prove for this miner.
dinfo, err := minerState.DeadlineInfo(bb.Height())
if err != nil {
return err
}
dinfo = dinfo.NextNotElapsed()
ppOffset := int(dinfo.PeriodStart % miner.WPoStChallengeWindow)
stage.wpostPeriods[ppOffset] = append(stage.wpostPeriods[ppOffset], minerAddr)
return stage.queueMiner(ctx, bb, minerAddr, minerState, commitEpoch, commitRand)
})
}
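// tick catches the window post scheduler up to the current height: on first use it loads the
// deadline assignments from state, and afterwards it queues window post messages for every
// deadline that has opened since the previous call.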
func (stage *WindowPoStStage) tick(ctx context.Context, bb *blockbuilder.BlockBuilder) error {
// If this is our first time, load from scratch.
if stage.wpostPeriods == nil {
return stage.load(ctx, bb)
}
targetHeight := bb.Height()
now := time.Now()
was := len(stage.pendingWposts)
count := 0
defer func() {
bb.L().Debugw("computed window posts",
"miners", count,
"count", len(stage.pendingWposts)-was,
"duration", time.Since(now),
)
}()
st := bb.ParentStateTree()
store := bb.ActorStore()
// Perform a bit of catch-up. This lets us do things like skip blocks at upgrades and then
// catch up afterwards, which makes the simulation easier.
for ; stage.nextWpostEpoch <= targetHeight; stage.nextWpostEpoch++ {
if stage.nextWpostEpoch+miner.WPoStChallengeWindow < targetHeight {
bb.L().Warnw("skipping old window post", "deadline-open", stage.nextWpostEpoch)
continue
}
commitEpoch := stage.nextWpostEpoch - 1
commitRand, err := postChainCommitInfo(ctx, bb, commitEpoch)
if err != nil {
return err
}
for _, addr := range stage.wpostPeriods[int(stage.nextWpostEpoch%miner.WPoStChallengeWindow)] {
minerState, err := loadMiner(store, st, addr)
if err != nil {
return err
}
if err := stage.queueMiner(ctx, bb, addr, minerState, commitEpoch, commitRand); err != nil {
return err
}
count++
}
}
return nil
}

View File

@ -0,0 +1,71 @@
package simulation
import (
"context"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation/blockbuilder"
)
// Step steps the simulation forward one step. This may move forward by more than one epoch.
func (sim *Simulation) Step(ctx context.Context) (*types.TipSet, error) {
log.Infow("step", "epoch", sim.head.Height()+1)
messages, err := sim.popNextMessages(ctx)
if err != nil {
return nil, xerrors.Errorf("failed to select messages for block: %w", err)
}
head, err := sim.makeTipSet(ctx, messages)
if err != nil {
return nil, xerrors.Errorf("failed to make tipset: %w", err)
}
if err := sim.SetHead(head); err != nil {
return nil, xerrors.Errorf("failed to update head: %w", err)
}
return head, nil
}
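// A minimal driver sketch (assumes sim is a *Simulation from an opened node and ctx is a
// context.Context; error handling abbreviated):
//
//	for i := 0; i < 10; i++ {
//		ts, err := sim.Step(ctx)
//		if err != nil {
//			return err
//		}
//		log.Infow("stepped", "epoch", ts.Height())
//	}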
// popNextMessages generates/picks a set of messages to be included in the next block.
//
// - This function is destructive and should only be called once per epoch.
// - This function does not store anything in the repo.
// - This function handles all gas estimation. The returned messages should all fit in a single
// block.
func (sim *Simulation) popNextMessages(ctx context.Context) ([]*types.Message, error) {
parentTs := sim.head
// First we make sure we don't have an upgrade at this epoch. If we do, we return no
// messages so we can just create an empty block at that epoch.
//
// This isn't what the network does, but it makes things easier. Otherwise, we'd need to run
// migrations before this epoch and I'd rather not deal with that.
nextHeight := parentTs.Height() + 1
prevVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight-1)
nextVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight)
if nextVer != prevVer {
log.Warnw("packing no messages for version upgrade block",
"old", prevVer,
"new", nextVer,
"epoch", nextHeight,
)
return nil, nil
}
bb, err := blockbuilder.NewBlockBuilder(
ctx, log.With("simulation", sim.name),
sim.StateManager, parentTs,
)
if err != nil {
return nil, err
}
for _, stage := range sim.stages {
// We're intentionally ignoring the "full" signal so we can try to pack a few more
// messages.
if err := stage.PackMessages(ctx, bb); err != nil && !blockbuilder.IsOutOfGas(err) {
return nil, xerrors.Errorf("when packing messages with %s: %w", stage.Name(), err)
}
}
return bb.Messages(), nil
}

109
cmd/lotus-sim/upgrade.go Normal file
View File

@ -0,0 +1,109 @@
package main
import (
"fmt"
"strconv"
"strings"
"text/tabwriter"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
)
var upgradeCommand = &cli.Command{
Name: "upgrade",
Description: "Modifies network upgrade heights.",
Subcommands: []*cli.Command{
upgradeSetCommand,
upgradeList,
},
}
var upgradeList = &cli.Command{
Name: "list",
Description: "Lists all pending upgrades.",
Subcommands: []*cli.Command{
upgradeSetCommand,
},
Action: func(cctx *cli.Context) (err error) {
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
if err != nil {
return err
}
upgrades, err := sim.ListUpgrades()
if err != nil {
return err
}
tw := tabwriter.NewWriter(cctx.App.Writer, 8, 8, 0, ' ', 0)
fmt.Fprintf(tw, "version\theight\tepochs\tmigration\texpensive")
epoch := sim.GetHead().Height()
for _, upgrade := range upgrades {
fmt.Fprintf(
tw, "%d\t%d\t%+d\t%t\t%t",
upgrade.Network, upgrade.Height, upgrade.Height-epoch,
upgrade.Migration != nil,
upgrade.Expensive,
)
}
return tw.Flush()
},
}
var upgradeSetCommand = &cli.Command{
Name: "set",
ArgsUsage: "<network-version> [+]<epochs>",
Description: "Set a network upgrade height. Prefix with '+' to set it relative to the last epoch.",
Action: func(cctx *cli.Context) (err error) {
args := cctx.Args()
if args.Len() != 2 {
return fmt.Errorf("expected 2 arguments")
}
nvString := args.Get(0)
networkVersion, err := strconv.ParseUint(nvString, 10, 32)
if err != nil {
return fmt.Errorf("failed to parse network version %q: %w", nvString, err)
}
heightString := args.Get(1)
relative := false
if strings.HasPrefix(heightString, "+") {
heightString = heightString[1:]
relative = true
}
height, err := strconv.ParseInt(heightString, 10, 64)
if err != nil {
return fmt.Errorf("failed to parse height version %q: %w", heightString, err)
}
node, err := open(cctx)
if err != nil {
return err
}
defer func() {
if cerr := node.Close(); err == nil {
err = cerr
}
}()
sim, err := node.LoadSim(cctx.Context, cctx.String("simulation"))
if err != nil {
return err
}
if relative {
height += int64(sim.GetHead().Height())
}
return sim.SetUpgradeHeight(network.Version(networkVersion), abi.ChainEpoch(height))
},
}
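// Usage sketch (hypothetical invocations; the simulation to operate on is selected with the
// "simulation" flag read above):
//
//	lotus-sim upgrade set 14 +100   set the nv14 upgrade 100 epochs after the current head
//	lotus-sim upgrade set 14 30000  pin the nv14 upgrade to absolute epoch 30000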

18
cmd/lotus-sim/util.go Normal file
View File

@ -0,0 +1,18 @@
package main
import (
"fmt"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/lotus/cmd/lotus-sim/simulation"
"github.com/filecoin-project/lotus/lib/ulimit"
)
func open(cctx *cli.Context) (*simulation.Node, error) {
_, _, err := ulimit.ManageFdLimit()
if err != nil {
fmt.Fprintf(cctx.App.ErrWriter, "ERROR: failed to raise ulimit: %s\n", err)
}
return simulation.OpenNode(cctx.Context, cctx.String("repo"))
}

1
go.mod
View File

@ -133,6 +133,7 @@ require (
github.com/prometheus/client_golang v1.6.0 github.com/prometheus/client_golang v1.6.0
github.com/raulk/clock v1.1.0 github.com/raulk/clock v1.1.0
github.com/raulk/go-watchdog v1.0.1 github.com/raulk/go-watchdog v1.0.1
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25
github.com/stretchr/objx v0.2.0 // indirect github.com/stretchr/objx v0.2.0 // indirect
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.0
github.com/syndtr/goleveldb v1.0.0 github.com/syndtr/goleveldb v1.0.0

2
go.sum
View File

@ -1518,6 +1518,8 @@ github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A=
github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=

104
lib/stati/covar.go Normal file
View File

@ -0,0 +1,104 @@
package stati
import "math"
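// Covar accumulates a running mean, variance, and covariance for a stream of (x, y) pairs.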
type Covar struct {
meanX float64
meanY float64
c float64
n float64
m2x float64
m2y float64
}
func (cov1 *Covar) MeanX() float64 {
return cov1.meanX
}
func (cov1 *Covar) MeanY() float64 {
return cov1.meanY
}
func (cov1 *Covar) N() float64 {
return cov1.n
}
func (cov1 *Covar) Covariance() float64 {
return cov1.c / (cov1.n - 1)
}
func (cov1 *Covar) VarianceX() float64 {
return cov1.m2x / (cov1.n - 1)
}
func (cov1 *Covar) StddevX() float64 {
return math.Sqrt(cov1.VarianceX())
}
func (cov1 *Covar) VarianceY() float64 {
return cov1.m2y / (cov1.n - 1)
}
func (cov1 *Covar) StddevY() float64 {
return math.Sqrt(cov1.VarianceY())
}
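// AddPoint folds a single (x, y) observation into the running means, M2 accumulators, and
// co-moment.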
func (cov1 *Covar) AddPoint(x, y float64) {
cov1.n++
dx := x - cov1.meanX
cov1.meanX += dx / cov1.n
dx2 := x - cov1.meanX
cov1.m2x += dx * dx2
dy := y - cov1.meanY
cov1.meanY += dy / cov1.n
dy2 := y - cov1.meanY
cov1.m2y += dy * dy2
cov1.c += dx * dy2
}
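// Combine merges cov2 into cov1 using the pairwise (parallel) update for the means, M2 terms,
// and co-moment, so two independently accumulated streams can be summarized together.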
func (cov1 *Covar) Combine(cov2 *Covar) {
if cov1.n == 0 {
*cov1 = *cov2
return
}
if cov2.n == 0 {
return
}
if cov1.n == 1 {
cpy := *cov2
cpy.AddPoint(cov1.meanX, cov1.meanY)
*cov1 = cpy
return
}
if cov2.n == 1 {
cov1.AddPoint(cov2.meanX, cov2.meanY)
return
}
out := Covar{}
out.n = cov1.n + cov2.n
dx := cov1.meanX - cov2.meanX
out.meanX = cov1.meanX - dx*cov2.n/out.n
out.m2x = cov1.m2x + cov2.m2x + dx*dx*cov1.n*cov2.n/out.n
dy := cov1.meanY - cov2.meanY
out.meanY = cov1.meanY - dy*cov2.n/out.n
out.m2y = cov1.m2y + cov2.m2y + dy*dy*cov1.n*cov2.n/out.n
out.c = cov1.c + cov2.c + dx*dy*cov1.n*cov2.n/out.n
*cov1 = out
}
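// A returns the slope of the least-squares fit y = A()*x + B().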
func (cov1 *Covar) A() float64 {
return cov1.Covariance() / cov1.VarianceX()
}
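// B returns the intercept of the least-squares fit y = A()*x + B().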
func (cov1 *Covar) B() float64 {
return cov1.meanY - cov1.meanX*cov1.A()
}
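// Correl returns the sample (Pearson) correlation coefficient.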
func (cov1 *Covar) Correl() float64 {
return cov1.Covariance() / cov1.StddevX() / cov1.StddevY()
}

56
lib/stati/histo.go Normal file
View File

@ -0,0 +1,56 @@
package stati
import (
"math"
"golang.org/x/xerrors"
)
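// Histogram is a cumulative histogram: Counts[i] holds the number of observations with
// x >= Buckets[i].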
type Histogram struct {
Buckets []float64
Counts []uint64
}
// NewHistogram creates a histogram with buckets defined as:
// {x > -Inf, x >= buckets[0], x >= buckets[1], ..., x >= buckets[i]}
func NewHistogram(buckets []float64) (*Histogram, error) {
if len(buckets) == 0 {
return nil, xerrors.Errorf("empty buckets")
}
prev := buckets[0]
for i, v := range buckets[1:] {
if v < prev {
return nil, xerrors.Errorf("bucket at index %d is smaller than previous %f < %f", i+1, v, prev)
}
prev = v
}
h := &Histogram{
Buckets: append([]float64{math.Inf(-1)}, buckets...),
Counts: make([]uint64, len(buckets)+1),
}
return h, nil
}
func (h *Histogram) Observe(x float64) {
for i, b := range h.Buckets {
if x >= b {
h.Counts[i]++
} else {
break
}
}
}
func (h *Histogram) Total() uint64 {
return h.Counts[0]
}
func (h *Histogram) Get(i int) uint64 {
if i >= len(h.Counts)-2 {
return h.Counts[i]
}
return h.Counts[i+1] - h.Counts[i+2]
}
func (h *Histogram) GetRatio(i int) float64 {
return float64(h.Get(i)) / float64(h.Total())
}
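// Usage sketch (illustrative values):
//
//	h, _ := NewHistogram([]float64{10, 20})
//	h.Observe(5)
//	h.Observe(15)
//	h.Observe(25)
//	// Counts is now [3 2 1]: 3 observations >= -Inf, 2 >= 10, 1 >= 20.
//	// h.Total() == 3 and h.Get(0) == 1 (the single observation in [10, 20)).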

66
lib/stati/meanvar.go Normal file
View File

@ -0,0 +1,66 @@
package stati
import (
"fmt"
"math"
)
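// MeanVar tracks a running mean and variance using Welford's online algorithm.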
type MeanVar struct {
n float64
mean float64
m2 float64
}
func (v1 *MeanVar) AddPoint(value float64) {
// based on https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
v1.n++
delta := value - v1.mean
v1.mean += delta / v1.n
delta2 := value - v1.mean
v1.m2 += delta * delta2
}
func (v1 *MeanVar) Mean() float64 {
return v1.mean
}
func (v1 *MeanVar) N() float64 {
return v1.n
}
func (v1 *MeanVar) Variance() float64 {
return v1.m2 / (v1.n - 1)
}
func (v1 *MeanVar) Stddev() float64 {
return math.Sqrt(v1.Variance())
}
func (v1 MeanVar) String() string {
return fmt.Sprintf("%f stddev: %f (%.0f)", v1.Mean(), v1.Stddev(), v1.N())
}
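// Combine merges v2 into v1 using the pairwise (parallel) mean/M2 update, so two independently
// accumulated streams can be merged without revisiting the raw samples.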
func (v1 *MeanVar) Combine(v2 *MeanVar) {
if v1.n == 0 {
*v1 = *v2
return
}
if v2.n == 0 {
return
}
if v1.n == 1 {
cpy := *v2
cpy.AddPoint(v1.mean)
*v1 = cpy
return
}
if v2.n == 1 {
v1.AddPoint(v2.mean)
return
}
newCount := v1.n + v2.n
delta := v2.mean - v1.mean
meanDelta := delta * v2.n / newCount
m2 := v1.m2 + v2.m2 + delta*meanDelta*v1.n
v1.n = newCount
v1.mean += meanDelta
v1.m2 = m2
}
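// Worked example of the online update in AddPoint: feeding the points 1, 2, and 3 into a fresh
// MeanVar leaves mean == 2 and m2 == 2, so Variance() == 2/(3-1) == 1, matching the sample
// variance of {1, 2, 3}.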

47
lib/stati/stats_test.go Normal file
View File

@ -0,0 +1,47 @@
package stati
import (
"math/rand"
"testing"
)
func TestMeanVar(t *testing.T) {
N := 16
ss := make([]*MeanVar, N)
rng := rand.New(rand.NewSource(1))
for i := 0; i < N; i++ {
ss[i] = &MeanVar{}
maxJ := rng.Intn(1000)
for j := 0; j < maxJ; j++ {
ss[i].AddPoint(rng.NormFloat64()*5 + 500)
}
t.Logf("mean: %f, stddev: %f, count %f", ss[i].mean, ss[i].Stddev(), ss[i].n)
}
out := &MeanVar{}
for i := 0; i < N; i++ {
out.Combine(ss[i])
t.Logf("combine: mean: %f, stddev: %f", out.mean, out.Stddev())
}
}
func TestCovar(t *testing.T) {
N := 16
ss := make([]*Covar, N)
rng := rand.New(rand.NewSource(1))
for i := 0; i < N; i++ {
ss[i] = &Covar{}
maxJ := rng.Intn(1000) + 500
for j := 0; j < maxJ; j++ {
x := rng.NormFloat64()*5 + 500
ss[i].AddPoint(x, x*2-1000)
}
t.Logf("corell: %f, y = %f*x+%f @%.0f", ss[i].Correl(), ss[i].A(), ss[i].B(), ss[i].n)
t.Logf("\txVar: %f yVar: %f covar: %f", ss[i].StddevX(), ss[i].StddevY(), ss[i].Covariance())
}
out := &Covar{}
for i := 0; i < N; i++ {
out.Combine(ss[i])
t.Logf("combine: corell: %f, y = %f*x+%f", out.Correl(), out.A(), out.B())
t.Logf("\txVar: %f yVar: %f covar: %f", out.StddevX(), out.StddevY(), out.Covariance())
}
}